aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/bus.c4
-rw-r--r--drivers/acpi/power.c11
-rw-r--r--drivers/acpi/scan.c21
-rw-r--r--drivers/acpi/sleep.c52
-rw-r--r--drivers/amba/bus.c53
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/ata/ahci_platform.c1
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata-eh.c3
-rw-r--r--drivers/ata/libata-scsi.c38
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/horizon.c5
-rw-r--r--drivers/atm/idt77252.c2
-rw-r--r--drivers/base/regmap/regmap.c4
-rw-r--r--drivers/bcma/sprom.c7
-rw-r--r--drivers/block/DAC960.c23
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c6
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c4
-rw-r--r--drivers/bluetooth/ath3k.c4
-rw-r--r--drivers/bluetooth/btusb.c6
-rw-r--r--drivers/char/virtio_console.c7
-rw-r--r--drivers/clk/clkdev.c142
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/dma/amba-pl08x.c1
-rw-r--r--drivers/dma/at_hdmac.c8
-rw-r--r--drivers/dma/ep93xx_dma.c4
-rw-r--r--drivers/dma/imx-dma.c9
-rw-r--r--drivers/dma/mxs-dma.c10
-rw-r--r--drivers/dma/pl330.c28
-rw-r--r--drivers/dma/ste_dma40.c323
-rw-r--r--drivers/dma/ste_dma40_ll.h2
-rw-r--r--drivers/firmware/efivars.c196
-rw-r--r--drivers/gpio/gpio-omap.c9
-rw-r--r--drivers/gpio/gpio-pch.c57
-rw-r--r--drivers/gpio/gpio-pxa.c21
-rw-r--r--drivers/gpio/gpio-samsung.c18
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c30
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c29
-rw-r--r--drivers/gpu/drm/i915/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c11
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c199
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv10_gpio.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fb.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c3
-rw-r--r--drivers/hsi/clients/hsi_char.c2
-rw-r--r--drivers/hsi/hsi.c223
-rw-r--r--drivers/hv/ring_buffer.c31
-rw-r--r--drivers/hwmon/ad7314.c12
-rw-r--r--drivers/hwmon/coretemp.c6
-rw-r--r--drivers/hwmon/fam15h_power.c9
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c4
-rw-r--r--drivers/i2c/busses/i2c-mxs.c8
-rw-r--r--drivers/i2c/busses/i2c-pnx.c3
-rw-r--r--drivers/i2c/busses/i2c-tegra.c8
-rw-r--r--drivers/ieee802154/Kconfig8
-rw-r--r--drivers/ieee802154/Makefile1
-rw-r--r--drivers/ieee802154/fakelb.c294
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/core/mad.c8
-rw-r--r--drivers/infiniband/core/netlink.c3
-rw-r--r--drivers/infiniband/core/ucma.c10
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/input/mouse/synaptics.c3
-rw-r--r--drivers/isdn/capi/capi.c50
-rw-r--r--drivers/isdn/capi/capidrv.c8
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c44
-rw-r--r--drivers/isdn/gigaset/capi.c118
-rw-r--r--drivers/isdn/gigaset/common.c59
-rw-r--r--drivers/isdn/gigaset/dummyll.c2
-rw-r--r--drivers/isdn/gigaset/ev-layer.c319
-rw-r--r--drivers/isdn/gigaset/gigaset.h30
-rw-r--r--drivers/isdn/gigaset/i4l.c12
-rw-r--r--drivers/isdn/gigaset/isocdata.c12
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c21
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c19
-rw-r--r--drivers/isdn/hardware/eicon/capifunc.c6
-rw-r--r--drivers/isdn/hardware/eicon/capimain.c4
-rw-r--r--drivers/isdn/hardware/eicon/diddfunc.c8
-rw-r--r--drivers/isdn/hardware/eicon/diva_didd.c6
-rw-r--r--drivers/isdn/hardware/eicon/divamnt.c6
-rw-r--r--drivers/isdn/hardware/eicon/divasfunc.c4
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c8
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c6
-rw-r--r--drivers/isdn/hardware/eicon/idifunc.c10
-rw-r--r--drivers/isdn/hardware/eicon/mntfunc.c8
-rw-r--r--drivers/isdn/hardware/eicon/platform.h3
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c228
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_multi.h15
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c706
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c103
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c137
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c145
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c131
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c218
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c5
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c140
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c10
-rw-r--r--drivers/isdn/i4l/isdn_bsdcomp.c2
-rw-r--r--drivers/isdn/mISDN/core.c16
-rw-r--r--drivers/isdn/mISDN/dsp.h4
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c19
-rw-r--r--drivers/isdn/mISDN/dsp_core.c1
-rw-r--r--drivers/isdn/mISDN/dsp_dtmf.c19
-rw-r--r--drivers/isdn/mISDN/hwchannel.c162
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/isdn/mISDN/layer1.c36
-rw-r--r--drivers/isdn/mISDN/layer2.c120
-rw-r--r--drivers/isdn/mISDN/tei.c72
-rw-r--r--drivers/leds/leds-netxbig.c4
-rw-r--r--drivers/leds/leds-ns2.c2
-rw-r--r--drivers/md/bitmap.c3
-rw-r--r--drivers/md/bitmap.h3
-rw-r--r--drivers/md/dm-log-userspace-transfer.c2
-rw-r--r--drivers/md/dm-mpath.c4
-rw-r--r--drivers/md/dm-raid.c4
-rw-r--r--drivers/md/dm-thin.c48
-rw-r--r--drivers/md/md.c9
-rw-r--r--drivers/md/raid10.c56
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c4
-rw-r--r--drivers/media/rc/ene_ir.c32
-rw-r--r--drivers/media/rc/fintek-cir.c22
-rw-r--r--drivers/media/rc/ite-cir.c20
-rw-r--r--drivers/media/rc/nuvoton-cir.c36
-rw-r--r--drivers/media/rc/winbond-cir.c78
-rw-r--r--drivers/media/video/gspca/sonixj.c8
-rw-r--r--drivers/media/video/marvell-ccic/mmp-driver.c1
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c33
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c4
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h2
-rw-r--r--drivers/media/video/soc_camera.c8
-rw-r--r--drivers/media/video/videobuf2-dma-contig.c3
-rw-r--r--drivers/media/video/videobuf2-memops.c1
-rw-r--r--drivers/message/fusion/mptlan.h1
-rw-r--r--drivers/mfd/omap-usb-host.c1
-rw-r--r--drivers/mmc/host/mmci.c18
-rw-r--r--drivers/mmc/host/mxs-mmc.c3
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/mtd/nand/ams-delta.c17
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c1
-rw-r--r--drivers/net/Kconfig7
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/Space.c62
-rw-r--r--drivers/net/arcnet/arc-rimi.c8
-rw-r--r--drivers/net/bonding/bond_3ad.c18
-rw-r--r--drivers/net/bonding/bond_3ad.h2
-rw-r--r--drivers/net/bonding/bond_alb.c70
-rw-r--r--drivers/net/bonding/bond_main.c70
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/caif/caif_hsi.c359
-rw-r--r--drivers/net/caif/caif_shmcore.c4
-rw-r--r--drivers/net/can/dev.c31
-rw-r--r--drivers/net/can/pch_can.c12
-rw-r--r--drivers/net/can/sja1000/ems_pci.c14
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c13
-rw-r--r--drivers/net/can/sja1000/peak_pci.c12
-rw-r--r--drivers/net/can/sja1000/plx_pci.c13
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/dummy.c6
-rw-r--r--drivers/net/ethernet/3com/3c509.c123
-rw-r--r--drivers/net/ethernet/8390/Kconfig25
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/ax88796.c1
-rw-r--r--drivers/net/ethernet/8390/etherh.c1
-rw-r--r--drivers/net/ethernet/8390/ne2.c798
-rw-r--r--drivers/net/ethernet/8390/smc-mca.c575
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c54
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c20
-rw-r--r--drivers/net/ethernet/amd/ariadne.c8
-rw-r--r--drivers/net/ethernet/amd/atarilance.c11
-rw-r--r--drivers/net/ethernet/amd/depca.c213
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h59
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c9
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c569
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.h983
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c1007
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c17
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c181
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h20
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c474
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h383
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h268
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h219
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c745
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c1134
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c114
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c273
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h15
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c65
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c61
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c142
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h6
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c316
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h11
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c6
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c535
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.h1
-rw-r--r--drivers/net/ethernet/cadence/macb.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c92
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c2221
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c34
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c2
-rw-r--r--drivers/net/ethernet/davicom/Kconfig2
-rw-r--r--drivers/net/ethernet/dec/ewrk3.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c34
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c301
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c27
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c443
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c17
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c280
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c468
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h26
-rw-r--r--drivers/net/ethernet/dlink/sundance.c12
-rw-r--r--drivers/net/ethernet/dnet.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h55
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c166
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h101
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c326
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h76
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c495
-rw-r--r--drivers/net/ethernet/fealnx.c14
-rw-r--r--drivers/net/ethernet/freescale/fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c13
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c30
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c6
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c1
-rw-r--r--drivers/net/ethernet/fujitsu/at1700.c120
-rw-r--r--drivers/net/ethernet/i825xx/3c523.c1312
-rw-r--r--drivers/net/ethernet/i825xx/3c523.h355
-rw-r--r--drivers/net/ethernet/i825xx/3c527.c1660
-rw-r--r--drivers/net/ethernet/i825xx/3c527.h81
-rw-r--r--drivers/net/ethernet/i825xx/Kconfig22
-rw-r--r--drivers/net/ethernet/i825xx/Makefile2
-rw-r--r--drivers/net/ethernet/i825xx/eexpress.c60
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c62
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.h2
-rw-r--r--drivers/net/ethernet/intel/Kconfig32
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c68
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c41
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h8
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h51
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c88
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h72
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c795
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c12
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c186
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c103
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c115
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c276
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h35
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h14
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c603
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h76
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c147
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h14
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h30
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c141
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c323
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c385
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h68
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c92
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c831
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c69
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c104
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c134
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c124
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c45
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c478
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c900
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c245
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h97
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c12
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c1
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c1
-rw-r--r--drivers/net/ethernet/marvell/sky2.c35
-rw-r--r--drivers/net/ethernet/marvell/sky2.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c271
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c28
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c7
-rw-r--r--drivers/net/ethernet/natsemi/Kconfig20
-rw-r--r--drivers/net/ethernet/natsemi/Makefile1
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c67
-rw-r--r--drivers/net/ethernet/neterion/s2io.c14
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c24
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h15
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c9
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c89
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c11
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c32
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h20
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c18
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h26
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c140
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h63
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c73
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c208
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c56
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c76
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c31
-rw-r--r--drivers/net/ethernet/realtek/8139too.c136
-rw-r--r--drivers/net/ethernet/realtek/r8169.c714
-rw-r--r--drivers/net/ethernet/renesas/Kconfig7
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c114
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h5
-rw-r--r--drivers/net/ethernet/s6gmac.c2
-rw-r--r--drivers/net/ethernet/sfc/efx.c38
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c37
-rw-r--r--drivers/net/ethernet/sfc/mcdi_phy.c76
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h8
-rw-r--r--drivers/net/ethernet/sfc/qt202x_phy.c33
-rw-r--r--drivers/net/ethernet/sfc/rx.c31
-rw-r--r--drivers/net/ethernet/silan/sc92031.c34
-rw-r--r--drivers/net/ethernet/sis/sis190.c26
-rw-r--r--drivers/net/ethernet/sis/sis900.c375
-rw-r--r--drivers/net/ethernet/smsc/epic100.c403
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c18
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c163
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c18
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c6
-rw-r--r--drivers/net/ethernet/sun/sunhme.c18
-rw-r--r--drivers/net/ethernet/sun/sunhme.h1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c13
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c3
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c5
-rw-r--r--drivers/net/ethernet/ti/tlan.c4
-rw-r--r--drivers/net/ethernet/tile/tilepro.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c8
-rw-r--r--drivers/net/ethernet/via/via-rhine.c12
-rw-r--r--drivers/net/ethernet/via/via-velocity.c9
-rw-r--r--drivers/net/ethernet/wiznet/Kconfig73
-rw-r--r--drivers/net/ethernet/wiznet/Makefile2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c808
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c720
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c6
-rw-r--r--drivers/net/ethernet/xscale/Kconfig6
-rw-r--r--drivers/net/ethernet/xscale/Makefile1
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/Kconfig6
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/Makefile3
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/caleb.c136
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/caleb.h22
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/enp2611.c232
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c212
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h115
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc408
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode130
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc272
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode98
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.c437
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.h29
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h57
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/pm3386.c351
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/pm3386.h29
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c29
-rw-r--r--drivers/net/hippi/rrunner.c83
-rw-r--r--drivers/net/hyperv/hyperv_net.h290
-rw-r--r--drivers/net/hyperv/netvsc.c41
-rw-r--r--drivers/net/hyperv/netvsc_drv.c44
-rw-r--r--drivers/net/hyperv/rndis_filter.c46
-rw-r--r--drivers/net/irda/Kconfig4
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c1
-rw-r--r--drivers/net/macvlan.c85
-rw-r--r--drivers/net/macvtap.c100
-rw-r--r--drivers/net/phy/Kconfig19
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/bcm63xx.c5
-rw-r--r--drivers/net/phy/davicom.c7
-rw-r--r--drivers/net/phy/dp83640.c31
-rw-r--r--drivers/net/phy/icplus.c12
-rw-r--r--drivers/net/phy/marvell.c18
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c142
-rw-r--r--drivers/net/phy/mdio-mux.c192
-rw-r--r--drivers/net/phy/mdio_bus.c32
-rw-r--r--drivers/net/phy/phy_device.c3
-rw-r--r--drivers/net/phy/spi_ks8995.c1
-rw-r--r--drivers/net/ppp/ppp_async.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c29
-rw-r--r--drivers/net/ppp/ppp_synctty.c4
-rw-r--r--drivers/net/ppp/pppoe.c18
-rw-r--r--drivers/net/ppp/pptp.c6
-rw-r--r--drivers/net/team/Kconfig11
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c523
-rw-r--r--drivers/net/team/team_mode_activebackup.c20
-rw-r--r--drivers/net/team/team_mode_loadbalance.c174
-rw-r--r--drivers/net/team/team_mode_roundrobin.c2
-rw-r--r--drivers/net/tokenring/3c359.c1843
-rw-r--r--drivers/net/tokenring/3c359.h291
-rw-r--r--drivers/net/tokenring/Kconfig199
-rw-r--r--drivers/net/tokenring/Makefile16
-rw-r--r--drivers/net/tokenring/abyss.c468
-rw-r--r--drivers/net/tokenring/abyss.h58
-rw-r--r--drivers/net/tokenring/ibmtr.c1964
-rw-r--r--drivers/net/tokenring/ibmtr_cs.c370
-rw-r--r--drivers/net/tokenring/lanstreamer.c1917
-rw-r--r--drivers/net/tokenring/lanstreamer.h343
-rw-r--r--drivers/net/tokenring/madgemc.c761
-rw-r--r--drivers/net/tokenring/madgemc.h70
-rw-r--r--drivers/net/tokenring/olympic.c1749
-rw-r--r--drivers/net/tokenring/olympic.h321
-rw-r--r--drivers/net/tokenring/proteon.c422
-rw-r--r--drivers/net/tokenring/skisa.c432
-rw-r--r--drivers/net/tokenring/smctr.c5717
-rw-r--r--drivers/net/tokenring/smctr.h1585
-rw-r--r--drivers/net/tokenring/tms380tr.c2306
-rw-r--r--drivers/net/tokenring/tms380tr.h1141
-rw-r--r--drivers/net/tokenring/tmspci.c248
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/asix.c4
-rw-r--r--drivers/net/usb/cdc_ether.c86
-rw-r--r--drivers/net/usb/qmi_wwan.c69
-rw-r--r--drivers/net/usb/rndis_host.c83
-rw-r--r--drivers/net/usb/smsc75xx.c58
-rw-r--r--drivers/net/usb/smsc75xx.h1
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/usb/usbnet.c60
-rw-r--r--drivers/net/virtio_net.c71
-rw-r--r--drivers/net/wan/dscc4.c13
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wan/lmc/lmc_main.c15
-rw-r--r--drivers/net/wimax/i2400m/Kconfig3
-rw-r--r--drivers/net/wimax/i2400m/usb-rx.c2
-rw-r--r--drivers/net/wimax/i2400m/usb.c2
-rw-r--r--drivers/net/wireless/Kconfig3
-rw-r--r--drivers/net/wireless/Makefile4
-rw-r--r--drivers/net/wireless/adm8211.c17
-rw-r--r--drivers/net/wireless/at76c50x-usb.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c44
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h31
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c28
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c17
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c5
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c29
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c10
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c419
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c30
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h34
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif-ops.h34
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h6
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc-ops.h113
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h98
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c (renamed from drivers/net/wireless/ath/ath6kl/htc.c)85
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c1713
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c59
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c25
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c785
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c80
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h40
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c116
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c97
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c170
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h44
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c84
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c46
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h45
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c300
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h104
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pri_detector.c452
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pri_detector.h52
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c178
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h22
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c174
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c10
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.h6
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/main.c4
-rw-r--r--drivers/net/wireless/ath/regd.c4
-rw-r--r--drivers/net/wireless/atmel.c3
-rw-r--r--drivers/net/wireless/atmel_pci.c13
-rw-r--r--drivers/net/wireless/b43/main.c26
-rw-r--r--drivers/net/wireless/b43/sdio.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c5
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c15
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c97
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c113
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c127
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c109
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h22
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c36
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/d11.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c41
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h40
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c16
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c16
-rw-r--r--drivers/net/wireless/ipw2x00/ipw.h23
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c166
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h10
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c70
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h55
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c16
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c4
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c6
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/common.c14
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig33
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c132
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c157
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c293
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c257
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-devices.c755
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c317
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h36
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c348
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c688
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c153
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c191
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1270
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h221
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h (renamed from drivers/net/wireless/iwlwifi/iwl-shared.h)282
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1480
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h234
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c509
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h192
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c233
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c246
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c231
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h126
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c65
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h129
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c74
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c105
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h233
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c549
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c334
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c571
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h164
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-ucode.c172
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig2
-rw-r--r--drivers/net/wireless/libertas/Makefile1
-rw-r--r--drivers/net/wireless/libertas/cfg.c9
-rw-r--r--drivers/net/wireless/libertas/decl.h11
-rw-r--r--drivers/net/wireless/libertas/dev.h10
-rw-r--r--drivers/net/wireless/libertas/firmware.c224
-rw-r--r--drivers/net/wireless/libertas/if_cs.c90
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c229
-rw-r--r--drivers/net/wireless/libertas/if_spi.c11
-rw-r--r--drivers/net/wireless/libertas/if_usb.c265
-rw-r--r--drivers/net/wireless/libertas/main.c117
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c57
-rw-r--r--drivers/net/wireless/mwifiex/11n.c17
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c36
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig15
-rw-r--r--drivers/net/wireless/mwifiex/Makefile3
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c112
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c31
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c63
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/mwifiex/decl.h1
-rw-r--r--drivers/net/wireless/mwifiex/fw.h34
-rw-r--r--drivers/net/wireless/mwifiex/init.c66
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h52
-rw-r--r--drivers/net/wireless/mwifiex/join.c64
-rw-r--r--drivers/net/wireless/mwifiex/main.c132
-rw-r--r--drivers/net/wireless/mwifiex/main.h54
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c3
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h18
-rw-r--r--drivers/net/wireless/mwifiex/scan.c80
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c10
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h9
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c100
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c80
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c15
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c120
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c15
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c12
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c21
-rw-r--r--drivers/net/wireless/mwifiex/usb.c1052
-rw-r--r--drivers/net/wireless/mwifiex/usb.h99
-rw-r--r--drivers/net/wireless/mwifiex/util.c22
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c18
-rw-r--r--drivers/net/wireless/mwl8k.c15
-rw-r--r--drivers/net/wireless/orinoco/fw.c7
-rw-r--r--drivers/net/wireless/p54/main.c11
-rw-r--r--drivers/net/wireless/p54/p54.h1
-rw-r--r--drivers/net/wireless/p54/p54pci.c13
-rw-r--r--drivers/net/wireless/p54/p54usb.c197
-rw-r--r--drivers/net/wireless/p54/p54usb.h3
-rw-r--r--drivers/net/wireless/p54/txrx.c5
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c6
-rw-r--r--drivers/net/wireless/rndis_wlan.c373
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c55
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c28
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c25
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c82
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c13
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c5
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c19
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c290
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/def.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c185
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.h51
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c156
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.h44
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/fw.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c11
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c10
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h87
-rw-r--r--drivers/net/wireless/ti/Kconfig14
-rw-r--r--drivers/net/wireless/ti/Makefile4
-rw-r--r--drivers/net/wireless/ti/wl1251/Kconfig (renamed from drivers/net/wireless/wl1251/Kconfig)0
-rw-r--r--drivers/net/wireless/ti/wl1251/Makefile (renamed from drivers/net/wireless/wl1251/Makefile)0
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c (renamed from drivers/net/wireless/wl1251/acx.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h (renamed from drivers/net/wireless/wl1251/acx.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/boot.c (renamed from drivers/net/wireless/wl1251/boot.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/boot.h (renamed from drivers/net/wireless/wl1251/boot.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c (renamed from drivers/net/wireless/wl1251/cmd.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h (renamed from drivers/net/wireless/wl1251/cmd.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/debugfs.c (renamed from drivers/net/wireless/wl1251/debugfs.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/debugfs.h (renamed from drivers/net/wireless/wl1251/debugfs.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c (renamed from drivers/net/wireless/wl1251/event.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/event.h (renamed from drivers/net/wireless/wl1251/event.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c (renamed from drivers/net/wireless/wl1251/init.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/init.h (renamed from drivers/net/wireless/wl1251/init.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/io.c (renamed from drivers/net/wireless/wl1251/io.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/io.h (renamed from drivers/net/wireless/wl1251/io.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c (renamed from drivers/net/wireless/wl1251/main.c)1
-rw-r--r--drivers/net/wireless/ti/wl1251/ps.c (renamed from drivers/net/wireless/wl1251/ps.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/ps.h (renamed from drivers/net/wireless/wl1251/ps.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h (renamed from drivers/net/wireless/wl1251/reg.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c (renamed from drivers/net/wireless/wl1251/rx.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.h (renamed from drivers/net/wireless/wl1251/rx.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c (renamed from drivers/net/wireless/wl1251/sdio.c)2
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c (renamed from drivers/net/wireless/wl1251/spi.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.h (renamed from drivers/net/wireless/wl1251/spi.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c (renamed from drivers/net/wireless/wl1251/tx.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.h (renamed from drivers/net/wireless/wl1251/tx.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h (renamed from drivers/net/wireless/wl1251/wl1251.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/wl12xx_80211.h (renamed from drivers/net/wireless/wl1251/wl12xx_80211.h)0
-rw-r--r--drivers/net/wireless/ti/wl12xx/Kconfig8
-rw-r--r--drivers/net/wireless/ti/wl12xx/Makefile3
-rw-r--r--drivers/net/wireless/ti/wl12xx/acx.c53
-rw-r--r--drivers/net/wireless/ti/wl12xx/acx.h36
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c254
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.h112
-rw-r--r--drivers/net/wireless/ti/wl12xx/conf.h50
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c1388
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h (renamed from drivers/net/wireless/wl12xx/reg.h)315
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h31
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig41
-rw-r--r--drivers/net/wireless/ti/wlcore/Makefile15
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c (renamed from drivers/net/wireless/wl12xx/acx.c)42
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h (renamed from drivers/net/wireless/wl12xx/acx.h)10
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c443
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.h54
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c (renamed from drivers/net/wireless/wl12xx/cmd.c)285
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h (renamed from drivers/net/wireless/wl12xx/cmd.h)98
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h (renamed from drivers/net/wireless/wl12xx/conf.h)85
-rw-r--r--drivers/net/wireless/ti/wlcore/debug.h (renamed from drivers/net/wireless/wl12xx/debug.h)1
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c (renamed from drivers/net/wireless/wl12xx/debugfs.c)3
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h (renamed from drivers/net/wireless/wl12xx/debugfs.h)2
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c (renamed from drivers/net/wireless/wl12xx/event.c)31
-rw-r--r--drivers/net/wireless/ti/wlcore/event.h (renamed from drivers/net/wireless/wl12xx/event.h)3
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h122
-rw-r--r--drivers/net/wireless/ti/wlcore/ini.h (renamed from drivers/net/wireless/wl12xx/ini.h)0
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c (renamed from drivers/net/wireless/wl12xx/init.c)66
-rw-r--r--drivers/net/wireless/ti/wlcore/init.h (renamed from drivers/net/wireless/wl12xx/init.h)2
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c (renamed from drivers/net/wireless/wl12xx/io.c)191
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h (renamed from drivers/net/wireless/wl12xx/io.h)88
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c (renamed from drivers/net/wireless/wl12xx/main.c)824
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c (renamed from drivers/net/wireless/wl12xx/ps.c)8
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.h (renamed from drivers/net/wireless/wl12xx/ps.h)2
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c (renamed from drivers/net/wireless/wl12xx/rx.c)130
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h (renamed from drivers/net/wireless/wl12xx/rx.h)12
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c (renamed from drivers/net/wireless/wl12xx/scan.c)30
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h (renamed from drivers/net/wireless/wl12xx/scan.h)4
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c (renamed from drivers/net/wireless/wl12xx/sdio.c)6
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c (renamed from drivers/net/wireless/wl12xx/spi.c)4
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c (renamed from drivers/net/wireless/wl12xx/testmode.c)12
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.h (renamed from drivers/net/wireless/wl12xx/testmode.h)0
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c (renamed from drivers/net/wireless/wl12xx/tx.c)125
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h (renamed from drivers/net/wireless/wl12xx/tx.h)7
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx.h (renamed from drivers/net/wireless/wl12xx/wl12xx.h)271
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx_80211.h (renamed from drivers/net/wireless/wl12xx/wl12xx_80211.h)0
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c (renamed from drivers/net/wireless/wl12xx/wl12xx_platform_data.c)0
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h448
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig48
-rw-r--r--drivers/net/wireless/wl12xx/Makefile15
-rw-r--r--drivers/net/wireless/wl12xx/boot.c786
-rw-r--r--drivers/net/wireless/wl12xx/boot.h120
-rw-r--r--drivers/nfc/pn533.c228
-rw-r--r--drivers/of/of_mdio.c2
-rw-r--r--drivers/parisc/sba_iommu.c1
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/quirks.c12
-rw-r--r--drivers/platform/x86/acerhdf.c67
-rw-r--r--drivers/platform/x86/dell-laptop.c1
-rw-r--r--drivers/platform/x86/intel_ips.c2
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c2
-rw-r--r--drivers/ptp/Kconfig10
-rw-r--r--drivers/ptp/ptp_clock.c6
-rw-r--r--drivers/ptp/ptp_ixp46x.c3
-rw-r--r--drivers/ptp/ptp_pch.c8
-rw-r--r--drivers/regulator/core.c5
-rw-r--r--drivers/regulator/max8997.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c1
-rw-r--r--drivers/rtc/rtc-mpc5121.c3
-rw-r--r--drivers/rtc/rtc-pl031.c18
-rw-r--r--drivers/s390/char/sclp_cmd.c12
-rw-r--r--drivers/s390/char/tape.h43
-rw-r--r--drivers/s390/char/tape_34xx.c136
-rw-r--r--drivers/s390/char/tape_3590.c105
-rw-r--r--drivers/s390/char/tape_char.c13
-rw-r--r--drivers/s390/char/tape_core.c16
-rw-r--r--drivers/s390/cio/ccwgroup.c112
-rw-r--r--drivers/s390/cio/cio.c73
-rw-r--r--drivers/s390/cio/device.c13
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/qdio_main.c47
-rw-r--r--drivers/s390/crypto/ap_bus.c24
-rw-r--r--drivers/s390/crypto/ap_bus.h7
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c3
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c3
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c3
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c5
-rw-r--r--drivers/s390/net/Kconfig5
-rw-r--r--drivers/s390/net/claw.c165
-rw-r--r--drivers/s390/net/ctcm_main.c52
-rw-r--r--drivers/s390/net/ctcm_main.h8
-rw-r--r--drivers/s390/net/ctcm_sysfs.c37
-rw-r--r--drivers/s390/net/lcs.c73
-rw-r--r--drivers/s390/net/qeth_core.h28
-rw-r--r--drivers/s390/net/qeth_core_main.c192
-rw-r--r--drivers/s390/net/qeth_core_mpc.h10
-rw-r--r--drivers/s390/net/qeth_core_sys.c49
-rw-r--r--drivers/s390/net/qeth_l2_main.c16
-rw-r--r--drivers/s390/net/qeth_l3_main.c110
-rw-r--r--drivers/s390/net/qeth_l3_sys.c112
-rw-r--r--drivers/scsi/atari_scsi.c26
-rw-r--r--drivers/scsi/atari_scsi.h5
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/ipr.c6
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/libfc/fc_lport.c12
-rw-r--r--drivers/scsi/libsas/sas_ata.c33
-rw-r--r--drivers/scsi/libsas/sas_discover.c61
-rw-r--r--drivers/scsi/libsas/sas_event.c24
-rw-r--r--drivers/scsi/libsas/sas_expander.c56
-rw-r--r--drivers/scsi/libsas/sas_init.c11
-rw-r--r--drivers/scsi/libsas/sas_internal.h6
-rw-r--r--drivers/scsi/libsas/sas_phy.c21
-rw-r--r--drivers/scsi/libsas/sas_port.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/virtio_scsi.c24
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-bcm63xx.c163
-rw-r--r--drivers/spi/spi-bfin-sport.c21
-rw-r--r--drivers/spi/spi-bfin5xx.c14
-rw-r--r--drivers/spi/spi-ep93xx.c24
-rw-r--r--drivers/spi/spi-pl022.c58
-rw-r--r--drivers/staging/octeon/ethernet-rx.c1
-rw-r--r--drivers/staging/octeon/ethernet-tx.c3
-rw-r--r--drivers/staging/octeon/ethernet.c1
-rw-r--r--drivers/staging/ozwpan/ozpd.c2
-rw-r--r--drivers/staging/ramster/cluster/tcp.c2
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c20
-rw-r--r--drivers/staging/tidspbridge/core/wdt.c8
-rw-r--r--drivers/staging/zcache/Kconfig2
-rw-r--r--drivers/target/target_core_file.c22
-rw-r--r--drivers/target/target_core_pr.c3
-rw-r--r--drivers/target/target_core_tpg.c22
-rw-r--r--drivers/tty/serial/pmac_zilog.c6
-rw-r--r--drivers/tty/vt/keyboard.c28
-rw-r--r--drivers/usb/class/cdc-wdm.c7
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/gadget/dummy_hcd.c1
-rw-r--r--drivers/usb/gadget/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/f_rndis.c6
-rw-r--r--drivers/usb/gadget/file_storage.c2
-rw-r--r--drivers/usb/gadget/ndis.h164
-rw-r--r--drivers/usb/gadget/rndis.c271
-rw-r--r--drivers/usb/gadget/rndis.h48
-rw-r--r--drivers/usb/gadget/udc-core.c4
-rw-r--r--drivers/usb/gadget/uvc.h2
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c2
-rw-r--r--drivers/usb/host/ehci-pci.c8
-rw-r--r--drivers/usb/host/ehci-tegra.c376
-rw-r--r--drivers/usb/musb/davinci.c3
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_io.h2
-rw-r--r--drivers/usb/otg/gpio_vbus.c15
-rw-r--r--drivers/vhost/net.c16
-rw-r--r--drivers/vhost/vhost.c6
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/bfin-lq035q1-fb.c1
-rw-r--r--drivers/video/console/sticore.c2
-rw-r--r--drivers/video/uvesafb.c2
-rw-r--r--drivers/video/xen-fbfront.c27
-rw-r--r--drivers/virtio/virtio_balloon.c1
-rw-r--r--drivers/watchdog/hpwdt.c6
-rw-r--r--drivers/xen/Kconfig22
-rw-r--r--drivers/xen/events.c2
-rw-r--r--drivers/xen/xen-acpi-processor.c5
1027 files changed, 43722 insertions, 56429 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3263b68cdfa3..3188da3df8da 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -250,6 +250,10 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
250 return -ENODEV; 250 return -ENODEV;
251 } 251 }
252 252
253 /* For D3cold we should execute _PS3, not _PS4. */
254 if (state == ACPI_STATE_D3_COLD)
255 object_name[3] = '3';
256
253 /* 257 /*
254 * Transition Power 258 * Transition Power
255 * ---------------- 259 * ----------------
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 7049a7d27c4f..0500f719f63e 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
631 * We know a device's inferred power state when all the resources 631 * We know a device's inferred power state when all the resources
632 * required for a given D-state are 'on'. 632 * required for a given D-state are 'on'.
633 */ 633 */
634 for (i = ACPI_STATE_D0; i < ACPI_STATE_D3; i++) { 634 for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
635 list = &device->power.states[i].resources; 635 list = &device->power.states[i].resources;
636 if (list->count < 1) 636 if (list->count < 1)
637 continue; 637 continue;
@@ -660,7 +660,7 @@ int acpi_power_on_resources(struct acpi_device *device, int state)
660 660
661int acpi_power_transition(struct acpi_device *device, int state) 661int acpi_power_transition(struct acpi_device *device, int state)
662{ 662{
663 int result; 663 int result = 0;
664 664
665 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD)) 665 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
666 return -EINVAL; 666 return -EINVAL;
@@ -679,8 +679,11 @@ int acpi_power_transition(struct acpi_device *device, int state)
679 * (e.g. so the device doesn't lose power while transitioning). Then, 679 * (e.g. so the device doesn't lose power while transitioning). Then,
680 * we dereference all power resources used in the current list. 680 * we dereference all power resources used in the current list.
681 */ 681 */
682 result = acpi_power_on_list(&device->power.states[state].resources); 682 if (state < ACPI_STATE_D3_COLD)
683 if (!result) 683 result = acpi_power_on_list(
684 &device->power.states[state].resources);
685
686 if (!result && device->power.state < ACPI_STATE_D3_COLD)
684 acpi_power_off_list( 687 acpi_power_off_list(
685 &device->power.states[device->power.state].resources); 688 &device->power.states[device->power.state].resources);
686 689
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 767e2dcb9616..85cbfdccc97c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -869,7 +869,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
869 /* 869 /*
870 * Enumerate supported power management states 870 * Enumerate supported power management states
871 */ 871 */
872 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) { 872 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
873 struct acpi_device_power_state *ps = &device->power.states[i]; 873 struct acpi_device_power_state *ps = &device->power.states[i];
874 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' }; 874 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
875 875
@@ -884,21 +884,18 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
884 acpi_bus_add_power_resource(ps->resources.handles[j]); 884 acpi_bus_add_power_resource(ps->resources.handles[j]);
885 } 885 }
886 886
887 /* The exist of _PR3 indicates D3Cold support */
888 if (i == ACPI_STATE_D3) {
889 status = acpi_get_handle(device->handle, object_name, &handle);
890 if (ACPI_SUCCESS(status))
891 device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
892 }
893
894 /* Evaluate "_PSx" to see if we can do explicit sets */ 887 /* Evaluate "_PSx" to see if we can do explicit sets */
895 object_name[2] = 'S'; 888 object_name[2] = 'S';
896 status = acpi_get_handle(device->handle, object_name, &handle); 889 status = acpi_get_handle(device->handle, object_name, &handle);
897 if (ACPI_SUCCESS(status)) 890 if (ACPI_SUCCESS(status))
898 ps->flags.explicit_set = 1; 891 ps->flags.explicit_set = 1;
899 892
900 /* State is valid if we have some power control */ 893 /*
901 if (ps->resources.count || ps->flags.explicit_set) 894 * State is valid if there are means to put the device into it.
895 * D3hot is only valid if _PR3 present.
896 */
897 if (ps->resources.count ||
898 (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT))
902 ps->flags.valid = 1; 899 ps->flags.valid = 1;
903 900
904 ps->power = -1; /* Unknown - driver assigned */ 901 ps->power = -1; /* Unknown - driver assigned */
@@ -911,6 +908,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
911 device->power.states[ACPI_STATE_D3].flags.valid = 1; 908 device->power.states[ACPI_STATE_D3].flags.valid = 1;
912 device->power.states[ACPI_STATE_D3].power = 0; 909 device->power.states[ACPI_STATE_D3].power = 0;
913 910
911 /* Set D3cold's explicit_set flag if _PS3 exists. */
912 if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
913 device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;
914
914 acpi_bus_init_power(device); 915 acpi_bus_init_power(device);
915 916
916 return 0; 917 return 0;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 1d661b5c3287..eb6fd233764b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,23 +28,33 @@
28#include "internal.h" 28#include "internal.h"
29#include "sleep.h" 29#include "sleep.h"
30 30
31u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
31static unsigned int gts, bfs; 32static unsigned int gts, bfs;
32module_param(gts, uint, 0644); 33static int set_param_wake_flag(const char *val, struct kernel_param *kp)
33module_param(bfs, uint, 0644);
34MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
35MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
36
37static u8 wake_sleep_flags(void)
38{ 34{
39 u8 flags = ACPI_NO_OPTIONAL_METHODS; 35 int ret = param_set_int(val, kp);
40 36
41 if (gts) 37 if (ret)
42 flags |= ACPI_EXECUTE_GTS; 38 return ret;
43 if (bfs)
44 flags |= ACPI_EXECUTE_BFS;
45 39
46 return flags; 40 if (kp->arg == (const char *)&gts) {
41 if (gts)
42 wake_sleep_flags |= ACPI_EXECUTE_GTS;
43 else
44 wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
45 }
46 if (kp->arg == (const char *)&bfs) {
47 if (bfs)
48 wake_sleep_flags |= ACPI_EXECUTE_BFS;
49 else
50 wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
51 }
52 return ret;
47} 53}
54module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
55module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
56MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
57MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
48 58
49static u8 sleep_states[ACPI_S_STATE_COUNT]; 59static u8 sleep_states[ACPI_S_STATE_COUNT];
50 60
@@ -263,7 +273,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
263{ 273{
264 acpi_status status = AE_OK; 274 acpi_status status = AE_OK;
265 u32 acpi_state = acpi_target_sleep_state; 275 u32 acpi_state = acpi_target_sleep_state;
266 u8 flags = wake_sleep_flags();
267 int error; 276 int error;
268 277
269 ACPI_FLUSH_CPU_CACHE(); 278 ACPI_FLUSH_CPU_CACHE();
@@ -271,7 +280,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
271 switch (acpi_state) { 280 switch (acpi_state) {
272 case ACPI_STATE_S1: 281 case ACPI_STATE_S1:
273 barrier(); 282 barrier();
274 status = acpi_enter_sleep_state(acpi_state, flags); 283 status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags);
275 break; 284 break;
276 285
277 case ACPI_STATE_S3: 286 case ACPI_STATE_S3:
@@ -286,7 +295,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
286 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); 295 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
287 296
288 /* Reprogram control registers and execute _BFS */ 297 /* Reprogram control registers and execute _BFS */
289 acpi_leave_sleep_state_prep(acpi_state, flags); 298 acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags);
290 299
291 /* ACPI 3.0 specs (P62) says that it's the responsibility 300 /* ACPI 3.0 specs (P62) says that it's the responsibility
292 * of the OSPM to clear the status bit [ implying that the 301 * of the OSPM to clear the status bit [ implying that the
@@ -550,30 +559,27 @@ static int acpi_hibernation_begin(void)
550 559
551static int acpi_hibernation_enter(void) 560static int acpi_hibernation_enter(void)
552{ 561{
553 u8 flags = wake_sleep_flags();
554 acpi_status status = AE_OK; 562 acpi_status status = AE_OK;
555 563
556 ACPI_FLUSH_CPU_CACHE(); 564 ACPI_FLUSH_CPU_CACHE();
557 565
558 /* This shouldn't return. If it returns, we have a problem */ 566 /* This shouldn't return. If it returns, we have a problem */
559 status = acpi_enter_sleep_state(ACPI_STATE_S4, flags); 567 status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags);
560 /* Reprogram control registers and execute _BFS */ 568 /* Reprogram control registers and execute _BFS */
561 acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags); 569 acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
562 570
563 return ACPI_SUCCESS(status) ? 0 : -EFAULT; 571 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
564} 572}
565 573
566static void acpi_hibernation_leave(void) 574static void acpi_hibernation_leave(void)
567{ 575{
568 u8 flags = wake_sleep_flags();
569
570 /* 576 /*
571 * If ACPI is not enabled by the BIOS and the boot kernel, we need to 577 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
572 * enable it here. 578 * enable it here.
573 */ 579 */
574 acpi_enable(); 580 acpi_enable();
575 /* Reprogram control registers and execute _BFS */ 581 /* Reprogram control registers and execute _BFS */
576 acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags); 582 acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
577 /* Check the hardware signature */ 583 /* Check the hardware signature */
578 if (facs && s4_hardware_signature != facs->hardware_signature) { 584 if (facs && s4_hardware_signature != facs->hardware_signature) {
579 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " 585 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -828,12 +834,10 @@ static void acpi_power_off_prepare(void)
828 834
829static void acpi_power_off(void) 835static void acpi_power_off(void)
830{ 836{
831 u8 flags = wake_sleep_flags();
832
833 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ 837 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
834 printk(KERN_DEBUG "%s called\n", __func__); 838 printk(KERN_DEBUG "%s called\n", __func__);
835 local_irq_disable(); 839 local_irq_disable();
836 acpi_enter_sleep_state(ACPI_STATE_S5, flags); 840 acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags);
837} 841}
838 842
839/* 843/*
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index cc273226dbd0..b7e728517284 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -527,9 +527,9 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
527 if (ret) 527 if (ret)
528 goto err_release; 528 goto err_release;
529 529
530 if (dev->irq[0] && dev->irq[0] != NO_IRQ) 530 if (dev->irq[0])
531 ret = device_create_file(&dev->dev, &dev_attr_irq0); 531 ret = device_create_file(&dev->dev, &dev_attr_irq0);
532 if (ret == 0 && dev->irq[1] && dev->irq[1] != NO_IRQ) 532 if (ret == 0 && dev->irq[1])
533 ret = device_create_file(&dev->dev, &dev_attr_irq1); 533 ret = device_create_file(&dev->dev, &dev_attr_irq1);
534 if (ret == 0) 534 if (ret == 0)
535 return ret; 535 return ret;
@@ -543,6 +543,55 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
543} 543}
544EXPORT_SYMBOL_GPL(amba_device_add); 544EXPORT_SYMBOL_GPL(amba_device_add);
545 545
546static struct amba_device *
547amba_aphb_device_add(struct device *parent, const char *name,
548 resource_size_t base, size_t size, int irq1, int irq2,
549 void *pdata, unsigned int periphid, u64 dma_mask)
550{
551 struct amba_device *dev;
552 int ret;
553
554 dev = amba_device_alloc(name, base, size);
555 if (!dev)
556 return ERR_PTR(-ENOMEM);
557
558 dev->dma_mask = dma_mask;
559 dev->dev.coherent_dma_mask = dma_mask;
560 dev->irq[0] = irq1;
561 dev->irq[1] = irq2;
562 dev->periphid = periphid;
563 dev->dev.platform_data = pdata;
564 dev->dev.parent = parent;
565
566 ret = amba_device_add(dev, &iomem_resource);
567 if (ret) {
568 amba_device_put(dev);
569 return ERR_PTR(ret);
570 }
571
572 return dev;
573}
574
575struct amba_device *
576amba_apb_device_add(struct device *parent, const char *name,
577 resource_size_t base, size_t size, int irq1, int irq2,
578 void *pdata, unsigned int periphid)
579{
580 return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
581 periphid, 0);
582}
583EXPORT_SYMBOL_GPL(amba_apb_device_add);
584
585struct amba_device *
586amba_ahb_device_add(struct device *parent, const char *name,
587 resource_size_t base, size_t size, int irq1, int irq2,
588 void *pdata, unsigned int periphid)
589{
590 return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
591 periphid, ~0ULL);
592}
593EXPORT_SYMBOL_GPL(amba_ahb_device_add);
594
546static void amba_device_initialize(struct amba_device *dev, const char *name) 595static void amba_device_initialize(struct amba_device *dev, const char *name)
547{ 596{
548 device_initialize(&dev->dev); 597 device_initialize(&dev->dev);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 79a1e9dd56d9..ebaf67e4b2bc 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -394,6 +394,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
394 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ 394 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
395 { PCI_DEVICE(0x1b4b, 0x9125), 395 { PCI_DEVICE(0x1b4b, 0x9125),
396 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ 396 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
397 { PCI_DEVICE(0x1b4b, 0x917a),
398 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
397 { PCI_DEVICE(0x1b4b, 0x91a3), 399 { PCI_DEVICE(0x1b4b, 0x91a3),
398 .driver_data = board_ahci_yes_fbs }, 400 .driver_data = board_ahci_yes_fbs },
399 401
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 0c86c77764bc..9e419e1c2006 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -280,6 +280,7 @@ static struct dev_pm_ops ahci_pm_ops = {
280 280
281static const struct of_device_id ahci_of_match[] = { 281static const struct of_device_id ahci_of_match[] = {
282 { .compatible = "calxeda,hb-ahci", }, 282 { .compatible = "calxeda,hb-ahci", },
283 { .compatible = "snps,spear-ahci", },
283 {}, 284 {},
284}; 285};
285MODULE_DEVICE_TABLE(of, ahci_of_match); 286MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 28db50b57b91..23763a1ec570 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -95,7 +95,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
95static void ata_dev_xfermask(struct ata_device *dev); 95static void ata_dev_xfermask(struct ata_device *dev);
96static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 96static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
97 97
98atomic_t ata_print_id = ATOMIC_INIT(1); 98atomic_t ata_print_id = ATOMIC_INIT(0);
99 99
100struct ata_force_param { 100struct ata_force_param {
101 const char *name; 101 const char *name;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c61316e9d2f7..d1fbd59ead16 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3501,7 +3501,8 @@ static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg
3501 u64 now = get_jiffies_64(); 3501 u64 now = get_jiffies_64();
3502 int *trials = void_arg; 3502 int *trials = void_arg;
3503 3503
3504 if (ent->timestamp < now - min(now, interval)) 3504 if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3505 (ent->timestamp < now - min(now, interval)))
3505 return -1; 3506 return -1;
3506 3507
3507 (*trials)++; 3508 (*trials)++;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 93dabdcd2cbe..22226350cd0c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3399,7 +3399,8 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
3399 */ 3399 */
3400 shost->max_host_blocked = 1; 3400 shost->max_host_blocked = 1;
3401 3401
3402 rc = scsi_add_host(ap->scsi_host, &ap->tdev); 3402 rc = scsi_add_host_with_dma(ap->scsi_host,
3403 &ap->tdev, ap->host->dev);
3403 if (rc) 3404 if (rc)
3404 goto err_add; 3405 goto err_add;
3405 } 3406 }
@@ -3838,18 +3839,25 @@ void ata_sas_port_stop(struct ata_port *ap)
3838} 3839}
3839EXPORT_SYMBOL_GPL(ata_sas_port_stop); 3840EXPORT_SYMBOL_GPL(ata_sas_port_stop);
3840 3841
3841int ata_sas_async_port_init(struct ata_port *ap) 3842/**
3843 * ata_sas_async_probe - simply schedule probing and return
3844 * @ap: Port to probe
3845 *
3846 * For batch scheduling of probe for sas attached ata devices, assumes
3847 * the port has already been through ata_sas_port_init()
3848 */
3849void ata_sas_async_probe(struct ata_port *ap)
3842{ 3850{
3843 int rc = ap->ops->port_start(ap); 3851 __ata_port_probe(ap);
3844 3852}
3845 if (!rc) { 3853EXPORT_SYMBOL_GPL(ata_sas_async_probe);
3846 ap->print_id = atomic_inc_return(&ata_print_id);
3847 __ata_port_probe(ap);
3848 }
3849 3854
3850 return rc; 3855int ata_sas_sync_probe(struct ata_port *ap)
3856{
3857 return ata_port_probe(ap);
3851} 3858}
3852EXPORT_SYMBOL_GPL(ata_sas_async_port_init); 3859EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
3860
3853 3861
3854/** 3862/**
3855 * ata_sas_port_init - Initialize a SATA device 3863 * ata_sas_port_init - Initialize a SATA device
@@ -3866,12 +3874,10 @@ int ata_sas_port_init(struct ata_port *ap)
3866{ 3874{
3867 int rc = ap->ops->port_start(ap); 3875 int rc = ap->ops->port_start(ap);
3868 3876
3869 if (!rc) { 3877 if (rc)
3870 ap->print_id = atomic_inc_return(&ata_print_id); 3878 return rc;
3871 rc = ata_port_probe(ap); 3879 ap->print_id = atomic_inc_return(&ata_print_id);
3872 } 3880 return 0;
3873
3874 return rc;
3875} 3881}
3876EXPORT_SYMBOL_GPL(ata_sas_port_init); 3882EXPORT_SYMBOL_GPL(ata_sas_port_init);
3877 3883
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index fc2db2a89a6b..3239517f4d90 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -943,9 +943,9 @@ static int arasan_cf_resume(struct device *dev)
943 943
944 return 0; 944 return 0;
945} 945}
946#endif
946 947
947static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume); 948static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
948#endif
949 949
950static struct platform_driver arasan_cf_driver = { 950static struct platform_driver arasan_cf_driver = {
951 .probe = arasan_cf_probe, 951 .probe = arasan_cf_probe,
@@ -953,9 +953,7 @@ static struct platform_driver arasan_cf_driver = {
953 .driver = { 953 .driver = {
954 .name = DRIVER_NAME, 954 .name = DRIVER_NAME,
955 .owner = THIS_MODULE, 955 .owner = THIS_MODULE,
956#ifdef CONFIG_PM
957 .pm = &arasan_cf_pm_ops, 956 .pm = &arasan_cf_pm_ops,
958#endif
959 }, 957 },
960}; 958};
961 959
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index f8f41e0e8a8c..89b30f32ba68 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -802,7 +802,7 @@ static void fill_rx_pool (amb_dev * dev, unsigned char pool,
802 } 802 }
803 // cast needed as there is no %? for pointer differences 803 // cast needed as there is no %? for pointer differences
804 PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li", 804 PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
805 skb, skb->head, (long) (skb_end_pointer(skb) - skb->head)); 805 skb, skb->head, (long) skb_end_offset(skb));
806 rx.handle = virt_to_bus (skb); 806 rx.handle = virt_to_bus (skb);
807 rx.host_address = cpu_to_be32 (virt_to_bus (skb->data)); 807 rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
808 if (rx_give (dev, &rx, pool)) 808 if (rx_give (dev, &rx, pool))
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 75fd691cd43e..7d01c2a75256 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2182,7 +2182,6 @@ static int hrz_open (struct atm_vcc *atm_vcc)
2182 default: 2182 default:
2183 PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!"); 2183 PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
2184 return -EINVAL; 2184 return -EINVAL;
2185 break;
2186 } 2185 }
2187 2186
2188 // TX traffic parameters 2187 // TX traffic parameters
@@ -2357,7 +2356,6 @@ static int hrz_open (struct atm_vcc *atm_vcc)
2357 default: { 2356 default: {
2358 PRINTD (DBG_QOS, "unsupported TX traffic class"); 2357 PRINTD (DBG_QOS, "unsupported TX traffic class");
2359 return -EINVAL; 2358 return -EINVAL;
2360 break;
2361 } 2359 }
2362 } 2360 }
2363 } 2361 }
@@ -2433,7 +2431,6 @@ static int hrz_open (struct atm_vcc *atm_vcc)
2433 default: { 2431 default: {
2434 PRINTD (DBG_QOS, "unsupported RX traffic class"); 2432 PRINTD (DBG_QOS, "unsupported RX traffic class");
2435 return -EINVAL; 2433 return -EINVAL;
2436 break;
2437 } 2434 }
2438 } 2435 }
2439 } 2436 }
@@ -2581,7 +2578,6 @@ static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname,
2581// break; 2578// break;
2582 default: 2579 default:
2583 return -ENOPROTOOPT; 2580 return -ENOPROTOOPT;
2584 break;
2585 }; 2581 };
2586 break; 2582 break;
2587 } 2583 }
@@ -2601,7 +2597,6 @@ static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname,
2601// break; 2597// break;
2602 default: 2598 default:
2603 return -ENOPROTOOPT; 2599 return -ENOPROTOOPT;
2604 break;
2605 }; 2600 };
2606 break; 2601 break;
2607 } 2602 }
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1c052127548c..8974bd2b961e 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1258,7 +1258,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
1258 tail = readl(SAR_REG_RAWCT); 1258 tail = readl(SAR_REG_RAWCT);
1259 1259
1260 pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue), 1260 pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
1261 skb_end_pointer(queue) - queue->head - 16, 1261 skb_end_offset(queue) - 16,
1262 PCI_DMA_FROMDEVICE); 1262 PCI_DMA_FROMDEVICE);
1263 1263
1264 while (head != tail) { 1264 while (head != tail) {
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7a3f535e481c..bb80853ff27a 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -775,9 +775,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
775 map->format.parse_val(val + i); 775 map->format.parse_val(val + i);
776 } else { 776 } else {
777 for (i = 0; i < val_count; i++) { 777 for (i = 0; i < val_count; i++) {
778 ret = regmap_read(map, reg + i, val + (i * val_bytes)); 778 unsigned int ival;
779 ret = regmap_read(map, reg + i, &ival);
779 if (ret != 0) 780 if (ret != 0)
780 return ret; 781 return ret;
782 memcpy(val + (i * val_bytes), &ival, val_bytes);
781 } 783 }
782 } 784 }
783 785
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index cdcf75c0954f..3e2a6002aae6 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -404,16 +404,19 @@ int bcma_sprom_get(struct bcma_bus *bus)
404 return -EOPNOTSUPP; 404 return -EOPNOTSUPP;
405 405
406 if (!bcma_sprom_ext_available(bus)) { 406 if (!bcma_sprom_ext_available(bus)) {
407 bool sprom_onchip;
408
407 /* 409 /*
408 * External SPROM takes precedence so check 410 * External SPROM takes precedence so check
409 * on-chip OTP only when no external SPROM 411 * on-chip OTP only when no external SPROM
410 * is present. 412 * is present.
411 */ 413 */
412 if (bcma_sprom_onchip_available(bus)) { 414 sprom_onchip = bcma_sprom_onchip_available(bus);
415 if (sprom_onchip) {
413 /* determine offset */ 416 /* determine offset */
414 offset = bcma_sprom_onchip_offset(bus); 417 offset = bcma_sprom_onchip_offset(bus);
415 } 418 }
416 if (!offset) { 419 if (!offset || !sprom_onchip) {
417 /* 420 /*
418 * Maybe there is no SPROM on the device? 421 * Maybe there is no SPROM on the device?
419 * Now we ask the arch code if there is some sprom 422 * Now we ask the arch code if there is some sprom
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 8db9089127c5..9a13e889837e 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6580,24 +6580,21 @@ static const struct file_operations dac960_user_command_proc_fops = {
6580 6580
6581static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller) 6581static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
6582{ 6582{
6583 struct proc_dir_entry *StatusProcEntry;
6584 struct proc_dir_entry *ControllerProcEntry; 6583 struct proc_dir_entry *ControllerProcEntry;
6585 struct proc_dir_entry *UserCommandProcEntry;
6586 6584
6587 if (DAC960_ProcDirectoryEntry == NULL) { 6585 if (DAC960_ProcDirectoryEntry == NULL) {
6588 DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); 6586 DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
6589 StatusProcEntry = proc_create("status", 0, 6587 proc_create("status", 0, DAC960_ProcDirectoryEntry,
6590 DAC960_ProcDirectoryEntry, 6588 &dac960_proc_fops);
6591 &dac960_proc_fops);
6592 } 6589 }
6593 6590
6594 sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); 6591 sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
6595 ControllerProcEntry = proc_mkdir(Controller->ControllerName, 6592 ControllerProcEntry = proc_mkdir(Controller->ControllerName,
6596 DAC960_ProcDirectoryEntry); 6593 DAC960_ProcDirectoryEntry);
6597 proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller); 6594 proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
6598 proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller); 6595 proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
6599 UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller); 6596 proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
6600 Controller->ControllerProcEntry = ControllerProcEntry; 6597 Controller->ControllerProcEntry = ControllerProcEntry;
6601} 6598}
6602 6599
6603 6600
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index abfaacaaf346..946166e13953 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
2297 return; 2297 return;
2298 } 2298 }
2299 2299
2300 if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) { 2300 if (!capable(CAP_SYS_ADMIN)) {
2301 retcode = ERR_PERM; 2301 retcode = ERR_PERM;
2302 goto fail; 2302 goto fail;
2303 } 2303 }
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 43beaca53179..436f519bed1c 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -664,7 +664,7 @@ static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
664 timeo = mdev->net_conf->try_connect_int * HZ; 664 timeo = mdev->net_conf->try_connect_int * HZ;
665 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */ 665 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
666 666
667 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */ 667 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
668 s_listen->sk->sk_rcvtimeo = timeo; 668 s_listen->sk->sk_rcvtimeo = timeo;
669 s_listen->sk->sk_sndtimeo = timeo; 669 s_listen->sk->sk_sndtimeo = timeo;
670 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size, 670 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
@@ -841,8 +841,8 @@ retry:
841 } 841 }
842 } while (1); 842 } while (1);
843 843
844 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */ 844 msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
845 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */ 845 sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
846 846
847 sock->sk->sk_allocation = GFP_NOIO; 847 sock->sk->sk_allocation = GFP_NOIO;
848 msock->sk->sk_allocation = GFP_NOIO; 848 msock->sk->sk_allocation = GFP_NOIO;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 00f9fc992090..304000c3d433 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2510,8 +2510,10 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
2510 up(&dd->port->cmd_slot); 2510 up(&dd->port->cmd_slot);
2511 return NULL; 2511 return NULL;
2512 } 2512 }
2513 if (unlikely(*tag < 0)) 2513 if (unlikely(*tag < 0)) {
2514 up(&dd->port->cmd_slot);
2514 return NULL; 2515 return NULL;
2516 }
2515 2517
2516 return dd->port->commands[*tag].sg; 2518 return dd->port->commands[*tag].sg;
2517} 2519}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index ae9edca7b56d..57fd867553d7 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -75,6 +75,8 @@ static struct usb_device_id ath3k_table[] = {
75 { USB_DEVICE(0x0CF3, 0x311D) }, 75 { USB_DEVICE(0x0CF3, 0x311D) },
76 { USB_DEVICE(0x13d3, 0x3375) }, 76 { USB_DEVICE(0x13d3, 0x3375) },
77 { USB_DEVICE(0x04CA, 0x3005) }, 77 { USB_DEVICE(0x04CA, 0x3005) },
78 { USB_DEVICE(0x13d3, 0x3362) },
79 { USB_DEVICE(0x0CF3, 0xE004) },
78 80
79 /* Atheros AR5BBU12 with sflash firmware */ 81 /* Atheros AR5BBU12 with sflash firmware */
80 { USB_DEVICE(0x0489, 0xE02C) }, 82 { USB_DEVICE(0x0489, 0xE02C) },
@@ -94,6 +96,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
94 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, 96 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
95 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 97 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
96 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 98 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
99 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
100 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
97 101
98 { } /* Terminating entry */ 102 { } /* Terminating entry */
99}; 103};
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3311b812a0c6..9217121362e1 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -101,12 +101,16 @@ static struct usb_device_id btusb_table[] = {
101 { USB_DEVICE(0x0c10, 0x0000) }, 101 { USB_DEVICE(0x0c10, 0x0000) },
102 102
103 /* Broadcom BCM20702A0 */ 103 /* Broadcom BCM20702A0 */
104 { USB_DEVICE(0x0489, 0xe042) },
104 { USB_DEVICE(0x0a5c, 0x21e3) }, 105 { USB_DEVICE(0x0a5c, 0x21e3) },
105 { USB_DEVICE(0x0a5c, 0x21e6) }, 106 { USB_DEVICE(0x0a5c, 0x21e6) },
106 { USB_DEVICE(0x0a5c, 0x21e8) }, 107 { USB_DEVICE(0x0a5c, 0x21e8) },
107 { USB_DEVICE(0x0a5c, 0x21f3) }, 108 { USB_DEVICE(0x0a5c, 0x21f3) },
108 { USB_DEVICE(0x413c, 0x8197) }, 109 { USB_DEVICE(0x413c, 0x8197) },
109 110
111 /* Foxconn - Hon Hai */
112 { USB_DEVICE(0x0489, 0xe033) },
113
110 { } /* Terminating entry */ 114 { } /* Terminating entry */
111}; 115};
112 116
@@ -133,6 +137,8 @@ static struct usb_device_id blacklist_table[] = {
133 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 137 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
134 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 138 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
135 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 139 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
140 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
141 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
136 142
137 /* Atheros AR5BBU12 with sflash firmware */ 143 /* Atheros AR5BBU12 with sflash firmware */
138 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, 144 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ddf86b6500b7..cdf2f5451c76 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1895,6 +1895,13 @@ static int virtcons_restore(struct virtio_device *vdev)
1895 1895
1896 /* Get port open/close status on the host */ 1896 /* Get port open/close status on the host */
1897 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); 1897 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1898
1899 /*
1900 * If a port was open at the time of suspending, we
1901 * have to let the host know that it's still open.
1902 */
1903 if (port->guest_connected)
1904 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
1898 } 1905 }
1899 return 0; 1906 return 0;
1900} 1907}
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 6db161f64ae0..c535cf8c5770 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -35,7 +35,12 @@ static DEFINE_MUTEX(clocks_mutex);
35static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) 35static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
36{ 36{
37 struct clk_lookup *p, *cl = NULL; 37 struct clk_lookup *p, *cl = NULL;
38 int match, best = 0; 38 int match, best_found = 0, best_possible = 0;
39
40 if (dev_id)
41 best_possible += 2;
42 if (con_id)
43 best_possible += 1;
39 44
40 list_for_each_entry(p, &clocks, node) { 45 list_for_each_entry(p, &clocks, node) {
41 match = 0; 46 match = 0;
@@ -50,10 +55,10 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
50 match += 1; 55 match += 1;
51 } 56 }
52 57
53 if (match > best) { 58 if (match > best_found) {
54 cl = p; 59 cl = p;
55 if (match != 3) 60 if (match != best_possible)
56 best = match; 61 best_found = match;
57 else 62 else
58 break; 63 break;
59 } 64 }
@@ -89,6 +94,51 @@ void clk_put(struct clk *clk)
89} 94}
90EXPORT_SYMBOL(clk_put); 95EXPORT_SYMBOL(clk_put);
91 96
97static void devm_clk_release(struct device *dev, void *res)
98{
99 clk_put(*(struct clk **)res);
100}
101
102struct clk *devm_clk_get(struct device *dev, const char *id)
103{
104 struct clk **ptr, *clk;
105
106 ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
107 if (!ptr)
108 return ERR_PTR(-ENOMEM);
109
110 clk = clk_get(dev, id);
111 if (!IS_ERR(clk)) {
112 *ptr = clk;
113 devres_add(dev, ptr);
114 } else {
115 devres_free(ptr);
116 }
117
118 return clk;
119}
120EXPORT_SYMBOL(devm_clk_get);
121
122static int devm_clk_match(struct device *dev, void *res, void *data)
123{
124 struct clk **c = res;
125 if (!c || !*c) {
126 WARN_ON(!c || !*c);
127 return 0;
128 }
129 return *c == data;
130}
131
132void devm_clk_put(struct device *dev, struct clk *clk)
133{
134 int ret;
135
136 ret = devres_destroy(dev, devm_clk_release, devm_clk_match, clk);
137
138 WARN_ON(ret);
139}
140EXPORT_SYMBOL(devm_clk_put);
141
92void clkdev_add(struct clk_lookup *cl) 142void clkdev_add(struct clk_lookup *cl)
93{ 143{
94 mutex_lock(&clocks_mutex); 144 mutex_lock(&clocks_mutex);
@@ -116,8 +166,9 @@ struct clk_lookup_alloc {
116 char con_id[MAX_CON_ID]; 166 char con_id[MAX_CON_ID];
117}; 167};
118 168
119struct clk_lookup * __init_refok 169static struct clk_lookup * __init_refok
120clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) 170vclkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt,
171 va_list ap)
121{ 172{
122 struct clk_lookup_alloc *cla; 173 struct clk_lookup_alloc *cla;
123 174
@@ -132,16 +183,25 @@ clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
132 } 183 }
133 184
134 if (dev_fmt) { 185 if (dev_fmt) {
135 va_list ap;
136
137 va_start(ap, dev_fmt);
138 vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap); 186 vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
139 cla->cl.dev_id = cla->dev_id; 187 cla->cl.dev_id = cla->dev_id;
140 va_end(ap);
141 } 188 }
142 189
143 return &cla->cl; 190 return &cla->cl;
144} 191}
192
193struct clk_lookup * __init_refok
194clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
195{
196 struct clk_lookup *cl;
197 va_list ap;
198
199 va_start(ap, dev_fmt);
200 cl = vclkdev_alloc(clk, con_id, dev_fmt, ap);
201 va_end(ap);
202
203 return cl;
204}
145EXPORT_SYMBOL(clkdev_alloc); 205EXPORT_SYMBOL(clkdev_alloc);
146 206
147int clk_add_alias(const char *alias, const char *alias_dev_name, char *id, 207int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
@@ -173,3 +233,65 @@ void clkdev_drop(struct clk_lookup *cl)
173 kfree(cl); 233 kfree(cl);
174} 234}
175EXPORT_SYMBOL(clkdev_drop); 235EXPORT_SYMBOL(clkdev_drop);
236
237/**
238 * clk_register_clkdev - register one clock lookup for a struct clk
239 * @clk: struct clk to associate with all clk_lookups
240 * @con_id: connection ID string on device
241 * @dev_id: format string describing device name
242 *
243 * con_id or dev_id may be NULL as a wildcard, just as in the rest of
244 * clkdev.
245 *
246 * To make things easier for mass registration, we detect error clks
247 * from a previous clk_register() call, and return the error code for
248 * those. This is to permit this function to be called immediately
249 * after clk_register().
250 */
251int clk_register_clkdev(struct clk *clk, const char *con_id,
252 const char *dev_fmt, ...)
253{
254 struct clk_lookup *cl;
255 va_list ap;
256
257 if (IS_ERR(clk))
258 return PTR_ERR(clk);
259
260 va_start(ap, dev_fmt);
261 cl = vclkdev_alloc(clk, con_id, dev_fmt, ap);
262 va_end(ap);
263
264 if (!cl)
265 return -ENOMEM;
266
267 clkdev_add(cl);
268
269 return 0;
270}
271
272/**
273 * clk_register_clkdevs - register a set of clk_lookup for a struct clk
274 * @clk: struct clk to associate with all clk_lookups
275 * @cl: array of clk_lookup structures with con_id and dev_id pre-initialized
276 * @num: number of clk_lookup structures to register
277 *
278 * To make things easier for mass registration, we detect error clks
279 * from a previous clk_register() call, and return the error code for
280 * those. This is to permit this function to be called immediately
281 * after clk_register().
282 */
283int clk_register_clkdevs(struct clk *clk, struct clk_lookup *cl, size_t num)
284{
285 unsigned i;
286
287 if (IS_ERR(clk))
288 return PTR_ERR(clk);
289
290 for (i = 0; i < num; i++, cl++) {
291 cl->clk = clk;
292 clkdev_add(cl);
293 }
294
295 return 0;
296}
297EXPORT_SYMBOL(clk_register_clkdevs);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ab9abb46d01a..371f13cc38eb 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -111,6 +111,7 @@ config CRYPTO_DES_S390
111 depends on S390 111 depends on S390
112 select CRYPTO_ALGAPI 112 select CRYPTO_ALGAPI
113 select CRYPTO_BLKCIPHER 113 select CRYPTO_BLKCIPHER
114 select CRYPTO_DES
114 help 115 help
115 This is the s390 hardware accelerated implementation of the 116 This is the s390 hardware accelerated implementation of the
116 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 117 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
@@ -164,6 +165,7 @@ config CRYPTO_DEV_MV_CESA
164 select CRYPTO_ALGAPI 165 select CRYPTO_ALGAPI
165 select CRYPTO_AES 166 select CRYPTO_AES
166 select CRYPTO_BLKCIPHER2 167 select CRYPTO_BLKCIPHER2
168 select CRYPTO_HASH
167 help 169 help
168 This driver allows you to utilize the Cryptographic Engines and 170 This driver allows you to utilize the Cryptographic Engines and
169 Security Accelerator (CESA) which can be found on the Marvell Orion 171 Security Accelerator (CESA) which can be found on the Marvell Orion
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index c301a8ec31aa..3d704abd7912 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1429,6 +1429,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1429 * signal 1429 * signal
1430 */ 1430 */
1431 release_phy_channel(plchan); 1431 release_phy_channel(plchan);
1432 plchan->phychan_hold = 0;
1432 } 1433 }
1433 /* Dequeue jobs and free LLIs */ 1434 /* Dequeue jobs and free LLIs */
1434 if (plchan->at) { 1435 if (plchan->at) {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7aa58d204892..bf0d7e4e345b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -221,10 +221,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
221 221
222 vdbg_dump_regs(atchan); 222 vdbg_dump_regs(atchan);
223 223
224 /* clear any pending interrupt */
225 while (dma_readl(atdma, EBCISR))
226 cpu_relax();
227
228 channel_writel(atchan, SADDR, 0); 224 channel_writel(atchan, SADDR, 0);
229 channel_writel(atchan, DADDR, 0); 225 channel_writel(atchan, DADDR, 0);
230 channel_writel(atchan, CTRLA, 0); 226 channel_writel(atchan, CTRLA, 0);
@@ -249,7 +245,9 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
249 dev_vdbg(chan2dev(&atchan->chan_common), 245 dev_vdbg(chan2dev(&atchan->chan_common),
250 "descriptor %u complete\n", txd->cookie); 246 "descriptor %u complete\n", txd->cookie);
251 247
252 dma_cookie_complete(txd); 248 /* mark the descriptor as complete for non cyclic cases only */
249 if (!atc_chan_is_cyclic(atchan))
250 dma_cookie_complete(txd);
253 251
254 /* move children to free_list */ 252 /* move children to free_list */
255 list_splice_init(&desc->tx_list, &atchan->free_list); 253 list_splice_init(&desc->tx_list, &atchan->free_list);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index e6f133b78dc2..f6e9b572b998 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -703,7 +703,9 @@ static void ep93xx_dma_tasklet(unsigned long data)
703 desc = ep93xx_dma_get_active(edmac); 703 desc = ep93xx_dma_get_active(edmac);
704 if (desc) { 704 if (desc) {
705 if (desc->complete) { 705 if (desc->complete) {
706 dma_cookie_complete(&desc->txd); 706 /* mark descriptor complete for non cyclic case only */
707 if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
708 dma_cookie_complete(&desc->txd);
707 list_splice_init(&edmac->active, &list); 709 list_splice_init(&edmac->active, &list);
708 } 710 }
709 callback = desc->txd.callback; 711 callback = desc->txd.callback;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a45b5d2a5987..bb787d8e1529 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -571,11 +571,14 @@ static void imxdma_tasklet(unsigned long data)
571 if (desc->desc.callback) 571 if (desc->desc.callback)
572 desc->desc.callback(desc->desc.callback_param); 572 desc->desc.callback(desc->desc.callback_param);
573 573
574 dma_cookie_complete(&desc->desc); 574 /* If we are dealing with a cyclic descriptor keep it on ld_active
575 575 * and dont mark the descripor as complete.
576 /* If we are dealing with a cyclic descriptor keep it on ld_active */ 576 * Only in non-cyclic cases it would be marked as complete
577 */
577 if (imxdma_chan_is_doing_cyclic(imxdmac)) 578 if (imxdma_chan_is_doing_cyclic(imxdmac))
578 goto out; 579 goto out;
580 else
581 dma_cookie_complete(&desc->desc);
579 582
580 /* Free 2D slot if it was an interleaved transfer */ 583 /* Free 2D slot if it was an interleaved transfer */
581 if (imxdmac->enabled_2d) { 584 if (imxdmac->enabled_2d) {
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index c81ef7e10e08..655d4ce6ed0d 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -201,10 +201,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
201 201
202static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) 202static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
203{ 203{
204 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
205
206 mxs_dma_enable_chan(mxs_chan);
207
208 return dma_cookie_assign(tx); 204 return dma_cookie_assign(tx);
209} 205}
210 206
@@ -558,9 +554,9 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
558 554
559static void mxs_dma_issue_pending(struct dma_chan *chan) 555static void mxs_dma_issue_pending(struct dma_chan *chan)
560{ 556{
561 /* 557 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
562 * Nothing to do. We only have a single descriptor. 558
563 */ 559 mxs_dma_enable_chan(mxs_chan);
564} 560}
565 561
566static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) 562static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 282caf118be8..fa3fb21e60be 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2225,12 +2225,9 @@ static inline void free_desc_list(struct list_head *list)
2225{ 2225{
2226 struct dma_pl330_dmac *pdmac; 2226 struct dma_pl330_dmac *pdmac;
2227 struct dma_pl330_desc *desc; 2227 struct dma_pl330_desc *desc;
2228 struct dma_pl330_chan *pch; 2228 struct dma_pl330_chan *pch = NULL;
2229 unsigned long flags; 2229 unsigned long flags;
2230 2230
2231 if (list_empty(list))
2232 return;
2233
2234 /* Finish off the work list */ 2231 /* Finish off the work list */
2235 list_for_each_entry(desc, list, node) { 2232 list_for_each_entry(desc, list, node) {
2236 dma_async_tx_callback callback; 2233 dma_async_tx_callback callback;
@@ -2247,6 +2244,10 @@ static inline void free_desc_list(struct list_head *list)
2247 desc->pchan = NULL; 2244 desc->pchan = NULL;
2248 } 2245 }
2249 2246
2247 /* pch will be unset if list was empty */
2248 if (!pch)
2249 return;
2250
2250 pdmac = pch->dmac; 2251 pdmac = pch->dmac;
2251 2252
2252 spin_lock_irqsave(&pdmac->pool_lock, flags); 2253 spin_lock_irqsave(&pdmac->pool_lock, flags);
@@ -2257,12 +2258,9 @@ static inline void free_desc_list(struct list_head *list)
2257static inline void handle_cyclic_desc_list(struct list_head *list) 2258static inline void handle_cyclic_desc_list(struct list_head *list)
2258{ 2259{
2259 struct dma_pl330_desc *desc; 2260 struct dma_pl330_desc *desc;
2260 struct dma_pl330_chan *pch; 2261 struct dma_pl330_chan *pch = NULL;
2261 unsigned long flags; 2262 unsigned long flags;
2262 2263
2263 if (list_empty(list))
2264 return;
2265
2266 list_for_each_entry(desc, list, node) { 2264 list_for_each_entry(desc, list, node) {
2267 dma_async_tx_callback callback; 2265 dma_async_tx_callback callback;
2268 2266
@@ -2274,6 +2272,10 @@ static inline void handle_cyclic_desc_list(struct list_head *list)
2274 callback(desc->txd.callback_param); 2272 callback(desc->txd.callback_param);
2275 } 2273 }
2276 2274
2275 /* pch will be unset if list was empty */
2276 if (!pch)
2277 return;
2278
2277 spin_lock_irqsave(&pch->lock, flags); 2279 spin_lock_irqsave(&pch->lock, flags);
2278 list_splice_tail_init(list, &pch->work_list); 2280 list_splice_tail_init(list, &pch->work_list);
2279 spin_unlock_irqrestore(&pch->lock, flags); 2281 spin_unlock_irqrestore(&pch->lock, flags);
@@ -2320,7 +2322,8 @@ static void pl330_tasklet(unsigned long data)
2320 /* Pick up ripe tomatoes */ 2322 /* Pick up ripe tomatoes */
2321 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) 2323 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
2322 if (desc->status == DONE) { 2324 if (desc->status == DONE) {
2323 dma_cookie_complete(&desc->txd); 2325 if (pch->cyclic)
2326 dma_cookie_complete(&desc->txd);
2324 list_move_tail(&desc->node, &list); 2327 list_move_tail(&desc->node, &list);
2325 } 2328 }
2326 2329
@@ -2926,8 +2929,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2926 INIT_LIST_HEAD(&pd->channels); 2929 INIT_LIST_HEAD(&pd->channels);
2927 2930
2928 /* Initialize channel parameters */ 2931 /* Initialize channel parameters */
2929 num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri, 2932 if (pdat)
2930 (u8)pi->pcfg.num_chan); 2933 num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
2934 else
2935 num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
2936
2931 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); 2937 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
2932 2938
2933 for (i = 0; i < num_chan; i++) { 2939 for (i = 0; i < num_chan; i++) {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index bdd41d4bfa8d..2ed1ac3513f3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -18,6 +18,7 @@
18#include <linux/pm_runtime.h> 18#include <linux/pm_runtime.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/amba/bus.h> 20#include <linux/amba/bus.h>
21#include <linux/regulator/consumer.h>
21 22
22#include <plat/ste_dma40.h> 23#include <plat/ste_dma40.h>
23 24
@@ -69,6 +70,22 @@ enum d40_command {
69}; 70};
70 71
71/* 72/*
73 * enum d40_events - The different Event Enables for the event lines.
74 *
75 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
76 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
77 * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line.
78 * @D40_ROUND_EVENTLINE: Status check for event line.
79 */
80
81enum d40_events {
82 D40_DEACTIVATE_EVENTLINE = 0,
83 D40_ACTIVATE_EVENTLINE = 1,
84 D40_SUSPEND_REQ_EVENTLINE = 2,
85 D40_ROUND_EVENTLINE = 3
86};
87
88/*
72 * These are the registers that has to be saved and later restored 89 * These are the registers that has to be saved and later restored
73 * when the DMA hw is powered off. 90 * when the DMA hw is powered off.
74 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. 91 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
@@ -870,8 +887,8 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
870} 887}
871#endif 888#endif
872 889
873static int d40_channel_execute_command(struct d40_chan *d40c, 890static int __d40_execute_command_phy(struct d40_chan *d40c,
874 enum d40_command command) 891 enum d40_command command)
875{ 892{
876 u32 status; 893 u32 status;
877 int i; 894 int i;
@@ -880,6 +897,12 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
880 unsigned long flags; 897 unsigned long flags;
881 u32 wmask; 898 u32 wmask;
882 899
900 if (command == D40_DMA_STOP) {
901 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
902 if (ret)
903 return ret;
904 }
905
883 spin_lock_irqsave(&d40c->base->execmd_lock, flags); 906 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
884 907
885 if (d40c->phy_chan->num % 2 == 0) 908 if (d40c->phy_chan->num % 2 == 0)
@@ -973,67 +996,109 @@ static void d40_term_all(struct d40_chan *d40c)
973 } 996 }
974 997
975 d40c->pending_tx = 0; 998 d40c->pending_tx = 0;
976 d40c->busy = false;
977} 999}
978 1000
979static void __d40_config_set_event(struct d40_chan *d40c, bool enable, 1001static void __d40_config_set_event(struct d40_chan *d40c,
980 u32 event, int reg) 1002 enum d40_events event_type, u32 event,
1003 int reg)
981{ 1004{
982 void __iomem *addr = chan_base(d40c) + reg; 1005 void __iomem *addr = chan_base(d40c) + reg;
983 int tries; 1006 int tries;
1007 u32 status;
1008
1009 switch (event_type) {
1010
1011 case D40_DEACTIVATE_EVENTLINE:
984 1012
985 if (!enable) {
986 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) 1013 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
987 | ~D40_EVENTLINE_MASK(event), addr); 1014 | ~D40_EVENTLINE_MASK(event), addr);
988 return; 1015 break;
989 } 1016
1017 case D40_SUSPEND_REQ_EVENTLINE:
1018 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1019 D40_EVENTLINE_POS(event);
1020
1021 if (status == D40_DEACTIVATE_EVENTLINE ||
1022 status == D40_SUSPEND_REQ_EVENTLINE)
1023 break;
990 1024
1025 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1026 | ~D40_EVENTLINE_MASK(event), addr);
1027
1028 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1029
1030 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1031 D40_EVENTLINE_POS(event);
1032
1033 cpu_relax();
1034 /*
1035 * Reduce the number of bus accesses while
1036 * waiting for the DMA to suspend.
1037 */
1038 udelay(3);
1039
1040 if (status == D40_DEACTIVATE_EVENTLINE)
1041 break;
1042 }
1043
1044 if (tries == D40_SUSPEND_MAX_IT) {
1045 chan_err(d40c,
1046 "unable to stop the event_line chl %d (log: %d)"
1047 "status %x\n", d40c->phy_chan->num,
1048 d40c->log_num, status);
1049 }
1050 break;
1051
1052 case D40_ACTIVATE_EVENTLINE:
991 /* 1053 /*
992 * The hardware sometimes doesn't register the enable when src and dst 1054 * The hardware sometimes doesn't register the enable when src and dst
993 * event lines are active on the same logical channel. Retry to ensure 1055 * event lines are active on the same logical channel. Retry to ensure
994 * it does. Usually only one retry is sufficient. 1056 * it does. Usually only one retry is sufficient.
995 */ 1057 */
996 tries = 100; 1058 tries = 100;
997 while (--tries) { 1059 while (--tries) {
998 writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) 1060 writel((D40_ACTIVATE_EVENTLINE <<
999 | ~D40_EVENTLINE_MASK(event), addr); 1061 D40_EVENTLINE_POS(event)) |
1062 ~D40_EVENTLINE_MASK(event), addr);
1000 1063
1001 if (readl(addr) & D40_EVENTLINE_MASK(event)) 1064 if (readl(addr) & D40_EVENTLINE_MASK(event))
1002 break; 1065 break;
1003 } 1066 }
1004 1067
1005 if (tries != 99) 1068 if (tries != 99)
1006 dev_dbg(chan2dev(d40c), 1069 dev_dbg(chan2dev(d40c),
1007 "[%s] workaround enable S%cLNK (%d tries)\n", 1070 "[%s] workaround enable S%cLNK (%d tries)\n",
1008 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', 1071 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1009 100 - tries); 1072 100 - tries);
1010 1073
1011 WARN_ON(!tries); 1074 WARN_ON(!tries);
1012} 1075 break;
1013 1076
1014static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) 1077 case D40_ROUND_EVENTLINE:
1015{ 1078 BUG();
1016 unsigned long flags; 1079 break;
1017 1080
1018 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 1081 }
1082}
1019 1083
1084static void d40_config_set_event(struct d40_chan *d40c,
1085 enum d40_events event_type)
1086{
1020 /* Enable event line connected to device (or memcpy) */ 1087 /* Enable event line connected to device (or memcpy) */
1021 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 1088 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1022 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { 1089 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
1023 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1090 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1024 1091
1025 __d40_config_set_event(d40c, do_enable, event, 1092 __d40_config_set_event(d40c, event_type, event,
1026 D40_CHAN_REG_SSLNK); 1093 D40_CHAN_REG_SSLNK);
1027 } 1094 }
1028 1095
1029 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { 1096 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
1030 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1097 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1031 1098
1032 __d40_config_set_event(d40c, do_enable, event, 1099 __d40_config_set_event(d40c, event_type, event,
1033 D40_CHAN_REG_SDLNK); 1100 D40_CHAN_REG_SDLNK);
1034 } 1101 }
1035
1036 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1037} 1102}
1038 1103
1039static u32 d40_chan_has_events(struct d40_chan *d40c) 1104static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1047,6 +1112,64 @@ static u32 d40_chan_has_events(struct d40_chan *d40c)
1047 return val; 1112 return val;
1048} 1113}
1049 1114
1115static int
1116__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1117{
1118 unsigned long flags;
1119 int ret = 0;
1120 u32 active_status;
1121 void __iomem *active_reg;
1122
1123 if (d40c->phy_chan->num % 2 == 0)
1124 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1125 else
1126 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1127
1128
1129 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1130
1131 switch (command) {
1132 case D40_DMA_STOP:
1133 case D40_DMA_SUSPEND_REQ:
1134
1135 active_status = (readl(active_reg) &
1136 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1137 D40_CHAN_POS(d40c->phy_chan->num);
1138
1139 if (active_status == D40_DMA_RUN)
1140 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1141 else
1142 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1143
1144 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1145 ret = __d40_execute_command_phy(d40c, command);
1146
1147 break;
1148
1149 case D40_DMA_RUN:
1150
1151 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1152 ret = __d40_execute_command_phy(d40c, command);
1153 break;
1154
1155 case D40_DMA_SUSPENDED:
1156 BUG();
1157 break;
1158 }
1159
1160 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1161 return ret;
1162}
1163
1164static int d40_channel_execute_command(struct d40_chan *d40c,
1165 enum d40_command command)
1166{
1167 if (chan_is_logical(d40c))
1168 return __d40_execute_command_log(d40c, command);
1169 else
1170 return __d40_execute_command_phy(d40c, command);
1171}
1172
1050static u32 d40_get_prmo(struct d40_chan *d40c) 1173static u32 d40_get_prmo(struct d40_chan *d40c)
1051{ 1174{
1052 static const unsigned int phy_map[] = { 1175 static const unsigned int phy_map[] = {
@@ -1149,15 +1272,7 @@ static int d40_pause(struct d40_chan *d40c)
1149 spin_lock_irqsave(&d40c->lock, flags); 1272 spin_lock_irqsave(&d40c->lock, flags);
1150 1273
1151 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1274 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1152 if (res == 0) { 1275
1153 if (chan_is_logical(d40c)) {
1154 d40_config_set_event(d40c, false);
1155 /* Resume the other logical channels if any */
1156 if (d40_chan_has_events(d40c))
1157 res = d40_channel_execute_command(d40c,
1158 D40_DMA_RUN);
1159 }
1160 }
1161 pm_runtime_mark_last_busy(d40c->base->dev); 1276 pm_runtime_mark_last_busy(d40c->base->dev);
1162 pm_runtime_put_autosuspend(d40c->base->dev); 1277 pm_runtime_put_autosuspend(d40c->base->dev);
1163 spin_unlock_irqrestore(&d40c->lock, flags); 1278 spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1174,45 +1289,17 @@ static int d40_resume(struct d40_chan *d40c)
1174 1289
1175 spin_lock_irqsave(&d40c->lock, flags); 1290 spin_lock_irqsave(&d40c->lock, flags);
1176 pm_runtime_get_sync(d40c->base->dev); 1291 pm_runtime_get_sync(d40c->base->dev);
1177 if (d40c->base->rev == 0)
1178 if (chan_is_logical(d40c)) {
1179 res = d40_channel_execute_command(d40c,
1180 D40_DMA_SUSPEND_REQ);
1181 goto no_suspend;
1182 }
1183 1292
1184 /* If bytes left to transfer or linked tx resume job */ 1293 /* If bytes left to transfer or linked tx resume job */
1185 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { 1294 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1186
1187 if (chan_is_logical(d40c))
1188 d40_config_set_event(d40c, true);
1189
1190 res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1295 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1191 }
1192 1296
1193no_suspend:
1194 pm_runtime_mark_last_busy(d40c->base->dev); 1297 pm_runtime_mark_last_busy(d40c->base->dev);
1195 pm_runtime_put_autosuspend(d40c->base->dev); 1298 pm_runtime_put_autosuspend(d40c->base->dev);
1196 spin_unlock_irqrestore(&d40c->lock, flags); 1299 spin_unlock_irqrestore(&d40c->lock, flags);
1197 return res; 1300 return res;
1198} 1301}
1199 1302
1200static int d40_terminate_all(struct d40_chan *chan)
1201{
1202 unsigned long flags;
1203 int ret = 0;
1204
1205 ret = d40_pause(chan);
1206 if (!ret && chan_is_physical(chan))
1207 ret = d40_channel_execute_command(chan, D40_DMA_STOP);
1208
1209 spin_lock_irqsave(&chan->lock, flags);
1210 d40_term_all(chan);
1211 spin_unlock_irqrestore(&chan->lock, flags);
1212
1213 return ret;
1214}
1215
1216static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 1303static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1217{ 1304{
1218 struct d40_chan *d40c = container_of(tx->chan, 1305 struct d40_chan *d40c = container_of(tx->chan,
@@ -1232,20 +1319,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1232 1319
1233static int d40_start(struct d40_chan *d40c) 1320static int d40_start(struct d40_chan *d40c)
1234{ 1321{
1235 if (d40c->base->rev == 0) {
1236 int err;
1237
1238 if (chan_is_logical(d40c)) {
1239 err = d40_channel_execute_command(d40c,
1240 D40_DMA_SUSPEND_REQ);
1241 if (err)
1242 return err;
1243 }
1244 }
1245
1246 if (chan_is_logical(d40c))
1247 d40_config_set_event(d40c, true);
1248
1249 return d40_channel_execute_command(d40c, D40_DMA_RUN); 1322 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1250} 1323}
1251 1324
@@ -1258,10 +1331,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1258 d40d = d40_first_queued(d40c); 1331 d40d = d40_first_queued(d40c);
1259 1332
1260 if (d40d != NULL) { 1333 if (d40d != NULL) {
1261 if (!d40c->busy) 1334 if (!d40c->busy) {
1262 d40c->busy = true; 1335 d40c->busy = true;
1263 1336 pm_runtime_get_sync(d40c->base->dev);
1264 pm_runtime_get_sync(d40c->base->dev); 1337 }
1265 1338
1266 /* Remove from queue */ 1339 /* Remove from queue */
1267 d40_desc_remove(d40d); 1340 d40_desc_remove(d40d);
@@ -1388,8 +1461,8 @@ static void dma_tasklet(unsigned long data)
1388 1461
1389 return; 1462 return;
1390 1463
1391 err: 1464err:
1392 /* Rescue manoeuvre if receiving double interrupts */ 1465 /* Rescue manouver if receiving double interrupts */
1393 if (d40c->pending_tx > 0) 1466 if (d40c->pending_tx > 0)
1394 d40c->pending_tx--; 1467 d40c->pending_tx--;
1395 spin_unlock_irqrestore(&d40c->lock, flags); 1468 spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1770,7 +1843,6 @@ static int d40_config_memcpy(struct d40_chan *d40c)
1770 return 0; 1843 return 0;
1771} 1844}
1772 1845
1773
1774static int d40_free_dma(struct d40_chan *d40c) 1846static int d40_free_dma(struct d40_chan *d40c)
1775{ 1847{
1776 1848
@@ -1806,43 +1878,18 @@ static int d40_free_dma(struct d40_chan *d40c)
1806 } 1878 }
1807 1879
1808 pm_runtime_get_sync(d40c->base->dev); 1880 pm_runtime_get_sync(d40c->base->dev);
1809 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1881 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1810 if (res) { 1882 if (res) {
1811 chan_err(d40c, "suspend failed\n"); 1883 chan_err(d40c, "stop failed\n");
1812 goto out; 1884 goto out;
1813 } 1885 }
1814 1886
1815 if (chan_is_logical(d40c)) { 1887 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
1816 /* Release logical channel, deactivate the event line */
1817 1888
1818 d40_config_set_event(d40c, false); 1889 if (chan_is_logical(d40c))
1819 d40c->base->lookup_log_chans[d40c->log_num] = NULL; 1890 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1820 1891 else
1821 /* 1892 d40c->base->lookup_phy_chans[phy->num] = NULL;
1822 * Check if there are more logical allocation
1823 * on this phy channel.
1824 */
1825 if (!d40_alloc_mask_free(phy, is_src, event)) {
1826 /* Resume the other logical channels if any */
1827 if (d40_chan_has_events(d40c)) {
1828 res = d40_channel_execute_command(d40c,
1829 D40_DMA_RUN);
1830 if (res)
1831 chan_err(d40c,
1832 "Executing RUN command\n");
1833 }
1834 goto out;
1835 }
1836 } else {
1837 (void) d40_alloc_mask_free(phy, is_src, 0);
1838 }
1839
1840 /* Release physical channel */
1841 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1842 if (res) {
1843 chan_err(d40c, "Failed to stop channel\n");
1844 goto out;
1845 }
1846 1893
1847 if (d40c->busy) { 1894 if (d40c->busy) {
1848 pm_runtime_mark_last_busy(d40c->base->dev); 1895 pm_runtime_mark_last_busy(d40c->base->dev);
@@ -1852,7 +1899,6 @@ static int d40_free_dma(struct d40_chan *d40c)
1852 d40c->busy = false; 1899 d40c->busy = false;
1853 d40c->phy_chan = NULL; 1900 d40c->phy_chan = NULL;
1854 d40c->configured = false; 1901 d40c->configured = false;
1855 d40c->base->lookup_phy_chans[phy->num] = NULL;
1856out: 1902out:
1857 1903
1858 pm_runtime_mark_last_busy(d40c->base->dev); 1904 pm_runtime_mark_last_busy(d40c->base->dev);
@@ -2070,7 +2116,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2070 if (sg_next(&sg_src[sg_len - 1]) == sg_src) 2116 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2071 desc->cyclic = true; 2117 desc->cyclic = true;
2072 2118
2073 if (direction != DMA_NONE) { 2119 if (direction != DMA_TRANS_NONE) {
2074 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); 2120 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
2075 2121
2076 if (direction == DMA_DEV_TO_MEM) 2122 if (direction == DMA_DEV_TO_MEM)
@@ -2371,6 +2417,31 @@ static void d40_issue_pending(struct dma_chan *chan)
2371 spin_unlock_irqrestore(&d40c->lock, flags); 2417 spin_unlock_irqrestore(&d40c->lock, flags);
2372} 2418}
2373 2419
2420static void d40_terminate_all(struct dma_chan *chan)
2421{
2422 unsigned long flags;
2423 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2424 int ret;
2425
2426 spin_lock_irqsave(&d40c->lock, flags);
2427
2428 pm_runtime_get_sync(d40c->base->dev);
2429 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2430 if (ret)
2431 chan_err(d40c, "Failed to stop channel\n");
2432
2433 d40_term_all(d40c);
2434 pm_runtime_mark_last_busy(d40c->base->dev);
2435 pm_runtime_put_autosuspend(d40c->base->dev);
2436 if (d40c->busy) {
2437 pm_runtime_mark_last_busy(d40c->base->dev);
2438 pm_runtime_put_autosuspend(d40c->base->dev);
2439 }
2440 d40c->busy = false;
2441
2442 spin_unlock_irqrestore(&d40c->lock, flags);
2443}
2444
2374static int 2445static int
2375dma40_config_to_halfchannel(struct d40_chan *d40c, 2446dma40_config_to_halfchannel(struct d40_chan *d40c,
2376 struct stedma40_half_channel_info *info, 2447 struct stedma40_half_channel_info *info,
@@ -2551,7 +2622,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2551 2622
2552 switch (cmd) { 2623 switch (cmd) {
2553 case DMA_TERMINATE_ALL: 2624 case DMA_TERMINATE_ALL:
2554 return d40_terminate_all(d40c); 2625 d40_terminate_all(chan);
2626 return 0;
2555 case DMA_PAUSE: 2627 case DMA_PAUSE:
2556 return d40_pause(d40c); 2628 return d40_pause(d40c);
2557 case DMA_RESUME: 2629 case DMA_RESUME:
@@ -2908,6 +2980,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2908 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", 2980 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2909 rev, res->start); 2981 rev, res->start);
2910 2982
2983 if (rev < 2) {
2984 d40_err(&pdev->dev, "hardware revision: %d is not supported",
2985 rev);
2986 goto failure;
2987 }
2988
2911 plat_data = pdev->dev.platform_data; 2989 plat_data = pdev->dev.platform_data;
2912 2990
2913 /* Count the number of logical channels in use */ 2991 /* Count the number of logical channels in use */
@@ -2998,6 +3076,7 @@ failure:
2998 3076
2999 if (base) { 3077 if (base) {
3000 kfree(base->lcla_pool.alloc_map); 3078 kfree(base->lcla_pool.alloc_map);
3079 kfree(base->reg_val_backup_chan);
3001 kfree(base->lookup_log_chans); 3080 kfree(base->lookup_log_chans);
3002 kfree(base->lookup_phy_chans); 3081 kfree(base->lookup_phy_chans);
3003 kfree(base->phy_res); 3082 kfree(base->phy_res);
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 8d3d490968a3..51e8e5396e9b 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -62,8 +62,6 @@
62#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS) 62#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
63 63
64/* Link register */ 64/* Link register */
65#define D40_DEACTIVATE_EVENTLINE 0x0
66#define D40_ACTIVATE_EVENTLINE 0x1
67#define D40_EVENTLINE_POS(i) (2 * i) 65#define D40_EVENTLINE_POS(i) (2 * i)
68#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i)) 66#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
69 67
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index d25599f2a3f8..47408e802ab6 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -191,6 +191,190 @@ utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
191 } 191 }
192} 192}
193 193
194static bool
195validate_device_path(struct efi_variable *var, int match, u8 *buffer,
196 unsigned long len)
197{
198 struct efi_generic_dev_path *node;
199 int offset = 0;
200
201 node = (struct efi_generic_dev_path *)buffer;
202
203 if (len < sizeof(*node))
204 return false;
205
206 while (offset <= len - sizeof(*node) &&
207 node->length >= sizeof(*node) &&
208 node->length <= len - offset) {
209 offset += node->length;
210
211 if ((node->type == EFI_DEV_END_PATH ||
212 node->type == EFI_DEV_END_PATH2) &&
213 node->sub_type == EFI_DEV_END_ENTIRE)
214 return true;
215
216 node = (struct efi_generic_dev_path *)(buffer + offset);
217 }
218
219 /*
220 * If we're here then either node->length pointed past the end
221 * of the buffer or we reached the end of the buffer without
222 * finding a device path end node.
223 */
224 return false;
225}
226
227static bool
228validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
229 unsigned long len)
230{
231 /* An array of 16-bit integers */
232 if ((len % 2) != 0)
233 return false;
234
235 return true;
236}
237
238static bool
239validate_load_option(struct efi_variable *var, int match, u8 *buffer,
240 unsigned long len)
241{
242 u16 filepathlength;
243 int i, desclength = 0, namelen;
244
245 namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
246
247 /* Either "Boot" or "Driver" followed by four digits of hex */
248 for (i = match; i < match+4; i++) {
249 if (var->VariableName[i] > 127 ||
250 hex_to_bin(var->VariableName[i] & 0xff) < 0)
251 return true;
252 }
253
254 /* Reject it if there's 4 digits of hex and then further content */
255 if (namelen > match + 4)
256 return false;
257
258 /* A valid entry must be at least 8 bytes */
259 if (len < 8)
260 return false;
261
262 filepathlength = buffer[4] | buffer[5] << 8;
263
264 /*
265 * There's no stored length for the description, so it has to be
266 * found by hand
267 */
268 desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
269
270 /* Each boot entry must have a descriptor */
271 if (!desclength)
272 return false;
273
274 /*
275 * If the sum of the length of the description, the claimed filepath
276 * length and the original header are greater than the length of the
277 * variable, it's malformed
278 */
279 if ((desclength + filepathlength + 6) > len)
280 return false;
281
282 /*
283 * And, finally, check the filepath
284 */
285 return validate_device_path(var, match, buffer + desclength + 6,
286 filepathlength);
287}
288
289static bool
290validate_uint16(struct efi_variable *var, int match, u8 *buffer,
291 unsigned long len)
292{
293 /* A single 16-bit integer */
294 if (len != 2)
295 return false;
296
297 return true;
298}
299
300static bool
301validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
302 unsigned long len)
303{
304 int i;
305
306 for (i = 0; i < len; i++) {
307 if (buffer[i] > 127)
308 return false;
309
310 if (buffer[i] == 0)
311 return true;
312 }
313
314 return false;
315}
316
317struct variable_validate {
318 char *name;
319 bool (*validate)(struct efi_variable *var, int match, u8 *data,
320 unsigned long len);
321};
322
323static const struct variable_validate variable_validate[] = {
324 { "BootNext", validate_uint16 },
325 { "BootOrder", validate_boot_order },
326 { "DriverOrder", validate_boot_order },
327 { "Boot*", validate_load_option },
328 { "Driver*", validate_load_option },
329 { "ConIn", validate_device_path },
330 { "ConInDev", validate_device_path },
331 { "ConOut", validate_device_path },
332 { "ConOutDev", validate_device_path },
333 { "ErrOut", validate_device_path },
334 { "ErrOutDev", validate_device_path },
335 { "Timeout", validate_uint16 },
336 { "Lang", validate_ascii_string },
337 { "PlatformLang", validate_ascii_string },
338 { "", NULL },
339};
340
341static bool
342validate_var(struct efi_variable *var, u8 *data, unsigned long len)
343{
344 int i;
345 u16 *unicode_name = var->VariableName;
346
347 for (i = 0; variable_validate[i].validate != NULL; i++) {
348 const char *name = variable_validate[i].name;
349 int match;
350
351 for (match = 0; ; match++) {
352 char c = name[match];
353 u16 u = unicode_name[match];
354
355 /* All special variables are plain ascii */
356 if (u > 127)
357 return true;
358
359 /* Wildcard in the matching name means we've matched */
360 if (c == '*')
361 return variable_validate[i].validate(var,
362 match, data, len);
363
364 /* Case sensitive match */
365 if (c != u)
366 break;
367
368 /* Reached the end of the string while matching */
369 if (!c)
370 return variable_validate[i].validate(var,
371 match, data, len);
372 }
373 }
374
375 return true;
376}
377
194static efi_status_t 378static efi_status_t
195get_var_data_locked(struct efivars *efivars, struct efi_variable *var) 379get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
196{ 380{
@@ -324,6 +508,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
324 return -EINVAL; 508 return -EINVAL;
325 } 509 }
326 510
511 if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
512 validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
513 printk(KERN_ERR "efivars: Malformed variable content\n");
514 return -EINVAL;
515 }
516
327 spin_lock(&efivars->lock); 517 spin_lock(&efivars->lock);
328 status = efivars->ops->set_variable(new_var->VariableName, 518 status = efivars->ops->set_variable(new_var->VariableName,
329 &new_var->VendorGuid, 519 &new_var->VendorGuid,
@@ -626,6 +816,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
626 if (!capable(CAP_SYS_ADMIN)) 816 if (!capable(CAP_SYS_ADMIN))
627 return -EACCES; 817 return -EACCES;
628 818
819 if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
820 validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
821 printk(KERN_ERR "efivars: Malformed variable content\n");
822 return -EINVAL;
823 }
824
629 spin_lock(&efivars->lock); 825 spin_lock(&efivars->lock);
630 826
631 /* 827 /*
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 1adc2ec1e383..4461540653a8 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -965,18 +965,15 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
965 } 965 }
966 966
967 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv); 967 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
968 _gpio_rmw(base, bank->regs->irqstatus, l, 968 _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
969 bank->regs->irqenable_inv == false);
970 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0);
971 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0);
972 if (bank->regs->debounce_en) 969 if (bank->regs->debounce_en)
973 _gpio_rmw(base, bank->regs->debounce_en, 0, 1); 970 __raw_writel(0, base + bank->regs->debounce_en);
974 971
975 /* Save OE default value (0xffffffff) in the context */ 972 /* Save OE default value (0xffffffff) in the context */
976 bank->context.oe = __raw_readl(bank->base + bank->regs->direction); 973 bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
977 /* Initialize interface clk ungated, module enabled */ 974 /* Initialize interface clk ungated, module enabled */
978 if (bank->regs->ctrl) 975 if (bank->regs->ctrl)
979 _gpio_rmw(base, bank->regs->ctrl, 0, 1); 976 __raw_writel(0, base + bank->regs->ctrl);
980} 977}
981 978
982static __devinit void 979static __devinit void
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index e8729cc2ba2b..2cd958e0b822 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -230,16 +230,12 @@ static void pch_gpio_setup(struct pch_gpio *chip)
230 230
231static int pch_irq_type(struct irq_data *d, unsigned int type) 231static int pch_irq_type(struct irq_data *d, unsigned int type)
232{ 232{
233 u32 im;
234 u32 __iomem *im_reg;
235 u32 ien;
236 u32 im_pos;
237 int ch;
238 unsigned long flags;
239 u32 val;
240 int irq = d->irq;
241 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 233 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
242 struct pch_gpio *chip = gc->private; 234 struct pch_gpio *chip = gc->private;
235 u32 im, im_pos, val;
236 u32 __iomem *im_reg;
237 unsigned long flags;
238 int ch, irq = d->irq;
243 239
244 ch = irq - chip->irq_base; 240 ch = irq - chip->irq_base;
245 if (irq <= chip->irq_base + 7) { 241 if (irq <= chip->irq_base + 7) {
@@ -270,30 +266,22 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
270 case IRQ_TYPE_LEVEL_LOW: 266 case IRQ_TYPE_LEVEL_LOW:
271 val = PCH_LEVEL_L; 267 val = PCH_LEVEL_L;
272 break; 268 break;
273 case IRQ_TYPE_PROBE:
274 goto end;
275 default: 269 default:
276 dev_warn(chip->dev, "%s: unknown type(%dd)", 270 goto unlock;
277 __func__, type);
278 goto end;
279 } 271 }
280 272
281 /* Set interrupt mode */ 273 /* Set interrupt mode */
282 im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4)); 274 im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4));
283 iowrite32(im | (val << (im_pos * 4)), im_reg); 275 iowrite32(im | (val << (im_pos * 4)), im_reg);
284 276
285 /* iclr */ 277 /* And the handler */
286 iowrite32(BIT(ch), &chip->reg->iclr); 278 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
279 __irq_set_handler_locked(d->irq, handle_level_irq);
280 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
281 __irq_set_handler_locked(d->irq, handle_edge_irq);
287 282
288 /* IMASKCLR */ 283unlock:
289 iowrite32(BIT(ch), &chip->reg->imaskclr);
290
291 /* Enable interrupt */
292 ien = ioread32(&chip->reg->ien);
293 iowrite32(ien | BIT(ch), &chip->reg->ien);
294end:
295 spin_unlock_irqrestore(&chip->spinlock, flags); 284 spin_unlock_irqrestore(&chip->spinlock, flags);
296
297 return 0; 285 return 0;
298} 286}
299 287
@@ -313,18 +301,24 @@ static void pch_irq_mask(struct irq_data *d)
313 iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask); 301 iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask);
314} 302}
315 303
304static void pch_irq_ack(struct irq_data *d)
305{
306 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
307 struct pch_gpio *chip = gc->private;
308
309 iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->iclr);
310}
311
316static irqreturn_t pch_gpio_handler(int irq, void *dev_id) 312static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
317{ 313{
318 struct pch_gpio *chip = dev_id; 314 struct pch_gpio *chip = dev_id;
319 u32 reg_val = ioread32(&chip->reg->istatus); 315 u32 reg_val = ioread32(&chip->reg->istatus);
320 int i; 316 int i, ret = IRQ_NONE;
321 int ret = IRQ_NONE;
322 317
323 for (i = 0; i < gpio_pins[chip->ioh]; i++) { 318 for (i = 0; i < gpio_pins[chip->ioh]; i++) {
324 if (reg_val & BIT(i)) { 319 if (reg_val & BIT(i)) {
325 dev_dbg(chip->dev, "%s:[%d]:irq=%d status=0x%x\n", 320 dev_dbg(chip->dev, "%s:[%d]:irq=%d status=0x%x\n",
326 __func__, i, irq, reg_val); 321 __func__, i, irq, reg_val);
327 iowrite32(BIT(i), &chip->reg->iclr);
328 generic_handle_irq(chip->irq_base + i); 322 generic_handle_irq(chip->irq_base + i);
329 ret = IRQ_HANDLED; 323 ret = IRQ_HANDLED;
330 } 324 }
@@ -343,6 +337,7 @@ static __devinit void pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
343 gc->private = chip; 337 gc->private = chip;
344 ct = gc->chip_types; 338 ct = gc->chip_types;
345 339
340 ct->chip.irq_ack = pch_irq_ack;
346 ct->chip.irq_mask = pch_irq_mask; 341 ct->chip.irq_mask = pch_irq_mask;
347 ct->chip.irq_unmask = pch_irq_unmask; 342 ct->chip.irq_unmask = pch_irq_unmask;
348 ct->chip.irq_set_type = pch_irq_type; 343 ct->chip.irq_set_type = pch_irq_type;
@@ -357,6 +352,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
357 s32 ret; 352 s32 ret;
358 struct pch_gpio *chip; 353 struct pch_gpio *chip;
359 int irq_base; 354 int irq_base;
355 u32 msk;
360 356
361 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 357 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
362 if (chip == NULL) 358 if (chip == NULL)
@@ -408,8 +404,13 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
408 } 404 }
409 chip->irq_base = irq_base; 405 chip->irq_base = irq_base;
410 406
407 /* Mask all interrupts, but enable them */
408 msk = (1 << gpio_pins[chip->ioh]) - 1;
409 iowrite32(msk, &chip->reg->imask);
410 iowrite32(msk, &chip->reg->ien);
411
411 ret = request_irq(pdev->irq, pch_gpio_handler, 412 ret = request_irq(pdev->irq, pch_gpio_handler,
412 IRQF_SHARED, KBUILD_MODNAME, chip); 413 IRQF_SHARED, KBUILD_MODNAME, chip);
413 if (ret != 0) { 414 if (ret != 0) {
414 dev_err(&pdev->dev, 415 dev_err(&pdev->dev,
415 "%s request_irq failed\n", __func__); 416 "%s request_irq failed\n", __func__);
@@ -418,8 +419,6 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
418 419
419 pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]); 420 pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
420 421
421 /* Initialize interrupt ien register */
422 iowrite32(0, &chip->reg->ien);
423end: 422end:
424 return 0; 423 return 0;
425 424
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 5689ce62fd81..fc3ace3fd4cb 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -64,6 +64,7 @@ struct pxa_gpio_chip {
64 unsigned long irq_mask; 64 unsigned long irq_mask;
65 unsigned long irq_edge_rise; 65 unsigned long irq_edge_rise;
66 unsigned long irq_edge_fall; 66 unsigned long irq_edge_fall;
67 int (*set_wake)(unsigned int gpio, unsigned int on);
67 68
68#ifdef CONFIG_PM 69#ifdef CONFIG_PM
69 unsigned long saved_gplr; 70 unsigned long saved_gplr;
@@ -269,7 +270,8 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
269 (value ? GPSR_OFFSET : GPCR_OFFSET)); 270 (value ? GPSR_OFFSET : GPCR_OFFSET));
270} 271}
271 272
272static int __devinit pxa_init_gpio_chip(int gpio_end) 273static int __devinit pxa_init_gpio_chip(int gpio_end,
274 int (*set_wake)(unsigned int, unsigned int))
273{ 275{
274 int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1; 276 int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
275 struct pxa_gpio_chip *chips; 277 struct pxa_gpio_chip *chips;
@@ -285,6 +287,7 @@ static int __devinit pxa_init_gpio_chip(int gpio_end)
285 287
286 sprintf(chips[i].label, "gpio-%d", i); 288 sprintf(chips[i].label, "gpio-%d", i);
287 chips[i].regbase = gpio_reg_base + BANK_OFF(i); 289 chips[i].regbase = gpio_reg_base + BANK_OFF(i);
290 chips[i].set_wake = set_wake;
288 291
289 c->base = gpio; 292 c->base = gpio;
290 c->label = chips[i].label; 293 c->label = chips[i].label;
@@ -412,6 +415,17 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
412 writel_relaxed(gfer, c->regbase + GFER_OFFSET); 415 writel_relaxed(gfer, c->regbase + GFER_OFFSET);
413} 416}
414 417
418static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
419{
420 int gpio = pxa_irq_to_gpio(d->irq);
421 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
422
423 if (c->set_wake)
424 return c->set_wake(gpio, on);
425 else
426 return 0;
427}
428
415static void pxa_unmask_muxed_gpio(struct irq_data *d) 429static void pxa_unmask_muxed_gpio(struct irq_data *d)
416{ 430{
417 int gpio = pxa_irq_to_gpio(d->irq); 431 int gpio = pxa_irq_to_gpio(d->irq);
@@ -427,6 +441,7 @@ static struct irq_chip pxa_muxed_gpio_chip = {
427 .irq_mask = pxa_mask_muxed_gpio, 441 .irq_mask = pxa_mask_muxed_gpio,
428 .irq_unmask = pxa_unmask_muxed_gpio, 442 .irq_unmask = pxa_unmask_muxed_gpio,
429 .irq_set_type = pxa_gpio_irq_type, 443 .irq_set_type = pxa_gpio_irq_type,
444 .irq_set_wake = pxa_gpio_set_wake,
430}; 445};
431 446
432static int pxa_gpio_nums(void) 447static int pxa_gpio_nums(void)
@@ -471,6 +486,7 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev)
471 struct pxa_gpio_chip *c; 486 struct pxa_gpio_chip *c;
472 struct resource *res; 487 struct resource *res;
473 struct clk *clk; 488 struct clk *clk;
489 struct pxa_gpio_platform_data *info;
474 int gpio, irq, ret; 490 int gpio, irq, ret;
475 int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0; 491 int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
476 492
@@ -516,7 +532,8 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev)
516 } 532 }
517 533
518 /* Initialize GPIO chips */ 534 /* Initialize GPIO chips */
519 pxa_init_gpio_chip(pxa_last_gpio); 535 info = dev_get_platdata(&pdev->dev);
536 pxa_init_gpio_chip(pxa_last_gpio, info ? info->gpio_set_wake : NULL);
520 537
521 /* clear all GPIO edge detects */ 538 /* clear all GPIO edge detects */
522 for_each_gpio_chip(gpio, c) { 539 for_each_gpio_chip(gpio, c) {
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 19d6fc0229c3..e991d9171961 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -452,12 +452,14 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
452}; 452};
453#endif 453#endif
454 454
455#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
455static struct samsung_gpio_cfg exynos_gpio_cfg = { 456static struct samsung_gpio_cfg exynos_gpio_cfg = {
456 .set_pull = exynos_gpio_setpull, 457 .set_pull = exynos_gpio_setpull,
457 .get_pull = exynos_gpio_getpull, 458 .get_pull = exynos_gpio_getpull,
458 .set_config = samsung_gpio_setcfg_4bit, 459 .set_config = samsung_gpio_setcfg_4bit,
459 .get_config = samsung_gpio_getcfg_4bit, 460 .get_config = samsung_gpio_getcfg_4bit,
460}; 461};
462#endif
461 463
462#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450) 464#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450)
463static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = { 465static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = {
@@ -2123,8 +2125,8 @@ static struct samsung_gpio_chip s5pv210_gpios_4bit[] = {
2123 * uses the above macro and depends on the banks being listed in order here. 2125 * uses the above macro and depends on the banks being listed in order here.
2124 */ 2126 */
2125 2127
2126static struct samsung_gpio_chip exynos4_gpios_1[] = {
2127#ifdef CONFIG_ARCH_EXYNOS4 2128#ifdef CONFIG_ARCH_EXYNOS4
2129static struct samsung_gpio_chip exynos4_gpios_1[] = {
2128 { 2130 {
2129 .chip = { 2131 .chip = {
2130 .base = EXYNOS4_GPA0(0), 2132 .base = EXYNOS4_GPA0(0),
@@ -2222,11 +2224,11 @@ static struct samsung_gpio_chip exynos4_gpios_1[] = {
2222 .label = "GPF3", 2224 .label = "GPF3",
2223 }, 2225 },
2224 }, 2226 },
2225#endif
2226}; 2227};
2228#endif
2227 2229
2228static struct samsung_gpio_chip exynos4_gpios_2[] = {
2229#ifdef CONFIG_ARCH_EXYNOS4 2230#ifdef CONFIG_ARCH_EXYNOS4
2231static struct samsung_gpio_chip exynos4_gpios_2[] = {
2230 { 2232 {
2231 .chip = { 2233 .chip = {
2232 .base = EXYNOS4_GPJ0(0), 2234 .base = EXYNOS4_GPJ0(0),
@@ -2367,11 +2369,11 @@ static struct samsung_gpio_chip exynos4_gpios_2[] = {
2367 .to_irq = samsung_gpiolib_to_irq, 2369 .to_irq = samsung_gpiolib_to_irq,
2368 }, 2370 },
2369 }, 2371 },
2370#endif
2371}; 2372};
2373#endif
2372 2374
2373static struct samsung_gpio_chip exynos4_gpios_3[] = {
2374#ifdef CONFIG_ARCH_EXYNOS4 2375#ifdef CONFIG_ARCH_EXYNOS4
2376static struct samsung_gpio_chip exynos4_gpios_3[] = {
2375 { 2377 {
2376 .chip = { 2378 .chip = {
2377 .base = EXYNOS4_GPZ(0), 2379 .base = EXYNOS4_GPZ(0),
@@ -2379,8 +2381,8 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
2379 .label = "GPZ", 2381 .label = "GPZ",
2380 }, 2382 },
2381 }, 2383 },
2382#endif
2383}; 2384};
2385#endif
2384 2386
2385#ifdef CONFIG_ARCH_EXYNOS5 2387#ifdef CONFIG_ARCH_EXYNOS5
2386static struct samsung_gpio_chip exynos5_gpios_1[] = { 2388static struct samsung_gpio_chip exynos5_gpios_1[] = {
@@ -2719,7 +2721,9 @@ static __init int samsung_gpiolib_init(void)
2719{ 2721{
2720 struct samsung_gpio_chip *chip; 2722 struct samsung_gpio_chip *chip;
2721 int i, nr_chips; 2723 int i, nr_chips;
2724#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
2722 void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4; 2725 void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4;
2726#endif
2723 int group = 0; 2727 int group = 0;
2724 2728
2725 samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs)); 2729 samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
@@ -2971,6 +2975,7 @@ static __init int samsung_gpiolib_init(void)
2971 2975
2972 return 0; 2976 return 0;
2973 2977
2978#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
2974err_ioremap4: 2979err_ioremap4:
2975 iounmap(gpio_base3); 2980 iounmap(gpio_base3);
2976err_ioremap3: 2981err_ioremap3:
@@ -2979,6 +2984,7 @@ err_ioremap2:
2979 iounmap(gpio_base1); 2984 iounmap(gpio_base1);
2980err_ioremap1: 2985err_ioremap1:
2981 return -ENOMEM; 2986 return -ENOMEM;
2987#endif
2982} 2988}
2983core_initcall(samsung_gpiolib_init); 2989core_initcall(samsung_gpiolib_init);
2984 2990
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 392ce71ed6a1..1dffa8359f88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -149,22 +149,12 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
149 unsigned long pfn; 149 unsigned long pfn;
150 150
151 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 151 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
152 unsigned long usize = buf->size;
153
154 if (!buf->pages) 152 if (!buf->pages)
155 return -EINTR; 153 return -EINTR;
156 154
157 while (usize > 0) { 155 pfn = page_to_pfn(buf->pages[page_offset++]);
158 pfn = page_to_pfn(buf->pages[page_offset++]); 156 } else
159 vm_insert_mixed(vma, f_vaddr, pfn); 157 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
160 f_vaddr += PAGE_SIZE;
161 usize -= PAGE_SIZE;
162 }
163
164 return 0;
165 }
166
167 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
168 158
169 return vm_insert_mixed(vma, f_vaddr, pfn); 159 return vm_insert_mixed(vma, f_vaddr, pfn);
170} 160}
@@ -524,6 +514,8 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
524 if (!buffer->pages) 514 if (!buffer->pages)
525 return -EINVAL; 515 return -EINVAL;
526 516
517 vma->vm_flags |= VM_MIXEDMAP;
518
527 do { 519 do {
528 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); 520 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
529 if (ret) { 521 if (ret) {
@@ -710,7 +702,6 @@ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
710int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 702int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
711{ 703{
712 struct drm_gem_object *obj = vma->vm_private_data; 704 struct drm_gem_object *obj = vma->vm_private_data;
713 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
714 struct drm_device *dev = obj->dev; 705 struct drm_device *dev = obj->dev;
715 unsigned long f_vaddr; 706 unsigned long f_vaddr;
716 pgoff_t page_offset; 707 pgoff_t page_offset;
@@ -722,21 +713,10 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
722 713
723 mutex_lock(&dev->struct_mutex); 714 mutex_lock(&dev->struct_mutex);
724 715
725 /*
726 * allocate all pages as desired size if user wants to allocate
727 * physically non-continuous memory.
728 */
729 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
730 ret = exynos_drm_gem_get_pages(obj);
731 if (ret < 0)
732 goto err;
733 }
734
735 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); 716 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
736 if (ret < 0) 717 if (ret < 0)
737 DRM_ERROR("failed to map pages.\n"); 718 DRM_ERROR("failed to map pages.\n");
738 719
739err:
740 mutex_unlock(&dev->struct_mutex); 720 mutex_unlock(&dev->struct_mutex);
741 721
742 return convert_to_vm_err_msg(ret); 722 return convert_to_vm_err_msg(ret);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b505b70dba05..e6162a1681f0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1224,6 +1224,9 @@ static int i915_emon_status(struct seq_file *m, void *unused)
1224 unsigned long temp, chipset, gfx; 1224 unsigned long temp, chipset, gfx;
1225 int ret; 1225 int ret;
1226 1226
1227 if (!IS_GEN5(dev))
1228 return -ENODEV;
1229
1227 ret = mutex_lock_interruptible(&dev->struct_mutex); 1230 ret = mutex_lock_interruptible(&dev->struct_mutex);
1228 if (ret) 1231 if (ret)
1229 return ret; 1232 return ret;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 785f67f963ef..ba60f3c8f911 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1701,6 +1701,9 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1701 unsigned long diffms; 1701 unsigned long diffms;
1702 u32 count; 1702 u32 count;
1703 1703
1704 if (dev_priv->info->gen != 5)
1705 return;
1706
1704 getrawmonotonic(&now); 1707 getrawmonotonic(&now);
1705 diff1 = timespec_sub(now, dev_priv->last_time2); 1708 diff1 = timespec_sub(now, dev_priv->last_time2);
1706 1709
@@ -2121,12 +2124,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2121 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2124 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2122 (unsigned long) dev); 2125 (unsigned long) dev);
2123 2126
2124 spin_lock(&mchdev_lock); 2127 if (IS_GEN5(dev)) {
2125 i915_mch_dev = dev_priv; 2128 spin_lock(&mchdev_lock);
2126 dev_priv->mchdev_lock = &mchdev_lock; 2129 i915_mch_dev = dev_priv;
2127 spin_unlock(&mchdev_lock); 2130 dev_priv->mchdev_lock = &mchdev_lock;
2131 spin_unlock(&mchdev_lock);
2128 2132
2129 ips_ping_for_i915_load(); 2133 ips_ping_for_i915_load();
2134 }
2130 2135
2131 return 0; 2136 return 0;
2132 2137
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f51a696486cb..de431942ded4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1133,6 +1133,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1133 return -EINVAL; 1133 return -EINVAL;
1134 } 1134 }
1135 1135
1136 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1137 DRM_DEBUG("execbuf with %u cliprects\n",
1138 args->num_cliprects);
1139 return -EINVAL;
1140 }
1136 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), 1141 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
1137 GFP_KERNEL); 1142 GFP_KERNEL);
1138 if (cliprects == NULL) { 1143 if (cliprects == NULL) {
@@ -1404,7 +1409,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1404 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 1409 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1405 int ret; 1410 int ret;
1406 1411
1407 if (args->buffer_count < 1) { 1412 if (args->buffer_count < 1 ||
1413 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1408 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); 1414 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1409 return -EINVAL; 1415 return -EINVAL;
1410 } 1416 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b4bb1ef77ddc..9d24d65f0c3e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -568,6 +568,7 @@
568#define CM0_MASK_SHIFT 16 568#define CM0_MASK_SHIFT 16
569#define CM0_IZ_OPT_DISABLE (1<<6) 569#define CM0_IZ_OPT_DISABLE (1<<6)
570#define CM0_ZR_OPT_DISABLE (1<<5) 570#define CM0_ZR_OPT_DISABLE (1<<5)
571#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
571#define CM0_DEPTH_EVICT_DISABLE (1<<4) 572#define CM0_DEPTH_EVICT_DISABLE (1<<4)
572#define CM0_COLOR_EVICT_DISABLE (1<<3) 573#define CM0_COLOR_EVICT_DISABLE (1<<3)
573#define CM0_DEPTH_WRITE_DISABLE (1<<1) 574#define CM0_DEPTH_WRITE_DISABLE (1<<1)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4d3d736a4f56..90b9793fd5da 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -430,8 +430,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
430{ 430{
431 struct drm_device *dev = connector->dev; 431 struct drm_device *dev = connector->dev;
432 struct intel_crt *crt = intel_attached_crt(connector); 432 struct intel_crt *crt = intel_attached_crt(connector);
433 struct drm_crtc *crtc;
434 enum drm_connector_status status; 433 enum drm_connector_status status;
434 struct intel_load_detect_pipe tmp;
435 435
436 if (I915_HAS_HOTPLUG(dev)) { 436 if (I915_HAS_HOTPLUG(dev)) {
437 if (intel_crt_detect_hotplug(connector)) { 437 if (intel_crt_detect_hotplug(connector)) {
@@ -450,23 +450,16 @@ intel_crt_detect(struct drm_connector *connector, bool force)
450 return connector->status; 450 return connector->status;
451 451
452 /* for pre-945g platforms use load detect */ 452 /* for pre-945g platforms use load detect */
453 crtc = crt->base.base.crtc; 453 if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
454 if (crtc && crtc->enabled) { 454 &tmp)) {
455 status = intel_crt_load_detect(crt); 455 if (intel_crt_detect_ddc(connector))
456 } else { 456 status = connector_status_connected;
457 struct intel_load_detect_pipe tmp; 457 else
458 458 status = intel_crt_load_detect(crt);
459 if (intel_get_load_detect_pipe(&crt->base, connector, NULL, 459 intel_release_load_detect_pipe(&crt->base, connector,
460 &tmp)) { 460 &tmp);
461 if (intel_crt_detect_ddc(connector)) 461 } else
462 status = connector_status_connected; 462 status = connector_status_unknown;
463 else
464 status = intel_crt_load_detect(crt);
465 intel_release_load_detect_pipe(&crt->base, connector,
466 &tmp);
467 } else
468 status = connector_status_unknown;
469 }
470 463
471 return status; 464 return status;
472} 465}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5908cd563400..1b1cf3b3ff51 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7072,9 +7072,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
7072 struct drm_device *dev = crtc->dev; 7072 struct drm_device *dev = crtc->dev;
7073 drm_i915_private_t *dev_priv = dev->dev_private; 7073 drm_i915_private_t *dev_priv = dev->dev_private;
7074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7075 int pipe = intel_crtc->pipe;
7076 int dpll_reg = DPLL(pipe);
7077 int dpll = I915_READ(dpll_reg);
7078 7075
7079 if (HAS_PCH_SPLIT(dev)) 7076 if (HAS_PCH_SPLIT(dev))
7080 return; 7077 return;
@@ -7087,10 +7084,15 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
7087 * the manual case. 7084 * the manual case.
7088 */ 7085 */
7089 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 7086 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
7087 int pipe = intel_crtc->pipe;
7088 int dpll_reg = DPLL(pipe);
7089 u32 dpll;
7090
7090 DRM_DEBUG_DRIVER("downclocking LVDS\n"); 7091 DRM_DEBUG_DRIVER("downclocking LVDS\n");
7091 7092
7092 assert_panel_unlocked(dev_priv, pipe); 7093 assert_panel_unlocked(dev_priv, pipe);
7093 7094
7095 dpll = I915_READ(dpll_reg);
7094 dpll |= DISPLAY_RATE_SELECT_FPA1; 7096 dpll |= DISPLAY_RATE_SELECT_FPA1;
7095 I915_WRITE(dpll_reg, dpll); 7097 I915_WRITE(dpll_reg, dpll);
7096 intel_wait_for_vblank(dev, pipe); 7098 intel_wait_for_vblank(dev, pipe);
@@ -7098,7 +7100,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
7098 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 7100 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
7099 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); 7101 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
7100 } 7102 }
7101
7102} 7103}
7103 7104
7104/** 7105/**
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index cae3e5f17a49..2d7f47b56b6a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -136,7 +136,7 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
136 136
137 val &= ~VIDEO_DIP_SELECT_MASK; 137 val &= ~VIDEO_DIP_SELECT_MASK;
138 138
139 I915_WRITE(VIDEO_DIP_CTL, val | port | flags); 139 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
140 140
141 for (i = 0; i < len; i += 4) { 141 for (i = 0; i < len; i += 4) {
142 I915_WRITE(VIDEO_DIP_DATA, *data); 142 I915_WRITE(VIDEO_DIP_DATA, *data);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30e2c82101de..9c71183629c2 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -750,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
750 .ident = "Hewlett-Packard t5745", 750 .ident = "Hewlett-Packard t5745",
751 .matches = { 751 .matches = {
752 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 752 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
753 DMI_MATCH(DMI_BOARD_NAME, "hp t5745"), 753 DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
754 }, 754 },
755 }, 755 },
756 { 756 {
@@ -758,7 +758,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
758 .ident = "Hewlett-Packard st5747", 758 .ident = "Hewlett-Packard st5747",
759 .matches = { 759 .matches = {
760 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 760 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
761 DMI_MATCH(DMI_BOARD_NAME, "hp st5747"), 761 DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
762 }, 762 },
763 }, 763 },
764 { 764 {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f75806e5bff5..62892a826ede 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -398,6 +398,17 @@ static int init_render_ring(struct intel_ring_buffer *ring)
398 return ret; 398 return ret;
399 } 399 }
400 400
401
402 if (IS_GEN6(dev)) {
403 /* From the Sandybridge PRM, volume 1 part 3, page 24:
404 * "If this bit is set, STCunit will have LRA as replacement
405 * policy. [...] This bit must be reset. LRA replacement
406 * policy is not supported."
407 */
408 I915_WRITE(CACHE_MODE_0,
409 CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
410 }
411
401 if (INTEL_INFO(dev)->gen >= 6) { 412 if (INTEL_INFO(dev)->gen >= 6) {
402 I915_WRITE(INSTPM, 413 I915_WRITE(INSTPM,
403 INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING); 414 INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e36b171c1e7d..ae5e748f39bb 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -731,6 +731,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
731 uint16_t width, height; 731 uint16_t width, height;
732 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; 732 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
733 uint16_t h_sync_offset, v_sync_offset; 733 uint16_t h_sync_offset, v_sync_offset;
734 int mode_clock;
734 735
735 width = mode->crtc_hdisplay; 736 width = mode->crtc_hdisplay;
736 height = mode->crtc_vdisplay; 737 height = mode->crtc_vdisplay;
@@ -745,7 +746,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
745 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; 746 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
746 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; 747 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
747 748
748 dtd->part1.clock = mode->clock / 10; 749 mode_clock = mode->clock;
750 mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
751 mode_clock /= 10;
752 dtd->part1.clock = mode_clock;
753
749 dtd->part1.h_active = width & 0xff; 754 dtd->part1.h_active = width & 0xff;
750 dtd->part1.h_blank = h_blank_len & 0xff; 755 dtd->part1.h_blank = h_blank_len & 0xff;
751 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | 756 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
@@ -996,7 +1001,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
996 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); 1001 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
997 u32 sdvox; 1002 u32 sdvox;
998 struct intel_sdvo_in_out_map in_out; 1003 struct intel_sdvo_in_out_map in_out;
999 struct intel_sdvo_dtd input_dtd; 1004 struct intel_sdvo_dtd input_dtd, output_dtd;
1000 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 1005 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
1001 int rate; 1006 int rate;
1002 1007
@@ -1021,20 +1026,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1021 intel_sdvo->attached_output)) 1026 intel_sdvo->attached_output))
1022 return; 1027 return;
1023 1028
1024 /* We have tried to get input timing in mode_fixup, and filled into 1029 /* lvds has a special fixed output timing. */
1025 * adjusted_mode. 1030 if (intel_sdvo->is_lvds)
1026 */ 1031 intel_sdvo_get_dtd_from_mode(&output_dtd,
1027 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { 1032 intel_sdvo->sdvo_lvds_fixed_mode);
1028 input_dtd = intel_sdvo->input_dtd; 1033 else
1029 } else { 1034 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
1030 /* Set the output timing to the screen */ 1035 (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
1031 if (!intel_sdvo_set_target_output(intel_sdvo,
1032 intel_sdvo->attached_output))
1033 return;
1034
1035 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1036 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
1037 }
1038 1036
1039 /* Set the input timing to the screen. Assume always input 0. */ 1037 /* Set the input timing to the screen. Assume always input 0. */
1040 if (!intel_sdvo_set_target_input(intel_sdvo)) 1038 if (!intel_sdvo_set_target_input(intel_sdvo))
@@ -1052,6 +1050,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1052 !intel_sdvo_set_tv_format(intel_sdvo)) 1050 !intel_sdvo_set_tv_format(intel_sdvo))
1053 return; 1051 return;
1054 1052
1053 /* We have tried to get input timing in mode_fixup, and filled into
1054 * adjusted_mode.
1055 */
1056 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1055 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); 1057 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
1056 1058
1057 switch (pixel_multiplier) { 1059 switch (pixel_multiplier) {
@@ -1218,8 +1220,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
1218 1220
1219static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) 1221static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
1220{ 1222{
1223 struct drm_device *dev = intel_sdvo->base.base.dev;
1221 u8 response[2]; 1224 u8 response[2];
1222 1225
1226 /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
1227 * on the line. */
1228 if (IS_I945G(dev) || IS_I945GM(dev))
1229 return false;
1230
1223 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, 1231 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1224 &response, 2) && response[0]; 1232 &response, 2) && response[0];
1225} 1233}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 7814a760c164..284bd25d5d21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -270,7 +270,7 @@ static bool nouveau_dsm_detect(void)
270 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; 270 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
271 struct pci_dev *pdev = NULL; 271 struct pci_dev *pdev = NULL;
272 int has_dsm = 0; 272 int has_dsm = 0;
273 int has_optimus; 273 int has_optimus = 0;
274 int vga_count = 0; 274 int vga_count = 0;
275 bool guid_valid; 275 bool guid_valid;
276 int retval; 276 int retval;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 80963d05b54a..0be4a815e706 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6156,10 +6156,14 @@ dcb_fake_connectors(struct nvbios *bios)
6156 6156
6157 /* heuristic: if we ever get a non-zero connector field, assume 6157 /* heuristic: if we ever get a non-zero connector field, assume
6158 * that all the indices are valid and we don't need fake them. 6158 * that all the indices are valid and we don't need fake them.
6159 *
6160 * and, as usual, a blacklist of boards with bad bios data..
6159 */ 6161 */
6160 for (i = 0; i < dcbt->entries; i++) { 6162 if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) {
6161 if (dcbt->entry[i].connector) 6163 for (i = 0; i < dcbt->entries; i++) {
6162 return; 6164 if (dcbt->entry[i].connector)
6165 return;
6166 }
6163 } 6167 }
6164 6168
6165 /* no useful connector info available, we need to make it up 6169 /* no useful connector info available, we need to make it up
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
index 59ea1c14eca0..c3de36384522 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -32,7 +32,9 @@ static bool
32hdmi_sor(struct drm_encoder *encoder) 32hdmi_sor(struct drm_encoder *encoder)
33{ 33{
34 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; 34 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
35 if (dev_priv->chipset < 0xa3) 35 if (dev_priv->chipset < 0xa3 ||
36 dev_priv->chipset == 0xaa ||
37 dev_priv->chipset == 0xac)
36 return false; 38 return false;
37 return true; 39 return true;
38} 40}
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index e2be95af2e52..77e564667b5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -29,10 +29,6 @@
29#include "nouveau_i2c.h" 29#include "nouveau_i2c.h"
30#include "nouveau_hw.h" 30#include "nouveau_hw.h"
31 31
32#define T_TIMEOUT 2200000
33#define T_RISEFALL 1000
34#define T_HOLD 5000
35
36static void 32static void
37i2c_drive_scl(void *data, int state) 33i2c_drive_scl(void *data, int state)
38{ 34{
@@ -113,175 +109,6 @@ i2c_sense_sda(void *data)
113 return 0; 109 return 0;
114} 110}
115 111
116static void
117i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
118{
119 udelay((nsec + 500) / 1000);
120}
121
122static bool
123i2c_raise_scl(struct nouveau_i2c_chan *port)
124{
125 u32 timeout = T_TIMEOUT / T_RISEFALL;
126
127 i2c_drive_scl(port, 1);
128 do {
129 i2c_delay(port, T_RISEFALL);
130 } while (!i2c_sense_scl(port) && --timeout);
131
132 return timeout != 0;
133}
134
135static int
136i2c_start(struct nouveau_i2c_chan *port)
137{
138 int ret = 0;
139
140 port->state = i2c_sense_scl(port);
141 port->state |= i2c_sense_sda(port) << 1;
142 if (port->state != 3) {
143 i2c_drive_scl(port, 0);
144 i2c_drive_sda(port, 1);
145 if (!i2c_raise_scl(port))
146 ret = -EBUSY;
147 }
148
149 i2c_drive_sda(port, 0);
150 i2c_delay(port, T_HOLD);
151 i2c_drive_scl(port, 0);
152 i2c_delay(port, T_HOLD);
153 return ret;
154}
155
156static void
157i2c_stop(struct nouveau_i2c_chan *port)
158{
159 i2c_drive_scl(port, 0);
160 i2c_drive_sda(port, 0);
161 i2c_delay(port, T_RISEFALL);
162
163 i2c_drive_scl(port, 1);
164 i2c_delay(port, T_HOLD);
165 i2c_drive_sda(port, 1);
166 i2c_delay(port, T_HOLD);
167}
168
169static int
170i2c_bitw(struct nouveau_i2c_chan *port, int sda)
171{
172 i2c_drive_sda(port, sda);
173 i2c_delay(port, T_RISEFALL);
174
175 if (!i2c_raise_scl(port))
176 return -ETIMEDOUT;
177 i2c_delay(port, T_HOLD);
178
179 i2c_drive_scl(port, 0);
180 i2c_delay(port, T_HOLD);
181 return 0;
182}
183
184static int
185i2c_bitr(struct nouveau_i2c_chan *port)
186{
187 int sda;
188
189 i2c_drive_sda(port, 1);
190 i2c_delay(port, T_RISEFALL);
191
192 if (!i2c_raise_scl(port))
193 return -ETIMEDOUT;
194 i2c_delay(port, T_HOLD);
195
196 sda = i2c_sense_sda(port);
197
198 i2c_drive_scl(port, 0);
199 i2c_delay(port, T_HOLD);
200 return sda;
201}
202
203static int
204i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
205{
206 int i, bit;
207
208 *byte = 0;
209 for (i = 7; i >= 0; i--) {
210 bit = i2c_bitr(port);
211 if (bit < 0)
212 return bit;
213 *byte |= bit << i;
214 }
215
216 return i2c_bitw(port, last ? 1 : 0);
217}
218
219static int
220i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
221{
222 int i, ret;
223 for (i = 7; i >= 0; i--) {
224 ret = i2c_bitw(port, !!(byte & (1 << i)));
225 if (ret < 0)
226 return ret;
227 }
228
229 ret = i2c_bitr(port);
230 if (ret == 1) /* nack */
231 ret = -EIO;
232 return ret;
233}
234
235static int
236i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
237{
238 u32 addr = msg->addr << 1;
239 if (msg->flags & I2C_M_RD)
240 addr |= 1;
241 return i2c_put_byte(port, addr);
242}
243
244static int
245i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
246{
247 struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
248 struct i2c_msg *msg = msgs;
249 int ret = 0, mcnt = num;
250
251 while (!ret && mcnt--) {
252 u8 remaining = msg->len;
253 u8 *ptr = msg->buf;
254
255 ret = i2c_start(port);
256 if (ret == 0)
257 ret = i2c_addr(port, msg);
258
259 if (msg->flags & I2C_M_RD) {
260 while (!ret && remaining--)
261 ret = i2c_get_byte(port, ptr++, !remaining);
262 } else {
263 while (!ret && remaining--)
264 ret = i2c_put_byte(port, *ptr++);
265 }
266
267 msg++;
268 }
269
270 i2c_stop(port);
271 return (ret < 0) ? ret : num;
272}
273
274static u32
275i2c_bit_func(struct i2c_adapter *adap)
276{
277 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
278}
279
280const struct i2c_algorithm nouveau_i2c_bit_algo = {
281 .master_xfer = i2c_bit_xfer,
282 .functionality = i2c_bit_func
283};
284
285static const uint32_t nv50_i2c_port[] = { 112static const uint32_t nv50_i2c_port[] = {
286 0x00e138, 0x00e150, 0x00e168, 0x00e180, 113 0x00e138, 0x00e150, 0x00e168, 0x00e180,
287 0x00e254, 0x00e274, 0x00e764, 0x00e780, 114 0x00e254, 0x00e274, 0x00e764, 0x00e780,
@@ -384,12 +211,10 @@ nouveau_i2c_init(struct drm_device *dev)
384 case 0: /* NV04:NV50 */ 211 case 0: /* NV04:NV50 */
385 port->drive = entry[0]; 212 port->drive = entry[0];
386 port->sense = entry[1]; 213 port->sense = entry[1];
387 port->adapter.algo = &nouveau_i2c_bit_algo;
388 break; 214 break;
389 case 4: /* NV4E */ 215 case 4: /* NV4E */
390 port->drive = 0x600800 + entry[1]; 216 port->drive = 0x600800 + entry[1];
391 port->sense = port->drive; 217 port->sense = port->drive;
392 port->adapter.algo = &nouveau_i2c_bit_algo;
393 break; 218 break;
394 case 5: /* NV50- */ 219 case 5: /* NV50- */
395 port->drive = entry[0] & 0x0f; 220 port->drive = entry[0] & 0x0f;
@@ -402,7 +227,6 @@ nouveau_i2c_init(struct drm_device *dev)
402 port->drive = 0x00d014 + (port->drive * 0x20); 227 port->drive = 0x00d014 + (port->drive * 0x20);
403 port->sense = port->drive; 228 port->sense = port->drive;
404 } 229 }
405 port->adapter.algo = &nouveau_i2c_bit_algo;
406 break; 230 break;
407 case 6: /* NV50- DP AUX */ 231 case 6: /* NV50- DP AUX */
408 port->drive = entry[0]; 232 port->drive = entry[0];
@@ -413,7 +237,7 @@ nouveau_i2c_init(struct drm_device *dev)
413 break; 237 break;
414 } 238 }
415 239
416 if (!port->adapter.algo) { 240 if (!port->adapter.algo && !port->drive) {
417 NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n", 241 NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
418 i, port->type, port->drive, port->sense); 242 i, port->type, port->drive, port->sense);
419 kfree(port); 243 kfree(port);
@@ -429,7 +253,26 @@ nouveau_i2c_init(struct drm_device *dev)
429 port->dcb = ROM32(entry[0]); 253 port->dcb = ROM32(entry[0]);
430 i2c_set_adapdata(&port->adapter, i2c); 254 i2c_set_adapdata(&port->adapter, i2c);
431 255
432 ret = i2c_add_adapter(&port->adapter); 256 if (port->adapter.algo != &nouveau_dp_i2c_algo) {
257 port->adapter.algo_data = &port->bit;
258 port->bit.udelay = 10;
259 port->bit.timeout = usecs_to_jiffies(2200);
260 port->bit.data = port;
261 port->bit.setsda = i2c_drive_sda;
262 port->bit.setscl = i2c_drive_scl;
263 port->bit.getsda = i2c_sense_sda;
264 port->bit.getscl = i2c_sense_scl;
265
266 i2c_drive_scl(port, 0);
267 i2c_drive_sda(port, 1);
268 i2c_drive_scl(port, 1);
269
270 ret = i2c_bit_add_bus(&port->adapter);
271 } else {
272 port->adapter.algo = &nouveau_dp_i2c_algo;
273 ret = i2c_add_adapter(&port->adapter);
274 }
275
433 if (ret) { 276 if (ret) {
434 NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret); 277 NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
435 kfree(port); 278 kfree(port);
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index 4d2e4e9031be..1d083893a4d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -34,6 +34,7 @@
34struct nouveau_i2c_chan { 34struct nouveau_i2c_chan {
35 struct i2c_adapter adapter; 35 struct i2c_adapter adapter;
36 struct drm_device *dev; 36 struct drm_device *dev;
37 struct i2c_algo_bit_data bit;
37 struct list_head head; 38 struct list_head head;
38 u8 index; 39 u8 index;
39 u8 type; 40 u8 type;
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 550ad3fcf0af..9d79180069df 100644
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -65,7 +65,7 @@ nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
65 if (line < 10) { 65 if (line < 10) {
66 line = (line - 2) * 4; 66 line = (line - 2) * 4;
67 reg = NV_PCRTC_GPIO_EXT; 67 reg = NV_PCRTC_GPIO_EXT;
68 mask = 0x00000003 << ((line - 2) * 4); 68 mask = 0x00000003;
69 data = (dir << 1) | out; 69 data = (dir << 1) | out;
70 } else 70 } else
71 if (line < 14) { 71 if (line < 14) {
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 5bf55038fd92..f704e942372e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -54,6 +54,11 @@ nvc0_mfb_isr(struct drm_device *dev)
54 nvc0_mfb_subp_isr(dev, unit, subp); 54 nvc0_mfb_subp_isr(dev, unit, subp);
55 units &= ~(1 << unit); 55 units &= ~(1 << unit);
56 } 56 }
57
58 /* we do something horribly wrong and upset PMFB a lot, so mask off
59 * interrupts from it after the first one until it's fixed
60 */
61 nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
57} 62}
58 63
59static void 64static void
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b5ff1f7b6f7e..af1054f8202a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -575,6 +575,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
575 575
576 if (rdev->family < CHIP_RV770) 576 if (rdev->family < CHIP_RV770)
577 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 577 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
578 /* use frac fb div on APUs */
579 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
580 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
578 } else { 581 } else {
579 pll->flags |= RADEON_PLL_LEGACY; 582 pll->flags |= RADEON_PLL_LEGACY;
580 583
@@ -955,8 +958,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
955 break; 958 break;
956 } 959 }
957 960
958 if (radeon_encoder->active_device & 961 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
959 (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { 962 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
960 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 963 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
961 struct drm_connector *connector = 964 struct drm_connector *connector =
962 radeon_get_connector_for_encoder(encoder); 965 radeon_get_connector_for_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ea7df16e2f84..5992502a3448 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -241,8 +241,8 @@ int radeon_wb_init(struct radeon_device *rdev)
241 rdev->wb.use_event = true; 241 rdev->wb.use_event = true;
242 } 242 }
243 } 243 }
244 /* always use writeback/events on NI */ 244 /* always use writeback/events on NI, APUs */
245 if (ASIC_IS_DCE5(rdev)) { 245 if (rdev->family >= CHIP_PALM) {
246 rdev->wb.enabled = true; 246 rdev->wb.enabled = true;
247 rdev->wb.use_event = true; 247 rdev->wb.use_event = true;
248 } 248 }
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8086c96e0b06..0a1d4bd65edc 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
533 radeon_legacy_init_crtc(dev, radeon_crtc); 533 radeon_legacy_init_crtc(dev, radeon_crtc);
534} 534}
535 535
536static const char *encoder_names[36] = { 536static const char *encoder_names[37] = {
537 "NONE", 537 "NONE",
538 "INTERNAL_LVDS", 538 "INTERNAL_LVDS",
539 "INTERNAL_TMDS1", 539 "INTERNAL_TMDS1",
@@ -570,6 +570,7 @@ static const char *encoder_names[36] = {
570 "INTERNAL_UNIPHY2", 570 "INTERNAL_UNIPHY2",
571 "NUTMEG", 571 "NUTMEG",
572 "TRAVIS", 572 "TRAVIS",
573 "INTERNAL_VCE"
573}; 574};
574 575
575static const char *connector_names[15] = { 576static const char *connector_names[15] = {
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
index 88a050df2389..3ad91f6447d8 100644
--- a/drivers/hsi/clients/hsi_char.c
+++ b/drivers/hsi/clients/hsi_char.c
@@ -123,7 +123,7 @@ struct hsc_client_data {
123static unsigned int hsc_major; 123static unsigned int hsc_major;
124/* Maximum buffer size that hsi_char will accept from userspace */ 124/* Maximum buffer size that hsi_char will accept from userspace */
125static unsigned int max_data_size = 0x1000; 125static unsigned int max_data_size = 0x1000;
126module_param(max_data_size, uint, S_IRUSR | S_IWUSR); 126module_param(max_data_size, uint, 0);
127MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)"); 127MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)");
128 128
129static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg, 129static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg,
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
index 4e2d79b79334..2d58f939d27f 100644
--- a/drivers/hsi/hsi.c
+++ b/drivers/hsi/hsi.c
@@ -21,26 +21,13 @@
21 */ 21 */
22#include <linux/hsi/hsi.h> 22#include <linux/hsi/hsi.h>
23#include <linux/compiler.h> 23#include <linux/compiler.h>
24#include <linux/rwsem.h>
25#include <linux/list.h> 24#include <linux/list.h>
26#include <linux/spinlock.h>
27#include <linux/kobject.h> 25#include <linux/kobject.h>
28#include <linux/slab.h> 26#include <linux/slab.h>
29#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/notifier.h>
30#include "hsi_core.h" 29#include "hsi_core.h"
31 30
32static struct device_type hsi_ctrl = {
33 .name = "hsi_controller",
34};
35
36static struct device_type hsi_cl = {
37 .name = "hsi_client",
38};
39
40static struct device_type hsi_port = {
41 .name = "hsi_port",
42};
43
44static ssize_t modalias_show(struct device *dev, 31static ssize_t modalias_show(struct device *dev,
45 struct device_attribute *a __maybe_unused, char *buf) 32 struct device_attribute *a __maybe_unused, char *buf)
46{ 33{
@@ -54,8 +41,7 @@ static struct device_attribute hsi_bus_dev_attrs[] = {
54 41
55static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 42static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
56{ 43{
57 if (dev->type == &hsi_cl) 44 add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
58 add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
59 45
60 return 0; 46 return 0;
61} 47}
@@ -80,12 +66,10 @@ static void hsi_client_release(struct device *dev)
80static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) 66static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
81{ 67{
82 struct hsi_client *cl; 68 struct hsi_client *cl;
83 unsigned long flags;
84 69
85 cl = kzalloc(sizeof(*cl), GFP_KERNEL); 70 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
86 if (!cl) 71 if (!cl)
87 return; 72 return;
88 cl->device.type = &hsi_cl;
89 cl->tx_cfg = info->tx_cfg; 73 cl->tx_cfg = info->tx_cfg;
90 cl->rx_cfg = info->rx_cfg; 74 cl->rx_cfg = info->rx_cfg;
91 cl->device.bus = &hsi_bus_type; 75 cl->device.bus = &hsi_bus_type;
@@ -93,14 +77,11 @@ static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
93 cl->device.release = hsi_client_release; 77 cl->device.release = hsi_client_release;
94 dev_set_name(&cl->device, info->name); 78 dev_set_name(&cl->device, info->name);
95 cl->device.platform_data = info->platform_data; 79 cl->device.platform_data = info->platform_data;
96 spin_lock_irqsave(&port->clock, flags);
97 list_add_tail(&cl->link, &port->clients);
98 spin_unlock_irqrestore(&port->clock, flags);
99 if (info->archdata) 80 if (info->archdata)
100 cl->device.archdata = *info->archdata; 81 cl->device.archdata = *info->archdata;
101 if (device_register(&cl->device) < 0) { 82 if (device_register(&cl->device) < 0) {
102 pr_err("hsi: failed to register client: %s\n", info->name); 83 pr_err("hsi: failed to register client: %s\n", info->name);
103 kfree(cl); 84 put_device(&cl->device);
104 } 85 }
105} 86}
106 87
@@ -120,13 +101,6 @@ static void hsi_scan_board_info(struct hsi_controller *hsi)
120 101
121static int hsi_remove_client(struct device *dev, void *data __maybe_unused) 102static int hsi_remove_client(struct device *dev, void *data __maybe_unused)
122{ 103{
123 struct hsi_client *cl = to_hsi_client(dev);
124 struct hsi_port *port = to_hsi_port(dev->parent);
125 unsigned long flags;
126
127 spin_lock_irqsave(&port->clock, flags);
128 list_del(&cl->link);
129 spin_unlock_irqrestore(&port->clock, flags);
130 device_unregister(dev); 104 device_unregister(dev);
131 105
132 return 0; 106 return 0;
@@ -140,12 +114,17 @@ static int hsi_remove_port(struct device *dev, void *data __maybe_unused)
140 return 0; 114 return 0;
141} 115}
142 116
143static void hsi_controller_release(struct device *dev __maybe_unused) 117static void hsi_controller_release(struct device *dev)
144{ 118{
119 struct hsi_controller *hsi = to_hsi_controller(dev);
120
121 kfree(hsi->port);
122 kfree(hsi);
145} 123}
146 124
147static void hsi_port_release(struct device *dev __maybe_unused) 125static void hsi_port_release(struct device *dev)
148{ 126{
127 kfree(to_hsi_port(dev));
149} 128}
150 129
151/** 130/**
@@ -170,20 +149,12 @@ int hsi_register_controller(struct hsi_controller *hsi)
170 unsigned int i; 149 unsigned int i;
171 int err; 150 int err;
172 151
173 hsi->device.type = &hsi_ctrl; 152 err = device_add(&hsi->device);
174 hsi->device.bus = &hsi_bus_type;
175 hsi->device.release = hsi_controller_release;
176 err = device_register(&hsi->device);
177 if (err < 0) 153 if (err < 0)
178 return err; 154 return err;
179 for (i = 0; i < hsi->num_ports; i++) { 155 for (i = 0; i < hsi->num_ports; i++) {
180 hsi->port[i].device.parent = &hsi->device; 156 hsi->port[i]->device.parent = &hsi->device;
181 hsi->port[i].device.bus = &hsi_bus_type; 157 err = device_add(&hsi->port[i]->device);
182 hsi->port[i].device.release = hsi_port_release;
183 hsi->port[i].device.type = &hsi_port;
184 INIT_LIST_HEAD(&hsi->port[i].clients);
185 spin_lock_init(&hsi->port[i].clock);
186 err = device_register(&hsi->port[i].device);
187 if (err < 0) 158 if (err < 0)
188 goto out; 159 goto out;
189 } 160 }
@@ -192,7 +163,9 @@ int hsi_register_controller(struct hsi_controller *hsi)
192 163
193 return 0; 164 return 0;
194out: 165out:
195 hsi_unregister_controller(hsi); 166 while (i-- > 0)
167 device_del(&hsi->port[i]->device);
168 device_del(&hsi->device);
196 169
197 return err; 170 return err;
198} 171}
@@ -223,6 +196,29 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
223} 196}
224 197
225/** 198/**
199 * hsi_put_controller - Free an HSI controller
200 *
201 * @hsi: Pointer to the HSI controller to freed
202 *
203 * HSI controller drivers should only use this function if they need
204 * to free their allocated hsi_controller structures before a successful
205 * call to hsi_register_controller. Other use is not allowed.
206 */
207void hsi_put_controller(struct hsi_controller *hsi)
208{
209 unsigned int i;
210
211 if (!hsi)
212 return;
213
214 for (i = 0; i < hsi->num_ports; i++)
215 if (hsi->port && hsi->port[i])
216 put_device(&hsi->port[i]->device);
217 put_device(&hsi->device);
218}
219EXPORT_SYMBOL_GPL(hsi_put_controller);
220
221/**
226 * hsi_alloc_controller - Allocate an HSI controller and its ports 222 * hsi_alloc_controller - Allocate an HSI controller and its ports
227 * @n_ports: Number of ports on the HSI controller 223 * @n_ports: Number of ports on the HSI controller
228 * @flags: Kernel allocation flags 224 * @flags: Kernel allocation flags
@@ -232,55 +228,52 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
232struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags) 228struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags)
233{ 229{
234 struct hsi_controller *hsi; 230 struct hsi_controller *hsi;
235 struct hsi_port *port; 231 struct hsi_port **port;
236 unsigned int i; 232 unsigned int i;
237 233
238 if (!n_ports) 234 if (!n_ports)
239 return NULL; 235 return NULL;
240 236
241 port = kzalloc(sizeof(*port)*n_ports, flags);
242 if (!port)
243 return NULL;
244 hsi = kzalloc(sizeof(*hsi), flags); 237 hsi = kzalloc(sizeof(*hsi), flags);
245 if (!hsi) 238 if (!hsi)
246 goto out; 239 return NULL;
247 for (i = 0; i < n_ports; i++) { 240 port = kzalloc(sizeof(*port)*n_ports, flags);
248 dev_set_name(&port[i].device, "port%d", i); 241 if (!port) {
249 port[i].num = i; 242 kfree(hsi);
250 port[i].async = hsi_dummy_msg; 243 return NULL;
251 port[i].setup = hsi_dummy_cl;
252 port[i].flush = hsi_dummy_cl;
253 port[i].start_tx = hsi_dummy_cl;
254 port[i].stop_tx = hsi_dummy_cl;
255 port[i].release = hsi_dummy_cl;
256 mutex_init(&port[i].lock);
257 } 244 }
258 hsi->num_ports = n_ports; 245 hsi->num_ports = n_ports;
259 hsi->port = port; 246 hsi->port = port;
247 hsi->device.release = hsi_controller_release;
248 device_initialize(&hsi->device);
249
250 for (i = 0; i < n_ports; i++) {
251 port[i] = kzalloc(sizeof(**port), flags);
252 if (port[i] == NULL)
253 goto out;
254 port[i]->num = i;
255 port[i]->async = hsi_dummy_msg;
256 port[i]->setup = hsi_dummy_cl;
257 port[i]->flush = hsi_dummy_cl;
258 port[i]->start_tx = hsi_dummy_cl;
259 port[i]->stop_tx = hsi_dummy_cl;
260 port[i]->release = hsi_dummy_cl;
261 mutex_init(&port[i]->lock);
262 ATOMIC_INIT_NOTIFIER_HEAD(&port[i]->n_head);
263 dev_set_name(&port[i]->device, "port%d", i);
264 hsi->port[i]->device.release = hsi_port_release;
265 device_initialize(&hsi->port[i]->device);
266 }
260 267
261 return hsi; 268 return hsi;
262out: 269out:
263 kfree(port); 270 hsi_put_controller(hsi);
264 271
265 return NULL; 272 return NULL;
266} 273}
267EXPORT_SYMBOL_GPL(hsi_alloc_controller); 274EXPORT_SYMBOL_GPL(hsi_alloc_controller);
268 275
269/** 276/**
270 * hsi_free_controller - Free an HSI controller
271 * @hsi: Pointer to HSI controller
272 */
273void hsi_free_controller(struct hsi_controller *hsi)
274{
275 if (!hsi)
276 return;
277
278 kfree(hsi->port);
279 kfree(hsi);
280}
281EXPORT_SYMBOL_GPL(hsi_free_controller);
282
283/**
284 * hsi_free_msg - Free an HSI message 277 * hsi_free_msg - Free an HSI message
285 * @msg: Pointer to the HSI message 278 * @msg: Pointer to the HSI message
286 * 279 *
@@ -414,37 +407,67 @@ void hsi_release_port(struct hsi_client *cl)
414} 407}
415EXPORT_SYMBOL_GPL(hsi_release_port); 408EXPORT_SYMBOL_GPL(hsi_release_port);
416 409
417static int hsi_start_rx(struct hsi_client *cl, void *data __maybe_unused) 410static int hsi_event_notifier_call(struct notifier_block *nb,
411 unsigned long event, void *data __maybe_unused)
418{ 412{
419 if (cl->hsi_start_rx) 413 struct hsi_client *cl = container_of(nb, struct hsi_client, nb);
420 (*cl->hsi_start_rx)(cl); 414
415 (*cl->ehandler)(cl, event);
421 416
422 return 0; 417 return 0;
423} 418}
424 419
425static int hsi_stop_rx(struct hsi_client *cl, void *data __maybe_unused) 420/**
421 * hsi_register_port_event - Register a client to receive port events
422 * @cl: HSI client that wants to receive port events
423 * @cb: Event handler callback
424 *
425 * Clients should register a callback to be able to receive
426 * events from the ports. Registration should happen after
427 * claiming the port.
428 * The handler can be called in interrupt context.
429 *
430 * Returns -errno on error, or 0 on success.
431 */
432int hsi_register_port_event(struct hsi_client *cl,
433 void (*handler)(struct hsi_client *, unsigned long))
426{ 434{
427 if (cl->hsi_stop_rx) 435 struct hsi_port *port = hsi_get_port(cl);
428 (*cl->hsi_stop_rx)(cl);
429 436
430 return 0; 437 if (!handler || cl->ehandler)
438 return -EINVAL;
439 if (!hsi_port_claimed(cl))
440 return -EACCES;
441 cl->ehandler = handler;
442 cl->nb.notifier_call = hsi_event_notifier_call;
443
444 return atomic_notifier_chain_register(&port->n_head, &cl->nb);
431} 445}
446EXPORT_SYMBOL_GPL(hsi_register_port_event);
432 447
433static int hsi_port_for_each_client(struct hsi_port *port, void *data, 448/**
434 int (*fn)(struct hsi_client *cl, void *data)) 449 * hsi_unregister_port_event - Stop receiving port events for a client
450 * @cl: HSI client that wants to stop receiving port events
451 *
452 * Clients should call this function before releasing their associated
453 * port.
454 *
455 * Returns -errno on error, or 0 on success.
456 */
457int hsi_unregister_port_event(struct hsi_client *cl)
435{ 458{
436 struct hsi_client *cl; 459 struct hsi_port *port = hsi_get_port(cl);
460 int err;
437 461
438 spin_lock(&port->clock); 462 WARN_ON(!hsi_port_claimed(cl));
439 list_for_each_entry(cl, &port->clients, link) {
440 spin_unlock(&port->clock);
441 (*fn)(cl, data);
442 spin_lock(&port->clock);
443 }
444 spin_unlock(&port->clock);
445 463
446 return 0; 464 err = atomic_notifier_chain_unregister(&port->n_head, &cl->nb);
465 if (!err)
466 cl->ehandler = NULL;
467
468 return err;
447} 469}
470EXPORT_SYMBOL_GPL(hsi_unregister_port_event);
448 471
449/** 472/**
450 * hsi_event -Notifies clients about port events 473 * hsi_event -Notifies clients about port events
@@ -458,22 +481,12 @@ static int hsi_port_for_each_client(struct hsi_port *port, void *data,
458 * Events: 481 * Events:
459 * HSI_EVENT_START_RX - Incoming wake line high 482 * HSI_EVENT_START_RX - Incoming wake line high
460 * HSI_EVENT_STOP_RX - Incoming wake line down 483 * HSI_EVENT_STOP_RX - Incoming wake line down
484 *
485 * Returns -errno on error, or 0 on success.
461 */ 486 */
462void hsi_event(struct hsi_port *port, unsigned int event) 487int hsi_event(struct hsi_port *port, unsigned long event)
463{ 488{
464 int (*fn)(struct hsi_client *cl, void *data); 489 return atomic_notifier_call_chain(&port->n_head, event, NULL);
465
466 switch (event) {
467 case HSI_EVENT_START_RX:
468 fn = hsi_start_rx;
469 break;
470 case HSI_EVENT_STOP_RX:
471 fn = hsi_stop_rx;
472 break;
473 default:
474 return;
475 }
476 hsi_port_for_each_client(port, NULL, fn);
477} 490}
478EXPORT_SYMBOL_GPL(hsi_event); 491EXPORT_SYMBOL_GPL(hsi_event);
479 492
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 8af25a097d75..7233c88f01b8 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -30,37 +30,6 @@
30#include "hyperv_vmbus.h" 30#include "hyperv_vmbus.h"
31 31
32 32
33/* #defines */
34
35
36/* Amount of space to write to */
37#define BYTES_AVAIL_TO_WRITE(r, w, z) \
38 ((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w))
39
40
41/*
42 *
43 * hv_get_ringbuffer_availbytes()
44 *
45 * Get number of bytes available to read and to write to
46 * for the specified ring buffer
47 */
48static inline void
49hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
50 u32 *read, u32 *write)
51{
52 u32 read_loc, write_loc;
53
54 smp_read_barrier_depends();
55
56 /* Capture the read/write indices before they changed */
57 read_loc = rbi->ring_buffer->read_index;
58 write_loc = rbi->ring_buffer->write_index;
59
60 *write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
61 *read = rbi->ring_datasize - *write;
62}
63
64/* 33/*
65 * hv_get_next_write_location() 34 * hv_get_next_write_location()
66 * 35 *
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index ce43642ef03e..f85ce70d9677 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -47,7 +47,7 @@ struct ad7314_data {
47 u16 rx ____cacheline_aligned; 47 u16 rx ____cacheline_aligned;
48}; 48};
49 49
50static int ad7314_spi_read(struct ad7314_data *chip, s16 *data) 50static int ad7314_spi_read(struct ad7314_data *chip)
51{ 51{
52 int ret; 52 int ret;
53 53
@@ -57,9 +57,7 @@ static int ad7314_spi_read(struct ad7314_data *chip, s16 *data)
57 return ret; 57 return ret;
58 } 58 }
59 59
60 *data = be16_to_cpu(chip->rx); 60 return be16_to_cpu(chip->rx);
61
62 return ret;
63} 61}
64 62
65static ssize_t ad7314_show_temperature(struct device *dev, 63static ssize_t ad7314_show_temperature(struct device *dev,
@@ -70,12 +68,12 @@ static ssize_t ad7314_show_temperature(struct device *dev,
70 s16 data; 68 s16 data;
71 int ret; 69 int ret;
72 70
73 ret = ad7314_spi_read(chip, &data); 71 ret = ad7314_spi_read(chip);
74 if (ret < 0) 72 if (ret < 0)
75 return ret; 73 return ret;
76 switch (spi_get_device_id(chip->spi_dev)->driver_data) { 74 switch (spi_get_device_id(chip->spi_dev)->driver_data) {
77 case ad7314: 75 case ad7314:
78 data = (data & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET; 76 data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET;
79 data = (data << 6) >> 6; 77 data = (data << 6) >> 6;
80 78
81 return sprintf(buf, "%d\n", 250 * data); 79 return sprintf(buf, "%d\n", 250 * data);
@@ -86,7 +84,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
86 * with a sign bit - which is a 14 bit 2's complement 84 * with a sign bit - which is a 14 bit 2's complement
87 * register. 1lsb - 31.25 milli degrees centigrade 85 * register. 1lsb - 31.25 milli degrees centigrade
88 */ 86 */
89 data &= ADT7301_TEMP_MASK; 87 data = ret & ADT7301_TEMP_MASK;
90 data = (data << 2) >> 2; 88 data = (data << 2) >> 2;
91 89
92 return sprintf(buf, "%d\n", 90 return sprintf(buf, "%d\n",
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 0d3141fbbc20..b9d512331ed4 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -52,7 +52,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
52MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); 52MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
53 53
54#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 54#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
55#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ 55#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
56#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ 56#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
57#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ 57#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
58#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) 58#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
@@ -709,6 +709,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
709 709
710 indx = TO_ATTR_NO(cpu); 710 indx = TO_ATTR_NO(cpu);
711 711
712 /* The core id is too big, just return */
713 if (indx > MAX_CORE_DATA - 1)
714 return;
715
712 if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu) 716 if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
713 coretemp_remove_core(pdata, &pdev->dev, indx); 717 coretemp_remove_core(pdata, &pdev->dev, indx);
714 718
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 37a8fc92b44a..e8e18cab1fb8 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -128,17 +128,20 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
128 * counter saturations resulting in bogus power readings. 128 * counter saturations resulting in bogus power readings.
129 * We correct this value ourselves to cope with older BIOSes. 129 * We correct this value ourselves to cope with older BIOSes.
130 */ 130 */
131static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
132 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
133 { 0 }
134};
135
131static void __devinit tweak_runavg_range(struct pci_dev *pdev) 136static void __devinit tweak_runavg_range(struct pci_dev *pdev)
132{ 137{
133 u32 val; 138 u32 val;
134 const struct pci_device_id affected_device = {
135 PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) };
136 139
137 /* 140 /*
138 * let this quirk apply only to the current version of the 141 * let this quirk apply only to the current version of the
139 * northbridge, since future versions may change the behavior 142 * northbridge, since future versions may change the behavior
140 */ 143 */
141 if (!pci_match_id(&affected_device, pdev)) 144 if (!pci_match_id(affected_device, pdev))
142 return; 145 return;
143 146
144 pci_bus_read_config_dword(pdev->bus, 147 pci_bus_read_config_dword(pdev->bus,
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index f086131cb1c7..c811289b61e2 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -324,7 +324,7 @@ static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
324{ 324{
325 long ret; 325 long ret;
326 ret = wait_event_timeout(pch_event, 326 ret = wait_event_timeout(pch_event,
327 (adap->pch_event_flag != 0), msecs_to_jiffies(50)); 327 (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
328 328
329 if (ret == 0) { 329 if (ret == 0) {
330 pch_err(adap, "timeout: %x\n", adap->pch_event_flag); 330 pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
@@ -1063,6 +1063,6 @@ module_exit(pch_pci_exit);
1063 1063
1064MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C"); 1064MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
1065MODULE_LICENSE("GPL"); 1065MODULE_LICENSE("GPL");
1066MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.lapis-semi.com>"); 1066MODULE_AUTHOR("Tomoya MORINAGA. <tomoya.rohm@gmail.com>");
1067module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR)); 1067module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
1068module_param(pch_clk, int, (S_IRUSR | S_IWUSR)); 1068module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 3d471d56bf15..76b8af44f634 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -227,6 +227,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
227 return -EINVAL; 227 return -EINVAL;
228 228
229 init_completion(&i2c->cmd_complete); 229 init_completion(&i2c->cmd_complete);
230 i2c->cmd_err = 0;
230 231
231 flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0; 232 flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
232 233
@@ -252,6 +253,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
252 253
253 if (i2c->cmd_err == -ENXIO) 254 if (i2c->cmd_err == -ENXIO)
254 mxs_i2c_reset(i2c); 255 mxs_i2c_reset(i2c);
256 else
257 writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
258 i2c->regs + MXS_I2C_QUEUECTRL_CLR);
255 259
256 dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err); 260 dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
257 261
@@ -299,8 +303,6 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
299 MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ)) 303 MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ))
300 /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */ 304 /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */
301 i2c->cmd_err = -EIO; 305 i2c->cmd_err = -EIO;
302 else
303 i2c->cmd_err = 0;
304 306
305 is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) & 307 is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) &
306 MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0; 308 MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0;
@@ -384,8 +386,6 @@ static int __devexit mxs_i2c_remove(struct platform_device *pdev)
384 if (ret) 386 if (ret)
385 return -EBUSY; 387 return -EBUSY;
386 388
387 writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
388 i2c->regs + MXS_I2C_QUEUECTRL_CLR);
389 writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET); 389 writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
390 390
391 platform_set_drvdata(pdev, NULL); 391 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 04be9f82e14b..eb8ad538c79f 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -546,8 +546,7 @@ static int i2c_pnx_controller_suspend(struct platform_device *pdev,
546{ 546{
547 struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); 547 struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
548 548
549 /* FIXME: shouldn't this be clk_disable? */ 549 clk_disable(alg_data->clk);
550 clk_enable(alg_data->clk);
551 550
552 return 0; 551 return 0;
553} 552}
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index e978635e60f0..55e5ea62ccee 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -516,6 +516,14 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
516 if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) 516 if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
517 return 0; 517 return 0;
518 518
519 /*
520 * NACK interrupt is generated before the I2C controller generates the
521 * STOP condition on the bus. So wait for 2 clock periods before resetting
522 * the controller so that STOP condition has been delivered properly.
523 */
524 if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
525 udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
526
519 tegra_i2c_init(i2c_dev); 527 tegra_i2c_init(i2c_dev);
520 if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { 528 if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
521 if (msg->flags & I2C_M_IGNORE_NAK) 529 if (msg->flags & I2C_M_IGNORE_NAK)
diff --git a/drivers/ieee802154/Kconfig b/drivers/ieee802154/Kconfig
index 9b9f43aa2f85..15c064073701 100644
--- a/drivers/ieee802154/Kconfig
+++ b/drivers/ieee802154/Kconfig
@@ -19,4 +19,12 @@ config IEEE802154_FAKEHARD
19 19
20 This driver can also be built as a module. To do so say M here. 20 This driver can also be built as a module. To do so say M here.
21 The module will be called 'fakehard'. 21 The module will be called 'fakehard'.
22config IEEE802154_FAKELB
23 depends on IEEE802154_DRIVERS && MAC802154
24 tristate "IEEE 802.15.4 loopback driver"
25 ---help---
26 Say Y here to enable the fake driver that can emulate a net
27 of several interconnected radio devices.
22 28
29 This driver can also be built as a module. To do so say M here.
30 The module will be called 'fakelb'.
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
index 800a3894af0d..ea784ea6f0f8 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/ieee802154/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o 1obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
2obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
diff --git a/drivers/ieee802154/fakelb.c b/drivers/ieee802154/fakelb.c
new file mode 100644
index 000000000000..e7456fcd0913
--- /dev/null
+++ b/drivers/ieee802154/fakelb.c
@@ -0,0 +1,294 @@
1/*
2 * Loopback IEEE 802.15.4 interface
3 *
4 * Copyright 2007-2012 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
23 */
24
25#include <linux/module.h>
26#include <linux/timer.h>
27#include <linux/platform_device.h>
28#include <linux/netdevice.h>
29#include <linux/spinlock.h>
30#include <net/mac802154.h>
31#include <net/wpan-phy.h>
32
33static int numlbs = 1;
34
35struct fakelb_dev_priv {
36 struct ieee802154_dev *dev;
37
38 struct list_head list;
39 struct fakelb_priv *fake;
40
41 spinlock_t lock;
42 bool working;
43};
44
45struct fakelb_priv {
46 struct list_head list;
47 rwlock_t lock;
48};
49
50static int
51fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level)
52{
53 might_sleep();
54 BUG_ON(!level);
55 *level = 0xbe;
56
57 return 0;
58}
59
60static int
61fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel)
62{
63 pr_debug("set channel to %d\n", channel);
64
65 might_sleep();
66 dev->phy->current_page = page;
67 dev->phy->current_channel = channel;
68
69 return 0;
70}
71
72static void
73fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
74{
75 struct sk_buff *newskb;
76
77 spin_lock(&priv->lock);
78 if (priv->working) {
79 newskb = pskb_copy(skb, GFP_ATOMIC);
80 ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc);
81 }
82 spin_unlock(&priv->lock);
83}
84
85static int
86fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
87{
88 struct fakelb_dev_priv *priv = dev->priv;
89 struct fakelb_priv *fake = priv->fake;
90
91 might_sleep();
92
93 read_lock_bh(&fake->lock);
94 if (priv->list.next == priv->list.prev) {
95 /* we are the only one device */
96 fakelb_hw_deliver(priv, skb);
97 } else {
98 struct fakelb_dev_priv *dp;
99 list_for_each_entry(dp, &priv->fake->list, list) {
100 if (dp != priv &&
101 (dp->dev->phy->current_channel ==
102 priv->dev->phy->current_channel))
103 fakelb_hw_deliver(dp, skb);
104 }
105 }
106 read_unlock_bh(&fake->lock);
107
108 return 0;
109}
110
111static int
112fakelb_hw_start(struct ieee802154_dev *dev) {
113 struct fakelb_dev_priv *priv = dev->priv;
114 int ret = 0;
115
116 spin_lock(&priv->lock);
117 if (priv->working)
118 ret = -EBUSY;
119 else
120 priv->working = 1;
121 spin_unlock(&priv->lock);
122
123 return ret;
124}
125
126static void
127fakelb_hw_stop(struct ieee802154_dev *dev) {
128 struct fakelb_dev_priv *priv = dev->priv;
129
130 spin_lock(&priv->lock);
131 priv->working = 0;
132 spin_unlock(&priv->lock);
133}
134
135static struct ieee802154_ops fakelb_ops = {
136 .owner = THIS_MODULE,
137 .xmit = fakelb_hw_xmit,
138 .ed = fakelb_hw_ed,
139 .set_channel = fakelb_hw_channel,
140 .start = fakelb_hw_start,
141 .stop = fakelb_hw_stop,
142};
143
144/* Number of dummy devices to be set up by this module. */
145module_param(numlbs, int, 0);
146MODULE_PARM_DESC(numlbs, " number of pseudo devices");
147
148static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
149{
150 struct fakelb_dev_priv *priv;
151 int err;
152 struct ieee802154_dev *ieee;
153
154 ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops);
155 if (!ieee)
156 return -ENOMEM;
157
158 priv = ieee->priv;
159 priv->dev = ieee;
160
161 /* 868 MHz BPSK 802.15.4-2003 */
162 ieee->phy->channels_supported[0] |= 1;
163 /* 915 MHz BPSK 802.15.4-2003 */
164 ieee->phy->channels_supported[0] |= 0x7fe;
165 /* 2.4 GHz O-QPSK 802.15.4-2003 */
166 ieee->phy->channels_supported[0] |= 0x7FFF800;
167 /* 868 MHz ASK 802.15.4-2006 */
168 ieee->phy->channels_supported[1] |= 1;
169 /* 915 MHz ASK 802.15.4-2006 */
170 ieee->phy->channels_supported[1] |= 0x7fe;
171 /* 868 MHz O-QPSK 802.15.4-2006 */
172 ieee->phy->channels_supported[2] |= 1;
173 /* 915 MHz O-QPSK 802.15.4-2006 */
174 ieee->phy->channels_supported[2] |= 0x7fe;
175 /* 2.4 GHz CSS 802.15.4a-2007 */
176 ieee->phy->channels_supported[3] |= 0x3fff;
177 /* UWB Sub-gigahertz 802.15.4a-2007 */
178 ieee->phy->channels_supported[4] |= 1;
179 /* UWB Low band 802.15.4a-2007 */
180 ieee->phy->channels_supported[4] |= 0x1e;
181 /* UWB High band 802.15.4a-2007 */
182 ieee->phy->channels_supported[4] |= 0xffe0;
183 /* 750 MHz O-QPSK 802.15.4c-2009 */
184 ieee->phy->channels_supported[5] |= 0xf;
185 /* 750 MHz MPSK 802.15.4c-2009 */
186 ieee->phy->channels_supported[5] |= 0xf0;
187 /* 950 MHz BPSK 802.15.4d-2009 */
188 ieee->phy->channels_supported[6] |= 0x3ff;
189 /* 950 MHz GFSK 802.15.4d-2009 */
190 ieee->phy->channels_supported[6] |= 0x3ffc00;
191
192 INIT_LIST_HEAD(&priv->list);
193 priv->fake = fake;
194
195 spin_lock_init(&priv->lock);
196
197 ieee->parent = dev;
198
199 err = ieee802154_register_device(ieee);
200 if (err)
201 goto err_reg;
202
203 write_lock_bh(&fake->lock);
204 list_add_tail(&priv->list, &fake->list);
205 write_unlock_bh(&fake->lock);
206
207 return 0;
208
209err_reg:
210 ieee802154_free_device(priv->dev);
211 return err;
212}
213
214static void fakelb_del(struct fakelb_dev_priv *priv)
215{
216 write_lock_bh(&priv->fake->lock);
217 list_del(&priv->list);
218 write_unlock_bh(&priv->fake->lock);
219
220 ieee802154_unregister_device(priv->dev);
221 ieee802154_free_device(priv->dev);
222}
223
224static int __devinit fakelb_probe(struct platform_device *pdev)
225{
226 struct fakelb_priv *priv;
227 struct fakelb_dev_priv *dp;
228 int err = -ENOMEM;
229 int i;
230
231 priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL);
232 if (!priv)
233 goto err_alloc;
234
235 INIT_LIST_HEAD(&priv->list);
236 rwlock_init(&priv->lock);
237
238 for (i = 0; i < numlbs; i++) {
239 err = fakelb_add_one(&pdev->dev, priv);
240 if (err < 0)
241 goto err_slave;
242 }
243
244 platform_set_drvdata(pdev, priv);
245 dev_info(&pdev->dev, "added ieee802154 hardware\n");
246 return 0;
247
248err_slave:
249 list_for_each_entry(dp, &priv->list, list)
250 fakelb_del(dp);
251 kfree(priv);
252err_alloc:
253 return err;
254}
255
256static int __devexit fakelb_remove(struct platform_device *pdev)
257{
258 struct fakelb_priv *priv = platform_get_drvdata(pdev);
259 struct fakelb_dev_priv *dp, *temp;
260
261 list_for_each_entry_safe(dp, temp, &priv->list, list)
262 fakelb_del(dp);
263 kfree(priv);
264
265 return 0;
266}
267
268static struct platform_device *ieee802154fake_dev;
269
270static struct platform_driver ieee802154fake_driver = {
271 .probe = fakelb_probe,
272 .remove = __devexit_p(fakelb_remove),
273 .driver = {
274 .name = "ieee802154fakelb",
275 .owner = THIS_MODULE,
276 },
277};
278
279static __init int fakelb_init_module(void)
280{
281 ieee802154fake_dev = platform_device_register_simple(
282 "ieee802154fakelb", -1, NULL, 0);
283 return platform_driver_register(&ieee802154fake_driver);
284}
285
286static __exit void fake_remove_module(void)
287{
288 platform_driver_unregister(&ieee802154fake_driver);
289 platform_device_unregister(ieee802154fake_dev);
290}
291
292module_init(fakelb_init_module);
293module_exit(fake_remove_module);
294MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e3e470fecaa9..59fbd704a1ec 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -42,6 +42,7 @@
42#include <linux/inetdevice.h> 42#include <linux/inetdevice.h>
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/module.h> 44#include <linux/module.h>
45#include <net/route.h>
45 46
46#include <net/tcp.h> 47#include <net/tcp.h>
47#include <net/ipv6.h> 48#include <net/ipv6.h>
@@ -1826,7 +1827,10 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
1826 route->path_rec->reversible = 1; 1827 route->path_rec->reversible = 1;
1827 route->path_rec->pkey = cpu_to_be16(0xffff); 1828 route->path_rec->pkey = cpu_to_be16(0xffff);
1828 route->path_rec->mtu_selector = IB_SA_EQ; 1829 route->path_rec->mtu_selector = IB_SA_EQ;
1829 route->path_rec->sl = id_priv->tos >> 5; 1830 route->path_rec->sl = netdev_get_prio_tc_map(
1831 ndev->priv_flags & IFF_802_1Q_VLAN ?
1832 vlan_dev_real_dev(ndev) : ndev,
1833 rt_tos2priority(id_priv->tos));
1830 1834
1831 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); 1835 route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
1832 route->path_rec->rate_selector = IB_SA_EQ; 1836 route->path_rec->rate_selector = IB_SA_EQ;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 426bb7617ec6..b0d0bc8a6fb6 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1854,6 +1854,8 @@ static bool generate_unmatched_resp(struct ib_mad_private *recv,
1854 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; 1854 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
1855 response->mad.mad.mad_hdr.status = 1855 response->mad.mad.mad_hdr.status =
1856 cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); 1856 cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
1857 if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1858 response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
1857 1859
1858 return true; 1860 return true;
1859 } else { 1861 } else {
@@ -1869,6 +1871,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1869 struct ib_mad_list_head *mad_list; 1871 struct ib_mad_list_head *mad_list;
1870 struct ib_mad_agent_private *mad_agent; 1872 struct ib_mad_agent_private *mad_agent;
1871 int port_num; 1873 int port_num;
1874 int ret = IB_MAD_RESULT_SUCCESS;
1872 1875
1873 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; 1876 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1874 qp_info = mad_list->mad_queue->qp_info; 1877 qp_info = mad_list->mad_queue->qp_info;
@@ -1952,8 +1955,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1952local: 1955local:
1953 /* Give driver "right of first refusal" on incoming MAD */ 1956 /* Give driver "right of first refusal" on incoming MAD */
1954 if (port_priv->device->process_mad) { 1957 if (port_priv->device->process_mad) {
1955 int ret;
1956
1957 ret = port_priv->device->process_mad(port_priv->device, 0, 1958 ret = port_priv->device->process_mad(port_priv->device, 0,
1958 port_priv->port_num, 1959 port_priv->port_num,
1959 wc, &recv->grh, 1960 wc, &recv->grh,
@@ -1981,7 +1982,8 @@ local:
1981 * or via recv_handler in ib_mad_complete_recv() 1982 * or via recv_handler in ib_mad_complete_recv()
1982 */ 1983 */
1983 recv = NULL; 1984 recv = NULL;
1984 } else if (generate_unmatched_resp(recv, response)) { 1985 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
1986 generate_unmatched_resp(recv, response)) {
1985 agent_send_response(&response->mad.mad, &recv->grh, wc, 1987 agent_send_response(&response->mad.mad, &recv->grh, wc,
1986 port_priv->device, port_num, qp_info->qp->qp_num); 1988 port_priv->device, port_num, qp_info->qp->qp_num);
1987 } 1989 }
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 396e29370304..e497dfbee435 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -125,7 +125,8 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
125 unsigned char *prev_tail; 125 unsigned char *prev_tail;
126 126
127 prev_tail = skb_tail_pointer(skb); 127 prev_tail = skb_tail_pointer(skb);
128 NLA_PUT(skb, type, len, data); 128 if (nla_put(skb, type, len, data))
129 goto nla_put_failure;
129 nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail; 130 nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
130 return 0; 131 return 0;
131 132
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5861cdb22b7c..8002ae642cfe 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -66,12 +66,6 @@ static ctl_table ucma_ctl_table[] = {
66 { } 66 { }
67}; 67};
68 68
69static struct ctl_path ucma_ctl_path[] = {
70 { .procname = "net" },
71 { .procname = "rdma_ucm" },
72 { }
73};
74
75struct ucma_file { 69struct ucma_file {
76 struct mutex mut; 70 struct mutex mut;
77 struct file *filp; 71 struct file *filp;
@@ -1392,7 +1386,7 @@ static int __init ucma_init(void)
1392 goto err1; 1386 goto err1;
1393 } 1387 }
1394 1388
1395 ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table); 1389 ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
1396 if (!ucma_ctl_table_hdr) { 1390 if (!ucma_ctl_table_hdr) {
1397 printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n"); 1391 printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
1398 ret = -ENOMEM; 1392 ret = -ENOMEM;
@@ -1408,7 +1402,7 @@ err1:
1408 1402
1409static void __exit ucma_cleanup(void) 1403static void __exit ucma_cleanup(void)
1410{ 1404{
1411 unregister_sysctl_table(ucma_ctl_table_hdr); 1405 unregister_net_sysctl_table(ucma_ctl_table_hdr);
1412 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); 1406 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1413 misc_deregister(&ucma_misc); 1407 misc_deregister(&ucma_misc);
1414 idr_destroy(&ctx_idr); 1408 idr_destroy(&ctx_idr);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 669673e81439..b948b6dd5d55 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -247,7 +247,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
247 err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, 247 err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
248 NULL, NULL, in_mad, out_mad); 248 NULL, NULL, in_mad, out_mad);
249 if (err) 249 if (err)
250 return err; 250 goto out;
251 251
252 /* Checking LinkSpeedActive for FDR-10 */ 252 /* Checking LinkSpeedActive for FDR-10 */
253 if (out_mad->data[15] & 0x1) 253 if (out_mad->data[15] & 0x1)
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 8081a0a5d602..a4b14a41cbf4 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -274,7 +274,8 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
274 static unsigned char param = 0xc8; 274 static unsigned char param = 0xc8;
275 struct synaptics_data *priv = psmouse->private; 275 struct synaptics_data *priv = psmouse->private;
276 276
277 if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) 277 if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
278 SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)))
278 return 0; 279 return 0;
279 280
280 if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL)) 281 if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index b902794bbf07..38c4bd87b2c9 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -336,11 +336,6 @@ static inline void
336capincci_alloc_minor(struct capidev *cdev, struct capincci *np) { } 336capincci_alloc_minor(struct capidev *cdev, struct capincci *np) { }
337static inline void capincci_free_minor(struct capincci *np) { } 337static inline void capincci_free_minor(struct capincci *np) { }
338 338
339static inline unsigned int capincci_minor_opencount(struct capincci *np)
340{
341 return 0;
342}
343
344#endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */ 339#endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
345 340
346static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci) 341static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci)
@@ -372,6 +367,7 @@ static void capincci_free(struct capidev *cdev, u32 ncci)
372 } 367 }
373} 368}
374 369
370#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
375static struct capincci *capincci_find(struct capidev *cdev, u32 ncci) 371static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
376{ 372{
377 struct capincci *np; 373 struct capincci *np;
@@ -382,7 +378,6 @@ static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
382 return NULL; 378 return NULL;
383} 379}
384 380
385#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
386/* -------- handle data queue --------------------------------------- */ 381/* -------- handle data queue --------------------------------------- */
387 382
388static struct sk_buff * 383static struct sk_buff *
@@ -578,8 +573,8 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
578 struct tty_struct *tty; 573 struct tty_struct *tty;
579 struct capiminor *mp; 574 struct capiminor *mp;
580 u16 datahandle; 575 u16 datahandle;
581#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
582 struct capincci *np; 576 struct capincci *np;
577#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
583 578
584 mutex_lock(&cdev->lock); 579 mutex_lock(&cdev->lock);
585 580
@@ -597,6 +592,12 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
597 goto unlock_out; 592 goto unlock_out;
598 } 593 }
599 594
595#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
596 skb_queue_tail(&cdev->recvqueue, skb);
597 wake_up_interruptible(&cdev->recvwait);
598
599#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
600
600 np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data)); 601 np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data));
601 if (!np) { 602 if (!np) {
602 printk(KERN_ERR "BUG: capi_signal: ncci not found\n"); 603 printk(KERN_ERR "BUG: capi_signal: ncci not found\n");
@@ -605,12 +606,6 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
605 goto unlock_out; 606 goto unlock_out;
606 } 607 }
607 608
608#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
609 skb_queue_tail(&cdev->recvqueue, skb);
610 wake_up_interruptible(&cdev->recvwait);
611
612#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
613
614 mp = np->minorp; 609 mp = np->minorp;
615 if (!mp) { 610 if (!mp) {
616 skb_queue_tail(&cdev->recvqueue, skb); 611 skb_queue_tail(&cdev->recvqueue, skb);
@@ -786,7 +781,6 @@ register_out:
786 return retval; 781 return retval;
787 782
788 case CAPI_GET_VERSION: 783 case CAPI_GET_VERSION:
789 {
790 if (copy_from_user(&data.contr, argp, 784 if (copy_from_user(&data.contr, argp,
791 sizeof(data.contr))) 785 sizeof(data.contr)))
792 return -EFAULT; 786 return -EFAULT;
@@ -796,11 +790,9 @@ register_out:
796 if (copy_to_user(argp, &data.version, 790 if (copy_to_user(argp, &data.version,
797 sizeof(data.version))) 791 sizeof(data.version)))
798 return -EFAULT; 792 return -EFAULT;
799 } 793 return 0;
800 return 0;
801 794
802 case CAPI_GET_SERIAL: 795 case CAPI_GET_SERIAL:
803 {
804 if (copy_from_user(&data.contr, argp, 796 if (copy_from_user(&data.contr, argp,
805 sizeof(data.contr))) 797 sizeof(data.contr)))
806 return -EFAULT; 798 return -EFAULT;
@@ -810,10 +802,9 @@ register_out:
810 if (copy_to_user(argp, data.serial, 802 if (copy_to_user(argp, data.serial,
811 sizeof(data.serial))) 803 sizeof(data.serial)))
812 return -EFAULT; 804 return -EFAULT;
813 } 805 return 0;
814 return 0; 806
815 case CAPI_GET_PROFILE: 807 case CAPI_GET_PROFILE:
816 {
817 if (copy_from_user(&data.contr, argp, 808 if (copy_from_user(&data.contr, argp,
818 sizeof(data.contr))) 809 sizeof(data.contr)))
819 return -EFAULT; 810 return -EFAULT;
@@ -837,11 +828,9 @@ register_out:
837 } 828 }
838 if (retval) 829 if (retval)
839 return -EFAULT; 830 return -EFAULT;
840 } 831 return 0;
841 return 0;
842 832
843 case CAPI_GET_MANUFACTURER: 833 case CAPI_GET_MANUFACTURER:
844 {
845 if (copy_from_user(&data.contr, argp, 834 if (copy_from_user(&data.contr, argp,
846 sizeof(data.contr))) 835 sizeof(data.contr)))
847 return -EFAULT; 836 return -EFAULT;
@@ -853,8 +842,8 @@ register_out:
853 sizeof(data.manufacturer))) 842 sizeof(data.manufacturer)))
854 return -EFAULT; 843 return -EFAULT;
855 844
856 } 845 return 0;
857 return 0; 846
858 case CAPI_GET_ERRCODE: 847 case CAPI_GET_ERRCODE:
859 data.errcode = cdev->errcode; 848 data.errcode = cdev->errcode;
860 cdev->errcode = CAPI_NOERROR; 849 cdev->errcode = CAPI_NOERROR;
@@ -870,8 +859,7 @@ register_out:
870 return 0; 859 return 0;
871 return -ENXIO; 860 return -ENXIO;
872 861
873 case CAPI_MANUFACTURER_CMD: 862 case CAPI_MANUFACTURER_CMD: {
874 {
875 struct capi_manufacturer_cmd mcmd; 863 struct capi_manufacturer_cmd mcmd;
876 if (!capable(CAP_SYS_ADMIN)) 864 if (!capable(CAP_SYS_ADMIN))
877 return -EPERM; 865 return -EPERM;
@@ -879,8 +867,6 @@ register_out:
879 return -EFAULT; 867 return -EFAULT;
880 return capi20_manufacturer(mcmd.cmd, mcmd.data); 868 return capi20_manufacturer(mcmd.cmd, mcmd.data);
881 } 869 }
882 return 0;
883
884 case CAPI_SET_FLAGS: 870 case CAPI_SET_FLAGS:
885 case CAPI_CLR_FLAGS: { 871 case CAPI_CLR_FLAGS: {
886 unsigned userflags; 872 unsigned userflags;
@@ -902,6 +888,11 @@ register_out:
902 return -EFAULT; 888 return -EFAULT;
903 return 0; 889 return 0;
904 890
891#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
892 case CAPI_NCCI_OPENCOUNT:
893 return 0;
894
895#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
905 case CAPI_NCCI_OPENCOUNT: { 896 case CAPI_NCCI_OPENCOUNT: {
906 struct capincci *nccip; 897 struct capincci *nccip;
907 unsigned ncci; 898 unsigned ncci;
@@ -918,7 +909,6 @@ register_out:
918 return count; 909 return count;
919 } 910 }
920 911
921#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
922 case CAPI_NCCI_GETUNIT: { 912 case CAPI_NCCI_GETUNIT: {
923 struct capincci *nccip; 913 struct capincci *nccip;
924 struct capiminor *mp; 914 struct capiminor *mp;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 6f5016b479f8..832bc807ed20 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -1593,7 +1593,7 @@ static int capidrv_command(isdn_ctrl *c, capidrv_contr *card)
1593 return capidrv_ioctl(c, card); 1593 return capidrv_ioctl(c, card);
1594 1594
1595 switch (c->command) { 1595 switch (c->command) {
1596 case ISDN_CMD_DIAL:{ 1596 case ISDN_CMD_DIAL: {
1597 u8 calling[ISDN_MSNLEN + 3]; 1597 u8 calling[ISDN_MSNLEN + 3];
1598 u8 called[ISDN_MSNLEN + 2]; 1598 u8 called[ISDN_MSNLEN + 2];
1599 1599
@@ -2072,7 +2072,8 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
2072 card->interface.writebuf_skb = if_sendbuf; 2072 card->interface.writebuf_skb = if_sendbuf;
2073 card->interface.writecmd = NULL; 2073 card->interface.writecmd = NULL;
2074 card->interface.readstat = if_readstat; 2074 card->interface.readstat = if_readstat;
2075 card->interface.features = ISDN_FEATURE_L2_HDLC | 2075 card->interface.features =
2076 ISDN_FEATURE_L2_HDLC |
2076 ISDN_FEATURE_L2_TRANS | 2077 ISDN_FEATURE_L2_TRANS |
2077 ISDN_FEATURE_L3_TRANS | 2078 ISDN_FEATURE_L3_TRANS |
2078 ISDN_FEATURE_P_UNKNOWN | 2079 ISDN_FEATURE_P_UNKNOWN |
@@ -2080,7 +2081,8 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
2080 ISDN_FEATURE_L2_X75UI | 2081 ISDN_FEATURE_L2_X75UI |
2081 ISDN_FEATURE_L2_X75BUI; 2082 ISDN_FEATURE_L2_X75BUI;
2082 if (profp->support1 & (1 << 2)) 2083 if (profp->support1 & (1 << 2))
2083 card->interface.features |= ISDN_FEATURE_L2_V11096 | 2084 card->interface.features |=
2085 ISDN_FEATURE_L2_V11096 |
2084 ISDN_FEATURE_L2_V11019 | 2086 ISDN_FEATURE_L2_V11019 |
2085 ISDN_FEATURE_L2_V11038; 2087 ISDN_FEATURE_L2_V11038;
2086 if (profp->support1 & (1 << 8)) 2088 if (profp->support1 & (1 << 8))
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index afa080258bfa..3b9278b333ba 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -410,10 +410,10 @@ static void check_pending(struct bas_cardstate *ucs)
410 if (!(ucs->basstate & BS_RESETTING)) 410 if (!(ucs->basstate & BS_RESETTING))
411 ucs->pending = 0; 411 ucs->pending = 0;
412 break; 412 break;
413 /* 413 /*
414 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately 414 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
415 * and should never end up here 415 * and should never end up here
416 */ 416 */
417 default: 417 default:
418 dev_warn(&ucs->interface->dev, 418 dev_warn(&ucs->interface->dev,
419 "unknown pending request 0x%02x cleared\n", 419 "unknown pending request 0x%02x cleared\n",
@@ -877,8 +877,7 @@ static void read_iso_callback(struct urb *urb)
877 for (i = 0; i < BAS_NUMFRAMES; i++) { 877 for (i = 0; i < BAS_NUMFRAMES; i++) {
878 ubc->isoinlost += urb->iso_frame_desc[i].actual_length; 878 ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
879 if (unlikely(urb->iso_frame_desc[i].status != 0 && 879 if (unlikely(urb->iso_frame_desc[i].status != 0 &&
880 urb->iso_frame_desc[i].status != 880 urb->iso_frame_desc[i].status != -EINPROGRESS))
881 -EINPROGRESS))
882 ubc->loststatus = urb->iso_frame_desc[i].status; 881 ubc->loststatus = urb->iso_frame_desc[i].status;
883 urb->iso_frame_desc[i].status = 0; 882 urb->iso_frame_desc[i].status = 0;
884 urb->iso_frame_desc[i].actual_length = 0; 883 urb->iso_frame_desc[i].actual_length = 0;
@@ -2078,16 +2077,14 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
2078/* Free hardware dependent part of the B channel structure 2077/* Free hardware dependent part of the B channel structure
2079 * parameter: 2078 * parameter:
2080 * bcs B channel structure 2079 * bcs B channel structure
2081 * return value:
2082 * !=0 on success
2083 */ 2080 */
2084static int gigaset_freebcshw(struct bc_state *bcs) 2081static void gigaset_freebcshw(struct bc_state *bcs)
2085{ 2082{
2086 struct bas_bc_state *ubc = bcs->hw.bas; 2083 struct bas_bc_state *ubc = bcs->hw.bas;
2087 int i; 2084 int i;
2088 2085
2089 if (!ubc) 2086 if (!ubc)
2090 return 0; 2087 return;
2091 2088
2092 /* kill URBs and tasklets before freeing - better safe than sorry */ 2089 /* kill URBs and tasklets before freeing - better safe than sorry */
2093 ubc->running = 0; 2090 ubc->running = 0;
@@ -2105,14 +2102,13 @@ static int gigaset_freebcshw(struct bc_state *bcs)
2105 kfree(ubc->isooutbuf); 2102 kfree(ubc->isooutbuf);
2106 kfree(ubc); 2103 kfree(ubc);
2107 bcs->hw.bas = NULL; 2104 bcs->hw.bas = NULL;
2108 return 1;
2109} 2105}
2110 2106
2111/* Initialize hardware dependent part of the B channel structure 2107/* Initialize hardware dependent part of the B channel structure
2112 * parameter: 2108 * parameter:
2113 * bcs B channel structure 2109 * bcs B channel structure
2114 * return value: 2110 * return value:
2115 * !=0 on success 2111 * 0 on success, error code < 0 on failure
2116 */ 2112 */
2117static int gigaset_initbcshw(struct bc_state *bcs) 2113static int gigaset_initbcshw(struct bc_state *bcs)
2118{ 2114{
@@ -2122,7 +2118,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
2122 bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL); 2118 bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
2123 if (!ubc) { 2119 if (!ubc) {
2124 pr_err("out of memory\n"); 2120 pr_err("out of memory\n");
2125 return 0; 2121 return -ENOMEM;
2126 } 2122 }
2127 2123
2128 ubc->running = 0; 2124 ubc->running = 0;
@@ -2139,7 +2135,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
2139 pr_err("out of memory\n"); 2135 pr_err("out of memory\n");
2140 kfree(ubc); 2136 kfree(ubc);
2141 bcs->hw.bas = NULL; 2137 bcs->hw.bas = NULL;
2142 return 0; 2138 return -ENOMEM;
2143 } 2139 }
2144 tasklet_init(&ubc->sent_tasklet, 2140 tasklet_init(&ubc->sent_tasklet,
2145 write_iso_tasklet, (unsigned long) bcs); 2141 write_iso_tasklet, (unsigned long) bcs);
@@ -2164,7 +2160,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
2164 ubc->stolen0s = 0; 2160 ubc->stolen0s = 0;
2165 tasklet_init(&ubc->rcvd_tasklet, 2161 tasklet_init(&ubc->rcvd_tasklet,
2166 read_iso_tasklet, (unsigned long) bcs); 2162 read_iso_tasklet, (unsigned long) bcs);
2167 return 1; 2163 return 0;
2168} 2164}
2169 2165
2170static void gigaset_reinitbcshw(struct bc_state *bcs) 2166static void gigaset_reinitbcshw(struct bc_state *bcs)
@@ -2187,6 +2183,12 @@ static void gigaset_freecshw(struct cardstate *cs)
2187 cs->hw.bas = NULL; 2183 cs->hw.bas = NULL;
2188} 2184}
2189 2185
2186/* Initialize hardware dependent part of the cardstate structure
2187 * parameter:
2188 * cs cardstate structure
2189 * return value:
2190 * 0 on success, error code < 0 on failure
2191 */
2190static int gigaset_initcshw(struct cardstate *cs) 2192static int gigaset_initcshw(struct cardstate *cs)
2191{ 2193{
2192 struct bas_cardstate *ucs; 2194 struct bas_cardstate *ucs;
@@ -2194,13 +2196,13 @@ static int gigaset_initcshw(struct cardstate *cs)
2194 cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL); 2196 cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL);
2195 if (!ucs) { 2197 if (!ucs) {
2196 pr_err("out of memory\n"); 2198 pr_err("out of memory\n");
2197 return 0; 2199 return -ENOMEM;
2198 } 2200 }
2199 ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL); 2201 ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
2200 if (!ucs->int_in_buf) { 2202 if (!ucs->int_in_buf) {
2201 kfree(ucs); 2203 kfree(ucs);
2202 pr_err("out of memory\n"); 2204 pr_err("out of memory\n");
2203 return 0; 2205 return -ENOMEM;
2204 } 2206 }
2205 2207
2206 ucs->urb_cmd_in = NULL; 2208 ucs->urb_cmd_in = NULL;
@@ -2219,7 +2221,7 @@ static int gigaset_initcshw(struct cardstate *cs)
2219 init_waitqueue_head(&ucs->waitqueue); 2221 init_waitqueue_head(&ucs->waitqueue);
2220 INIT_WORK(&ucs->int_in_wq, int_in_work); 2222 INIT_WORK(&ucs->int_in_wq, int_in_work);
2221 2223
2222 return 1; 2224 return 0;
2223} 2225}
2224 2226
2225/* freeurbs 2227/* freeurbs
@@ -2379,18 +2381,20 @@ static int gigaset_probe(struct usb_interface *interface,
2379 /* save address of controller structure */ 2381 /* save address of controller structure */
2380 usb_set_intfdata(interface, cs); 2382 usb_set_intfdata(interface, cs);
2381 2383
2382 if (!gigaset_start(cs)) 2384 rc = gigaset_start(cs);
2385 if (rc < 0)
2383 goto error; 2386 goto error;
2384 2387
2385 return 0; 2388 return 0;
2386 2389
2387allocerr: 2390allocerr:
2388 dev_err(cs->dev, "could not allocate URBs\n"); 2391 dev_err(cs->dev, "could not allocate URBs\n");
2392 rc = -ENOMEM;
2389error: 2393error:
2390 freeurbs(cs); 2394 freeurbs(cs);
2391 usb_set_intfdata(interface, NULL); 2395 usb_set_intfdata(interface, NULL);
2392 gigaset_freecs(cs); 2396 gigaset_freecs(cs);
2393 return -ENODEV; 2397 return rc;
2394} 2398}
2395 2399
2396/* gigaset_disconnect 2400/* gigaset_disconnect
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 343b5c80cb7b..27e4a3e21d64 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -14,6 +14,7 @@
14#include "gigaset.h" 14#include "gigaset.h"
15#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
16#include <linux/seq_file.h> 16#include <linux/seq_file.h>
17#include <linux/ratelimit.h>
17#include <linux/isdn/capilli.h> 18#include <linux/isdn/capilli.h>
18#include <linux/isdn/capicmd.h> 19#include <linux/isdn/capicmd.h>
19#include <linux/isdn/capiutil.h> 20#include <linux/isdn/capiutil.h>
@@ -108,51 +109,35 @@ static struct {
108 u8 *bc; 109 u8 *bc;
109 u8 *hlc; 110 u8 *hlc;
110} cip2bchlc[] = { 111} cip2bchlc[] = {
111 [1] = { "8090A3", NULL }, 112 [1] = { "8090A3", NULL }, /* Speech (A-law) */
112 /* Speech (A-law) */ 113 [2] = { "8890", NULL }, /* Unrestricted digital information */
113 [2] = { "8890", NULL }, 114 [3] = { "8990", NULL }, /* Restricted digital information */
114 /* Unrestricted digital information */ 115 [4] = { "9090A3", NULL }, /* 3,1 kHz audio (A-law) */
115 [3] = { "8990", NULL }, 116 [5] = { "9190", NULL }, /* 7 kHz audio */
116 /* Restricted digital information */ 117 [6] = { "9890", NULL }, /* Video */
117 [4] = { "9090A3", NULL }, 118 [7] = { "88C0C6E6", NULL }, /* Packet mode */
118 /* 3,1 kHz audio (A-law) */ 119 [8] = { "8890218F", NULL }, /* 56 kbit/s rate adaptation */
119 [5] = { "9190", NULL }, 120 [9] = { "9190A5", NULL }, /* Unrestricted digital information
120 /* 7 kHz audio */ 121 * with tones/announcements */
121 [6] = { "9890", NULL }, 122 [16] = { "8090A3", "9181" }, /* Telephony */
122 /* Video */ 123 [17] = { "9090A3", "9184" }, /* Group 2/3 facsimile */
123 [7] = { "88C0C6E6", NULL }, 124 [18] = { "8890", "91A1" }, /* Group 4 facsimile Class 1 */
124 /* Packet mode */ 125 [19] = { "8890", "91A4" }, /* Teletex service basic and mixed mode
125 [8] = { "8890218F", NULL }, 126 * and Group 4 facsimile service
126 /* 56 kbit/s rate adaptation */ 127 * Classes II and III */
127 [9] = { "9190A5", NULL }, 128 [20] = { "8890", "91A8" }, /* Teletex service basic and
128 /* Unrestricted digital information with tones/announcements */ 129 * processable mode */
129 [16] = { "8090A3", "9181" }, 130 [21] = { "8890", "91B1" }, /* Teletex service basic mode */
130 /* Telephony */ 131 [22] = { "8890", "91B2" }, /* International interworking for
131 [17] = { "9090A3", "9184" }, 132 * Videotex */
132 /* Group 2/3 facsimile */ 133 [23] = { "8890", "91B5" }, /* Telex */
133 [18] = { "8890", "91A1" }, 134 [24] = { "8890", "91B8" }, /* Message Handling Systems
134 /* Group 4 facsimile Class 1 */ 135 * in accordance with X.400 */
135 [19] = { "8890", "91A4" }, 136 [25] = { "8890", "91C1" }, /* OSI application
136 /* Teletex service basic and mixed mode 137 * in accordance with X.200 */
137 and Group 4 facsimile service Classes II and III */ 138 [26] = { "9190A5", "9181" }, /* 7 kHz telephony */
138 [20] = { "8890", "91A8" }, 139 [27] = { "9190A5", "916001" }, /* Video telephony, first connection */
139 /* Teletex service basic and processable mode */ 140 [28] = { "8890", "916002" }, /* Video telephony, second connection */
140 [21] = { "8890", "91B1" },
141 /* Teletex service basic mode */
142 [22] = { "8890", "91B2" },
143 /* International interworking for Videotex */
144 [23] = { "8890", "91B5" },
145 /* Telex */
146 [24] = { "8890", "91B8" },
147 /* Message Handling Systems in accordance with X.400 */
148 [25] = { "8890", "91C1" },
149 /* OSI application in accordance with X.200 */
150 [26] = { "9190A5", "9181" },
151 /* 7 kHz telephony */
152 [27] = { "9190A5", "916001" },
153 /* Video telephony, first connection */
154 [28] = { "8890", "916002" },
155 /* Video telephony, second connection */
156}; 141};
157 142
158/* 143/*
@@ -223,10 +208,14 @@ get_appl(struct gigaset_capi_ctr *iif, u16 appl)
223static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p) 208static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
224{ 209{
225#ifdef CONFIG_GIGASET_DEBUG 210#ifdef CONFIG_GIGASET_DEBUG
211 /* dump at most 20 messages in 20 secs */
212 static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20);
226 _cdebbuf *cdb; 213 _cdebbuf *cdb;
227 214
228 if (!(gigaset_debuglevel & level)) 215 if (!(gigaset_debuglevel & level))
229 return; 216 return;
217 if (!___ratelimit(&msg_dump_ratelimit, tag))
218 return;
230 219
231 cdb = capi_cmsg2str(p); 220 cdb = capi_cmsg2str(p);
232 if (cdb) { 221 if (cdb) {
@@ -1192,7 +1181,9 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
1192 confparam[3] = 2; /* length */ 1181 confparam[3] = 2; /* length */
1193 capimsg_setu16(confparam, 4, CapiSuccess); 1182 capimsg_setu16(confparam, 4, CapiSuccess);
1194 break; 1183 break;
1195 /* ToDo: add supported services */ 1184
1185 /* ToDo: add supported services */
1186
1196 default: 1187 default:
1197 dev_notice(cs->dev, 1188 dev_notice(cs->dev,
1198 "%s: unsupported supplementary service function 0x%04x\n", 1189 "%s: unsupported supplementary service function 0x%04x\n",
@@ -1766,7 +1757,8 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
1766 1757
1767 /* NCPI parameter: not applicable for B3 Transparent */ 1758 /* NCPI parameter: not applicable for B3 Transparent */
1768 ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI"); 1759 ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
1769 send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ? 1760 send_conf(iif, ap, skb,
1761 (cmsg->NCPI && cmsg->NCPI[0]) ?
1770 CapiNcpiNotSupportedByProtocol : CapiSuccess); 1762 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1771} 1763}
1772 1764
@@ -1882,6 +1874,9 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
1882 1874
1883 /* check for active logical connection */ 1875 /* check for active logical connection */
1884 if (bcs->apconnstate >= APCONN_ACTIVE) { 1876 if (bcs->apconnstate >= APCONN_ACTIVE) {
1877 /* clear it */
1878 bcs->apconnstate = APCONN_SETUP;
1879
1885 /* 1880 /*
1886 * emit DISCONNECT_B3_IND with cause 0x3301 1881 * emit DISCONNECT_B3_IND with cause 0x3301
1887 * use separate cmsg structure, as the content of iif->acmsg 1882 * use separate cmsg structure, as the content of iif->acmsg
@@ -1906,6 +1901,7 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
1906 } 1901 }
1907 capi_cmsg2message(b3cmsg, 1902 capi_cmsg2message(b3cmsg,
1908 __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN)); 1903 __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN));
1904 dump_cmsg(DEBUG_CMD, __func__, b3cmsg);
1909 kfree(b3cmsg); 1905 kfree(b3cmsg);
1910 capi_ctr_handle_message(&iif->ctr, ap->id, b3skb); 1906 capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
1911 } 1907 }
@@ -1966,7 +1962,8 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
1966 /* NCPI parameter: not applicable for B3 Transparent */ 1962 /* NCPI parameter: not applicable for B3 Transparent */
1967 ignore_cstruct_param(cs, cmsg->NCPI, 1963 ignore_cstruct_param(cs, cmsg->NCPI,
1968 "DISCONNECT_B3_REQ", "NCPI"); 1964 "DISCONNECT_B3_REQ", "NCPI");
1969 send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ? 1965 send_conf(iif, ap, skb,
1966 (cmsg->NCPI && cmsg->NCPI[0]) ?
1970 CapiNcpiNotSupportedByProtocol : CapiSuccess); 1967 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1971} 1968}
1972 1969
@@ -2059,12 +2056,6 @@ static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
2059} 2056}
2060 2057
2061/* 2058/*
2062 * dump unsupported/ignored messages at most twice per minute,
2063 * some apps send those very frequently
2064 */
2065static unsigned long ignored_msg_dump_time;
2066
2067/*
2068 * unsupported CAPI message handler 2059 * unsupported CAPI message handler
2069 */ 2060 */
2070static void do_unsupported(struct gigaset_capi_ctr *iif, 2061static void do_unsupported(struct gigaset_capi_ctr *iif,
@@ -2073,8 +2064,7 @@ static void do_unsupported(struct gigaset_capi_ctr *iif,
2073{ 2064{
2074 /* decode message */ 2065 /* decode message */
2075 capi_message2cmsg(&iif->acmsg, skb->data); 2066 capi_message2cmsg(&iif->acmsg, skb->data);
2076 if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) 2067 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2077 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2078 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); 2068 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
2079} 2069}
2080 2070
@@ -2085,11 +2075,9 @@ static void do_nothing(struct gigaset_capi_ctr *iif,
2085 struct gigaset_capi_appl *ap, 2075 struct gigaset_capi_appl *ap,
2086 struct sk_buff *skb) 2076 struct sk_buff *skb)
2087{ 2077{
2088 if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) { 2078 /* decode message */
2089 /* decode message */ 2079 capi_message2cmsg(&iif->acmsg, skb->data);
2090 capi_message2cmsg(&iif->acmsg, skb->data); 2080 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2091 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2092 }
2093 dev_kfree_skb_any(skb); 2081 dev_kfree_skb_any(skb);
2094} 2082}
2095 2083
@@ -2358,7 +2346,7 @@ static const struct file_operations gigaset_proc_fops = {
2358 * @cs: device descriptor structure. 2346 * @cs: device descriptor structure.
2359 * @isdnid: device name. 2347 * @isdnid: device name.
2360 * 2348 *
2361 * Return value: 1 for success, 0 for failure 2349 * Return value: 0 on success, error code < 0 on failure
2362 */ 2350 */
2363int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) 2351int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
2364{ 2352{
@@ -2368,7 +2356,7 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
2368 iif = kmalloc(sizeof(*iif), GFP_KERNEL); 2356 iif = kmalloc(sizeof(*iif), GFP_KERNEL);
2369 if (!iif) { 2357 if (!iif) {
2370 pr_err("%s: out of memory\n", __func__); 2358 pr_err("%s: out of memory\n", __func__);
2371 return 0; 2359 return -ENOMEM;
2372 } 2360 }
2373 2361
2374 /* prepare controller structure */ 2362 /* prepare controller structure */
@@ -2392,12 +2380,12 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
2392 if (rc) { 2380 if (rc) {
2393 pr_err("attach_capi_ctr failed (%d)\n", rc); 2381 pr_err("attach_capi_ctr failed (%d)\n", rc);
2394 kfree(iif); 2382 kfree(iif);
2395 return 0; 2383 return rc;
2396 } 2384 }
2397 2385
2398 cs->iif = iif; 2386 cs->iif = iif;
2399 cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN; 2387 cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
2400 return 1; 2388 return 0;
2401} 2389}
2402 2390
2403/** 2391/**
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 76792707f995..aa41485bc594 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -194,13 +194,13 @@ int gigaset_get_channel(struct bc_state *bcs)
194 gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d", 194 gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d",
195 bcs->channel); 195 bcs->channel);
196 spin_unlock_irqrestore(&bcs->cs->lock, flags); 196 spin_unlock_irqrestore(&bcs->cs->lock, flags);
197 return 0; 197 return -EBUSY;
198 } 198 }
199 ++bcs->use_count; 199 ++bcs->use_count;
200 bcs->busy = 1; 200 bcs->busy = 1;
201 gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel); 201 gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel);
202 spin_unlock_irqrestore(&bcs->cs->lock, flags); 202 spin_unlock_irqrestore(&bcs->cs->lock, flags);
203 return 1; 203 return 0;
204} 204}
205 205
206struct bc_state *gigaset_get_free_channel(struct cardstate *cs) 206struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
@@ -258,7 +258,7 @@ int gigaset_get_channels(struct cardstate *cs)
258 spin_unlock_irqrestore(&cs->lock, flags); 258 spin_unlock_irqrestore(&cs->lock, flags);
259 gig_dbg(DEBUG_CHANNEL, 259 gig_dbg(DEBUG_CHANNEL,
260 "could not allocate all channels"); 260 "could not allocate all channels");
261 return 0; 261 return -EBUSY;
262 } 262 }
263 for (i = 0; i < cs->channels; ++i) 263 for (i = 0; i < cs->channels; ++i)
264 ++cs->bcs[i].use_count; 264 ++cs->bcs[i].use_count;
@@ -266,7 +266,7 @@ int gigaset_get_channels(struct cardstate *cs)
266 266
267 gig_dbg(DEBUG_CHANNEL, "allocated all channels"); 267 gig_dbg(DEBUG_CHANNEL, "allocated all channels");
268 268
269 return 1; 269 return 0;
270} 270}
271 271
272void gigaset_free_channels(struct cardstate *cs) 272void gigaset_free_channels(struct cardstate *cs)
@@ -362,7 +362,7 @@ struct event_t *gigaset_add_event(struct cardstate *cs,
362} 362}
363EXPORT_SYMBOL_GPL(gigaset_add_event); 363EXPORT_SYMBOL_GPL(gigaset_add_event);
364 364
365static void free_strings(struct at_state_t *at_state) 365static void clear_at_state(struct at_state_t *at_state)
366{ 366{
367 int i; 367 int i;
368 368
@@ -372,18 +372,13 @@ static void free_strings(struct at_state_t *at_state)
372 } 372 }
373} 373}
374 374
375static void clear_at_state(struct at_state_t *at_state) 375static void dealloc_temp_at_states(struct cardstate *cs)
376{
377 free_strings(at_state);
378}
379
380static void dealloc_at_states(struct cardstate *cs)
381{ 376{
382 struct at_state_t *cur, *next; 377 struct at_state_t *cur, *next;
383 378
384 list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) { 379 list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
385 list_del(&cur->list); 380 list_del(&cur->list);
386 free_strings(cur); 381 clear_at_state(cur);
387 kfree(cur); 382 kfree(cur);
388 } 383 }
389} 384}
@@ -393,8 +388,7 @@ static void gigaset_freebcs(struct bc_state *bcs)
393 int i; 388 int i;
394 389
395 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel); 390 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
396 if (!bcs->cs->ops->freebcshw(bcs)) 391 bcs->cs->ops->freebcshw(bcs);
397 gig_dbg(DEBUG_INIT, "failed");
398 392
399 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel); 393 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
400 clear_at_state(&bcs->at_state); 394 clear_at_state(&bcs->at_state);
@@ -512,7 +506,7 @@ void gigaset_freecs(struct cardstate *cs)
512 case 1: /* error when registering to LL */ 506 case 1: /* error when registering to LL */
513 gig_dbg(DEBUG_INIT, "clearing at_state"); 507 gig_dbg(DEBUG_INIT, "clearing at_state");
514 clear_at_state(&cs->at_state); 508 clear_at_state(&cs->at_state);
515 dealloc_at_states(cs); 509 dealloc_temp_at_states(cs);
516 510
517 /* fall through */ 511 /* fall through */
518 case 0: /* error in basic setup */ 512 case 0: /* error in basic setup */
@@ -571,6 +565,8 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
571 * @inbuf: buffer structure. 565 * @inbuf: buffer structure.
572 * @src: received data. 566 * @src: received data.
573 * @numbytes: number of bytes received. 567 * @numbytes: number of bytes received.
568 *
569 * Return value: !=0 if some data was appended
574 */ 570 */
575int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, 571int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
576 unsigned numbytes) 572 unsigned numbytes)
@@ -614,8 +610,8 @@ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
614EXPORT_SYMBOL_GPL(gigaset_fill_inbuf); 610EXPORT_SYMBOL_GPL(gigaset_fill_inbuf);
615 611
616/* Initialize the b-channel structure */ 612/* Initialize the b-channel structure */
617static struct bc_state *gigaset_initbcs(struct bc_state *bcs, 613static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs,
618 struct cardstate *cs, int channel) 614 int channel)
619{ 615{
620 int i; 616 int i;
621 617
@@ -654,11 +650,7 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
654 bcs->apconnstate = 0; 650 bcs->apconnstate = 0;
655 651
656 gig_dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel); 652 gig_dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel);
657 if (cs->ops->initbcshw(bcs)) 653 return cs->ops->initbcshw(bcs);
658 return bcs;
659
660 gig_dbg(DEBUG_INIT, " failed");
661 return NULL;
662} 654}
663 655
664/** 656/**
@@ -757,7 +749,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
757 cs->cmdbytes = 0; 749 cs->cmdbytes = 0;
758 750
759 gig_dbg(DEBUG_INIT, "setting up iif"); 751 gig_dbg(DEBUG_INIT, "setting up iif");
760 if (!gigaset_isdn_regdev(cs, modulename)) { 752 if (gigaset_isdn_regdev(cs, modulename) < 0) {
761 pr_err("error registering ISDN device\n"); 753 pr_err("error registering ISDN device\n");
762 goto error; 754 goto error;
763 } 755 }
@@ -765,7 +757,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
765 make_valid(cs, VALID_ID); 757 make_valid(cs, VALID_ID);
766 ++cs->cs_init; 758 ++cs->cs_init;
767 gig_dbg(DEBUG_INIT, "setting up hw"); 759 gig_dbg(DEBUG_INIT, "setting up hw");
768 if (!cs->ops->initcshw(cs)) 760 if (cs->ops->initcshw(cs) < 0)
769 goto error; 761 goto error;
770 762
771 ++cs->cs_init; 763 ++cs->cs_init;
@@ -779,7 +771,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
779 /* set up channel data structures */ 771 /* set up channel data structures */
780 for (i = 0; i < channels; ++i) { 772 for (i = 0; i < channels; ++i) {
781 gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i); 773 gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
782 if (!gigaset_initbcs(cs->bcs + i, cs, i)) { 774 if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) {
783 pr_err("could not allocate channel %d data\n", i); 775 pr_err("could not allocate channel %d data\n", i);
784 goto error; 776 goto error;
785 } 777 }
@@ -848,8 +840,7 @@ static void cleanup_cs(struct cardstate *cs)
848 cs->mstate = MS_UNINITIALIZED; 840 cs->mstate = MS_UNINITIALIZED;
849 841
850 clear_at_state(&cs->at_state); 842 clear_at_state(&cs->at_state);
851 dealloc_at_states(cs); 843 dealloc_temp_at_states(cs);
852 free_strings(&cs->at_state);
853 gigaset_at_init(&cs->at_state, NULL, cs, 0); 844 gigaset_at_init(&cs->at_state, NULL, cs, 0);
854 845
855 cs->inbuf->inputstate = INS_command; 846 cs->inbuf->inputstate = INS_command;
@@ -875,7 +866,7 @@ static void cleanup_cs(struct cardstate *cs)
875 866
876 for (i = 0; i < cs->channels; ++i) { 867 for (i = 0; i < cs->channels; ++i) {
877 gigaset_freebcs(cs->bcs + i); 868 gigaset_freebcs(cs->bcs + i);
878 if (!gigaset_initbcs(cs->bcs + i, cs, i)) 869 if (gigaset_initbcs(cs->bcs + i, cs, i) < 0)
879 pr_err("could not allocate channel %d data\n", i); 870 pr_err("could not allocate channel %d data\n", i);
880 } 871 }
881 872
@@ -896,14 +887,14 @@ static void cleanup_cs(struct cardstate *cs)
896 * waiting for completion of the initialization. 887 * waiting for completion of the initialization.
897 * 888 *
898 * Return value: 889 * Return value:
899 * 1 - success, 0 - error 890 * 0 on success, error code < 0 on failure
900 */ 891 */
901int gigaset_start(struct cardstate *cs) 892int gigaset_start(struct cardstate *cs)
902{ 893{
903 unsigned long flags; 894 unsigned long flags;
904 895
905 if (mutex_lock_interruptible(&cs->mutex)) 896 if (mutex_lock_interruptible(&cs->mutex))
906 return 0; 897 return -EBUSY;
907 898
908 spin_lock_irqsave(&cs->lock, flags); 899 spin_lock_irqsave(&cs->lock, flags);
909 cs->connected = 1; 900 cs->connected = 1;
@@ -927,11 +918,11 @@ int gigaset_start(struct cardstate *cs)
927 wait_event(cs->waitqueue, !cs->waiting); 918 wait_event(cs->waitqueue, !cs->waiting);
928 919
929 mutex_unlock(&cs->mutex); 920 mutex_unlock(&cs->mutex);
930 return 1; 921 return 0;
931 922
932error: 923error:
933 mutex_unlock(&cs->mutex); 924 mutex_unlock(&cs->mutex);
934 return 0; 925 return -ENOMEM;
935} 926}
936EXPORT_SYMBOL_GPL(gigaset_start); 927EXPORT_SYMBOL_GPL(gigaset_start);
937 928
@@ -943,7 +934,7 @@ EXPORT_SYMBOL_GPL(gigaset_start);
943 * waiting for completion of the shutdown. 934 * waiting for completion of the shutdown.
944 * 935 *
945 * Return value: 936 * Return value:
946 * 0 - success, -1 - error (no device associated) 937 * 0 - success, -ENODEV - error (no device associated)
947 */ 938 */
948int gigaset_shutdown(struct cardstate *cs) 939int gigaset_shutdown(struct cardstate *cs)
949{ 940{
@@ -951,7 +942,7 @@ int gigaset_shutdown(struct cardstate *cs)
951 942
952 if (!(cs->flags & VALID_MINOR)) { 943 if (!(cs->flags & VALID_MINOR)) {
953 mutex_unlock(&cs->mutex); 944 mutex_unlock(&cs->mutex);
954 return -1; 945 return -ENODEV;
955 } 946 }
956 947
957 cs->waiting = 1; 948 cs->waiting = 1;
diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c
index 19b1c779d50f..570c2d53b84e 100644
--- a/drivers/isdn/gigaset/dummyll.c
+++ b/drivers/isdn/gigaset/dummyll.c
@@ -60,7 +60,7 @@ void gigaset_isdn_stop(struct cardstate *cs)
60 60
61int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) 61int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
62{ 62{
63 return 1; 63 return 0;
64} 64}
65 65
66void gigaset_isdn_unregdev(struct cardstate *cs) 66void gigaset_isdn_unregdev(struct cardstate *cs)
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 624a8256a77f..2e6963dc740e 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -153,103 +153,104 @@ struct reply_t gigaset_tab_nocid[] =
153 * action, command */ 153 * action, command */
154 154
155/* initialize device, set cid mode if possible */ 155/* initialize device, set cid mode if possible */
156 {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} }, 156 {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} },
157 157
158 {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"}, 158 {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"},
159 {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING}, 159 {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING},
160 "+GMR\r"}, 160 "+GMR\r"},
161 161
162 {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"}, 162 {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"},
163 {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"}, 163 {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"},
164 164
165 {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1}, 165 {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1},
166 "^SDLE=0\r"}, 166 "^SDLE=0\r"},
167 {RSP_OK, 108, 108, -1, 104, -1}, 167 {RSP_OK, 108, 108, -1, 104, -1},
168 {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"}, 168 {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"},
169 {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} }, 169 {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} },
170 {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} }, 170 {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} },
171 171
172 {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0, 172 {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0,
173 ACT_HUPMODEM, 173 ACT_HUPMODEM,
174 ACT_TIMEOUT} }, 174 ACT_TIMEOUT} },
175 {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"}, 175 {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"},
176 176
177 {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"}, 177 {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"},
178 {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} }, 178 {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} },
179 {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} }, 179 {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
180 {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} }, 180 {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
181 181
182 {RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} }, 182 {RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
183 {EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} }, 183 {EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
184 184
185 {RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} }, 185 {RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} },
186 186
187 {EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER, 187 {EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER,
188 ACT_INIT} }, 188 ACT_INIT} },
189 {RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER, 189 {RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER,
190 ACT_INIT} }, 190 ACT_INIT} },
191 {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER, 191 {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER,
192 ACT_INIT} }, 192 ACT_INIT} },
193 {RSP_NONE, 121, 121, -1, 120, 0, {ACT_GETSTRING} },
193 194
194/* leave dle mode */ 195/* leave dle mode */
195 {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, 196 {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
196 {RSP_OK, 201, 201, -1, 202, -1}, 197 {RSP_OK, 201, 201, -1, 202, -1},
197 {RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} }, 198 {RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} },
198 {RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} }, 199 {RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} },
199 {RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} }, 200 {RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
200 {EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} }, 201 {EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
201 202
202/* enter dle mode */ 203/* enter dle mode */
203 {RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"}, 204 {RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
204 {RSP_OK, 251, 251, -1, 252, -1}, 205 {RSP_OK, 251, 251, -1, 252, -1},
205 {RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} }, 206 {RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} },
206 {RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} }, 207 {RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
207 {EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} }, 208 {EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
208 209
209/* incoming call */ 210/* incoming call */
210 {RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} }, 211 {RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} },
211 212
212/* get cid */ 213/* get cid */
213 {RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"}, 214 {RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
214 {RSP_OK, 301, 301, -1, 302, -1}, 215 {RSP_OK, 301, 301, -1, 302, -1},
215 {RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} }, 216 {RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} },
216 {RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} }, 217 {RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} },
217 {EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} }, 218 {EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} },
218 219
219/* enter cid mode */ 220/* enter cid mode */
220 {RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"}, 221 {RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
221 {RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} }, 222 {RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} },
222 {RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} }, 223 {RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
223 {EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} }, 224 {EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
224 225
225/* leave cid mode */ 226/* leave cid mode */
226 {RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"}, 227 {RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"},
227 {RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} }, 228 {RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} },
228 {RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} }, 229 {RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
229 {EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} }, 230 {EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
230 231
231/* abort getting cid */ 232/* abort getting cid */
232 {RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} }, 233 {RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} },
233 234
234/* reset */ 235/* reset */
235 {RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"}, 236 {RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
236 {RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} }, 237 {RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} },
237 {RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} }, 238 {RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
238 {EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} }, 239 {EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
239 {RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} }, 240 {RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} },
240 241
241 {EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} }, 242 {EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} },
242 {EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} }, 243 {EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} },
243 {EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} }, 244 {EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} },
244 {EV_START, -1, -1, -1, -1, -1, {ACT_START} }, 245 {EV_START, -1, -1, -1, -1, -1, {ACT_START} },
245 {EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} }, 246 {EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} },
246 {EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} }, 247 {EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} },
247 248
248/* misc. */ 249/* misc. */
249 {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, 250 {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
250 {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} }, 251 {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
251 {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} }, 252 {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
252 {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} }, 253 {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
253 {RSP_LAST} 254 {RSP_LAST}
254}; 255};
255 256
@@ -261,90 +262,90 @@ struct reply_t gigaset_tab_cid[] =
261 * action, command */ 262 * action, command */
262 263
263/* dial */ 264/* dial */
264 {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} }, 265 {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
265 {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD + AT_BC} }, 266 {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD + AT_BC} },
266 {RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD + AT_PROTO} }, 267 {RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD + AT_PROTO} },
267 {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} }, 268 {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} },
268 {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD + AT_MSN} }, 269 {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD + AT_MSN} },
269 {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} }, 270 {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
270 {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} }, 271 {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
271 {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} }, 272 {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
272 {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} }, 273 {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
273 {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"}, 274 {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"},
274 {RSP_OK, 608, 608, -1, 609, -1}, 275 {RSP_OK, 608, 608, -1, 609, -1},
275 {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} }, 276 {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} },
276 {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} }, 277 {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} },
277 278
278 {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} }, 279 {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
279 {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} }, 280 {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
280 281
281/* optional dialing responses */ 282/* optional dialing responses */
282 {EV_BC_OPEN, 650, 650, -1, 651, -1}, 283 {EV_BC_OPEN, 650, 650, -1, 651, -1},
283 {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} }, 284 {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} },
284 {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} }, 285 {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} },
285 {RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} }, 286 {RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} },
286 {RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} }, 287 {RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },
287 288
288/* connect */ 289/* connect */
289 {RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} }, 290 {RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
290 {RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT, 291 {RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
291 ACT_NOTIFY_BC_UP} }, 292 ACT_NOTIFY_BC_UP} },
292 {RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} }, 293 {RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
293 {RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT, 294 {RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
294 ACT_NOTIFY_BC_UP} }, 295 ACT_NOTIFY_BC_UP} },
295 {EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} }, 296 {EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} },
296 297
297/* remote hangup */ 298/* remote hangup */
298 {RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} }, 299 {RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
299 {RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} }, 300 {RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
300 {RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} }, 301 {RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
301 302
302/* hangup */ 303/* hangup */
303 {EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} }, 304 {EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} },
304 {RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, 305 {RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"},
305 {RSP_OK, 401, 401, -1, 402, 5}, 306 {RSP_OK, 401, 401, -1, 402, 5},
306 {RSP_ZVLS, 402, 402, 0, 403, 5}, 307 {RSP_ZVLS, 402, 402, 0, 403, 5},
307 {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, 308 {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
308 {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, 309 {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
309 {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, 310 {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
310 {RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} }, 311 {RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} },
311 {EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} }, 312 {EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} },
312 313
313 {EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} }, 314 {EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
314 315
315/* ring */ 316/* ring */
316 {RSP_ZBC, 700, 700, -1, -1, -1, {0} }, 317 {RSP_ZBC, 700, 700, -1, -1, -1, {0} },
317 {RSP_ZHLC, 700, 700, -1, -1, -1, {0} }, 318 {RSP_ZHLC, 700, 700, -1, -1, -1, {0} },
318 {RSP_NMBR, 700, 700, -1, -1, -1, {0} }, 319 {RSP_NMBR, 700, 700, -1, -1, -1, {0} },
319 {RSP_ZCPN, 700, 700, -1, -1, -1, {0} }, 320 {RSP_ZCPN, 700, 700, -1, -1, -1, {0} },
320 {RSP_ZCTP, 700, 700, -1, -1, -1, {0} }, 321 {RSP_ZCTP, 700, 700, -1, -1, -1, {0} },
321 {EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} }, 322 {EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} },
322 {EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} }, 323 {EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
323 324
324/*accept icall*/ 325/*accept icall*/
325 {EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} }, 326 {EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} },
326 {RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD + AT_PROTO} }, 327 {RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD + AT_PROTO} },
327 {RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD + AT_ISO} }, 328 {RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD + AT_ISO} },
328 {RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"}, 329 {RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"},
329 {RSP_OK, 723, 723, -1, 724, 5, {0} }, 330 {RSP_OK, 723, 723, -1, 724, 5, {0} },
330 {RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} }, 331 {RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} },
331 {RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} }, 332 {RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
332 {EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} }, 333 {EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
333 {RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} }, 334 {RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} },
334 {RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} }, 335 {RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} },
335 {RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} }, 336 {RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },
336 337
337 {EV_BC_OPEN, 750, 750, -1, 751, -1}, 338 {EV_BC_OPEN, 750, 750, -1, 751, -1},
338 {EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} }, 339 {EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} },
339 340
340/* B channel closed (general case) */ 341/* B channel closed (general case) */
341 {EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} }, 342 {EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} },
342 343
343/* misc. */ 344/* misc. */
344 {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} }, 345 {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} },
345 {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} }, 346 {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
346 {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} }, 347 {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
347 {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} }, 348 {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
348 {RSP_LAST} 349 {RSP_LAST}
349}; 350};
350 351
@@ -648,16 +649,16 @@ static void disconnect(struct at_state_t **at_state_p)
648static inline struct at_state_t *get_free_channel(struct cardstate *cs, 649static inline struct at_state_t *get_free_channel(struct cardstate *cs,
649 int cid) 650 int cid)
650/* cids: >0: siemens-cid 651/* cids: >0: siemens-cid
651 0: without cid 652 * 0: without cid
652 -1: no cid assigned yet 653 * -1: no cid assigned yet
653*/ 654 */
654{ 655{
655 unsigned long flags; 656 unsigned long flags;
656 int i; 657 int i;
657 struct at_state_t *ret; 658 struct at_state_t *ret;
658 659
659 for (i = 0; i < cs->channels; ++i) 660 for (i = 0; i < cs->channels; ++i)
660 if (gigaset_get_channel(cs->bcs + i)) { 661 if (gigaset_get_channel(cs->bcs + i) >= 0) {
661 ret = &cs->bcs[i].at_state; 662 ret = &cs->bcs[i].at_state;
662 ret->cid = cid; 663 ret->cid = cid;
663 return ret; 664 return ret;
@@ -922,18 +923,18 @@ static void do_stop(struct cardstate *cs)
922 * channel >= 0: getting cid for the channel failed 923 * channel >= 0: getting cid for the channel failed
923 * channel < 0: entering cid mode failed 924 * channel < 0: entering cid mode failed
924 * 925 *
925 * returns 0 on failure 926 * returns 0 on success, <0 on failure
926 */ 927 */
927static int reinit_and_retry(struct cardstate *cs, int channel) 928static int reinit_and_retry(struct cardstate *cs, int channel)
928{ 929{
929 int i; 930 int i;
930 931
931 if (--cs->retry_count <= 0) 932 if (--cs->retry_count <= 0)
932 return 0; 933 return -EFAULT;
933 934
934 for (i = 0; i < cs->channels; ++i) 935 for (i = 0; i < cs->channels; ++i)
935 if (cs->bcs[i].at_state.cid > 0) 936 if (cs->bcs[i].at_state.cid > 0)
936 return 0; 937 return -EBUSY;
937 938
938 if (channel < 0) 939 if (channel < 0)
939 dev_warn(cs->dev, 940 dev_warn(cs->dev,
@@ -944,7 +945,7 @@ static int reinit_and_retry(struct cardstate *cs, int channel)
944 cs->bcs[channel].at_state.pending_commands |= PC_CID; 945 cs->bcs[channel].at_state.pending_commands |= PC_CID;
945 } 946 }
946 schedule_init(cs, MS_INIT); 947 schedule_init(cs, MS_INIT);
947 return 1; 948 return 0;
948} 949}
949 950
950static int at_state_invalid(struct cardstate *cs, 951static int at_state_invalid(struct cardstate *cs,
@@ -1015,7 +1016,7 @@ static int do_lock(struct cardstate *cs)
1015 if (cs->bcs[i].at_state.pending_commands) 1016 if (cs->bcs[i].at_state.pending_commands)
1016 return -EBUSY; 1017 return -EBUSY;
1017 1018
1018 if (!gigaset_get_channels(cs)) 1019 if (gigaset_get_channels(cs) < 0)
1019 return -EBUSY; 1020 return -EBUSY;
1020 1021
1021 break; 1022 break;
@@ -1124,7 +1125,7 @@ static void do_action(int action, struct cardstate *cs,
1124 init_failed(cs, M_UNKNOWN); 1125 init_failed(cs, M_UNKNOWN);
1125 break; 1126 break;
1126 } 1127 }
1127 if (!reinit_and_retry(cs, -1)) 1128 if (reinit_and_retry(cs, -1) < 0)
1128 schedule_init(cs, MS_RECOVER); 1129 schedule_init(cs, MS_RECOVER);
1129 break; 1130 break;
1130 case ACT_FAILUMODE: 1131 case ACT_FAILUMODE:
@@ -1267,7 +1268,7 @@ static void do_action(int action, struct cardstate *cs,
1267 case ACT_FAILCID: 1268 case ACT_FAILCID:
1268 cs->cur_at_seq = SEQ_NONE; 1269 cs->cur_at_seq = SEQ_NONE;
1269 channel = cs->curchannel; 1270 channel = cs->curchannel;
1270 if (!reinit_and_retry(cs, channel)) { 1271 if (reinit_and_retry(cs, channel) < 0) {
1271 dev_warn(cs->dev, 1272 dev_warn(cs->dev,
1272 "Could not get a call ID. Cannot dial.\n"); 1273 "Could not get a call ID. Cannot dial.\n");
1273 at_state2 = &cs->bcs[channel].at_state; 1274 at_state2 = &cs->bcs[channel].at_state;
@@ -1314,8 +1315,9 @@ static void do_action(int action, struct cardstate *cs,
1314 s = ev->ptr; 1315 s = ev->ptr;
1315 1316
1316 if (!strcmp(s, "OK")) { 1317 if (!strcmp(s, "OK")) {
1318 /* OK without version string: assume old response */
1317 *p_genresp = 1; 1319 *p_genresp = 1;
1318 *p_resp_code = RSP_ERROR; 1320 *p_resp_code = RSP_NONE;
1319 break; 1321 break;
1320 } 1322 }
1321 1323
@@ -1372,7 +1374,8 @@ static void do_action(int action, struct cardstate *cs,
1372 ev->parameter, at_state->ConState); 1374 ev->parameter, at_state->ConState);
1373 break; 1375 break;
1374 1376
1375 /* events from the LL */ 1377 /* events from the LL */
1378
1376 case ACT_DIAL: 1379 case ACT_DIAL:
1377 start_dial(at_state, ev->ptr, ev->parameter); 1380 start_dial(at_state, ev->ptr, ev->parameter);
1378 break; 1381 break;
@@ -1385,7 +1388,8 @@ static void do_action(int action, struct cardstate *cs,
1385 cs->commands_pending = 1; 1388 cs->commands_pending = 1;
1386 break; 1389 break;
1387 1390
1388 /* hotplug events */ 1391 /* hotplug events */
1392
1389 case ACT_STOP: 1393 case ACT_STOP:
1390 do_stop(cs); 1394 do_stop(cs);
1391 break; 1395 break;
@@ -1393,7 +1397,8 @@ static void do_action(int action, struct cardstate *cs,
1393 do_start(cs); 1397 do_start(cs);
1394 break; 1398 break;
1395 1399
1396 /* events from the interface */ 1400 /* events from the interface */
1401
1397 case ACT_IF_LOCK: 1402 case ACT_IF_LOCK:
1398 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs); 1403 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
1399 cs->waiting = 0; 1404 cs->waiting = 0;
@@ -1412,7 +1417,8 @@ static void do_action(int action, struct cardstate *cs,
1412 wake_up(&cs->waitqueue); 1417 wake_up(&cs->waitqueue);
1413 break; 1418 break;
1414 1419
1415 /* events from the proc file system */ 1420 /* events from the proc file system */
1421
1416 case ACT_PROC_CIDMODE: 1422 case ACT_PROC_CIDMODE:
1417 spin_lock_irqsave(&cs->lock, flags); 1423 spin_lock_irqsave(&cs->lock, flags);
1418 if (ev->parameter != cs->cidmode) { 1424 if (ev->parameter != cs->cidmode) {
@@ -1431,7 +1437,8 @@ static void do_action(int action, struct cardstate *cs,
1431 wake_up(&cs->waitqueue); 1437 wake_up(&cs->waitqueue);
1432 break; 1438 break;
1433 1439
1434 /* events from the hardware drivers */ 1440 /* events from the hardware drivers */
1441
1435 case ACT_NOTIFY_BC_DOWN: 1442 case ACT_NOTIFY_BC_DOWN:
1436 bchannel_down(bcs); 1443 bchannel_down(bcs);
1437 break; 1444 break;
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 1dc25131e670..8e2fc8f31d16 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -163,8 +163,8 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
163#define BAS_LOWFRAME 5 /* " " with negative flow control */ 163#define BAS_LOWFRAME 5 /* " " with negative flow control */
164#define BAS_CORRFRAMES 4 /* flow control multiplicator */ 164#define BAS_CORRFRAMES 4 /* flow control multiplicator */
165 165
166#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) 166#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isoc in buf
167/* size of isoc in buf per URB */ 167 * per URB */
168#define BAS_OUTBUFSIZE 4096 /* size of common isoc out buffer */ 168#define BAS_OUTBUFSIZE 4096 /* size of common isoc out buffer */
169#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isoc out buf */ 169#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isoc out buf */
170 170
@@ -471,18 +471,18 @@ struct cardstate {
471 for */ 471 for */
472 int commands_pending; /* flag(s) in xxx.commands_pending have 472 int commands_pending; /* flag(s) in xxx.commands_pending have
473 been set */ 473 been set */
474 struct tasklet_struct event_tasklet; 474 struct tasklet_struct
475 /* tasklet for serializing AT commands. 475 event_tasklet; /* tasklet for serializing AT commands.
476 * Scheduled 476 * Scheduled
477 * -> for modem reponses (and 477 * -> for modem reponses (and
478 * incoming data for M10x) 478 * incoming data for M10x)
479 * -> on timeout 479 * -> on timeout
480 * -> after setting bits in 480 * -> after setting bits in
481 * xxx.at_state.pending_command 481 * xxx.at_state.pending_command
482 * (e.g. command from LL) */ 482 * (e.g. command from LL) */
483 struct tasklet_struct write_tasklet; 483 struct tasklet_struct
484 /* tasklet for serial output 484 write_tasklet; /* tasklet for serial output
485 * (not used in base driver) */ 485 * (not used in base driver) */
486 486
487 /* event queue */ 487 /* event queue */
488 struct event_t events[MAX_EVENTS]; 488 struct event_t events[MAX_EVENTS];
@@ -583,7 +583,7 @@ struct gigaset_ops {
583 int (*initbcshw)(struct bc_state *bcs); 583 int (*initbcshw)(struct bc_state *bcs);
584 584
585 /* Called by gigaset_freecs() for freeing bcs->hw.xxx */ 585 /* Called by gigaset_freecs() for freeing bcs->hw.xxx */
586 int (*freebcshw)(struct bc_state *bcs); 586 void (*freebcshw)(struct bc_state *bcs);
587 587
588 /* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */ 588 /* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */
589 void (*reinitbcshw)(struct bc_state *bcs); 589 void (*reinitbcshw)(struct bc_state *bcs);
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 0f13eb1de657..2d75329007f1 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -229,7 +229,7 @@ static int command_from_LL(isdn_ctrl *cntrl)
229 return -EINVAL; 229 return -EINVAL;
230 } 230 }
231 bcs = cs->bcs + ch; 231 bcs = cs->bcs + ch;
232 if (!gigaset_get_channel(bcs)) { 232 if (gigaset_get_channel(bcs) < 0) {
233 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n"); 233 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
234 return -EBUSY; 234 return -EBUSY;
235 } 235 }
@@ -618,7 +618,7 @@ void gigaset_isdn_stop(struct cardstate *cs)
618 * @cs: device descriptor structure. 618 * @cs: device descriptor structure.
619 * @isdnid: device name. 619 * @isdnid: device name.
620 * 620 *
621 * Return value: 1 for success, 0 for failure 621 * Return value: 0 on success, error code < 0 on failure
622 */ 622 */
623int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) 623int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
624{ 624{
@@ -627,14 +627,14 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
627 iif = kmalloc(sizeof *iif, GFP_KERNEL); 627 iif = kmalloc(sizeof *iif, GFP_KERNEL);
628 if (!iif) { 628 if (!iif) {
629 pr_err("out of memory\n"); 629 pr_err("out of memory\n");
630 return 0; 630 return -ENOMEM;
631 } 631 }
632 632
633 if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index) 633 if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
634 >= sizeof iif->id) { 634 >= sizeof iif->id) {
635 pr_err("ID too long: %s\n", isdnid); 635 pr_err("ID too long: %s\n", isdnid);
636 kfree(iif); 636 kfree(iif);
637 return 0; 637 return -EINVAL;
638 } 638 }
639 639
640 iif->owner = THIS_MODULE; 640 iif->owner = THIS_MODULE;
@@ -656,13 +656,13 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
656 if (!register_isdn(iif)) { 656 if (!register_isdn(iif)) {
657 pr_err("register_isdn failed\n"); 657 pr_err("register_isdn failed\n");
658 kfree(iif); 658 kfree(iif);
659 return 0; 659 return -EINVAL;
660 } 660 }
661 661
662 cs->iif = iif; 662 cs->iif = iif;
663 cs->myid = iif->channels; /* Set my device id */ 663 cs->myid = iif->channels; /* Set my device id */
664 cs->hw_hdr_len = HW_HDR_LEN; 664 cs->hw_hdr_len = HW_HDR_LEN;
665 return 1; 665 return 0;
666} 666}
667 667
668/** 668/**
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index a351c16705bd..bc29f1d52a2f 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -56,7 +56,7 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
56 56
57/* start writing 57/* start writing
58 * acquire the write semaphore 58 * acquire the write semaphore
59 * return true if acquired, false if busy 59 * return 0 if acquired, <0 if busy
60 */ 60 */
61static inline int isowbuf_startwrite(struct isowbuf_t *iwb) 61static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
62{ 62{
@@ -64,12 +64,12 @@ static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
64 atomic_inc(&iwb->writesem); 64 atomic_inc(&iwb->writesem);
65 gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore", 65 gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore",
66 __func__); 66 __func__);
67 return 0; 67 return -EBUSY;
68 } 68 }
69 gig_dbg(DEBUG_ISO, 69 gig_dbg(DEBUG_ISO,
70 "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d", 70 "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
71 __func__, iwb->data[iwb->write], iwb->wbits); 71 __func__, iwb->data[iwb->write], iwb->wbits);
72 return 1; 72 return 0;
73} 73}
74 74
75/* finish writing 75/* finish writing
@@ -158,7 +158,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
158 /* no wraparound in valid data */ 158 /* no wraparound in valid data */
159 if (limit >= write) { 159 if (limit >= write) {
160 /* append idle frame */ 160 /* append idle frame */
161 if (!isowbuf_startwrite(iwb)) 161 if (isowbuf_startwrite(iwb) < 0)
162 return -EBUSY; 162 return -EBUSY;
163 /* write position could have changed */ 163 /* write position could have changed */
164 write = iwb->write; 164 write = iwb->write;
@@ -403,7 +403,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
403 unsigned char c; 403 unsigned char c;
404 404
405 if (isowbuf_freebytes(iwb) < count + count / 5 + 6 || 405 if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
406 !isowbuf_startwrite(iwb)) { 406 isowbuf_startwrite(iwb) < 0) {
407 gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN", 407 gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
408 __func__, isowbuf_freebytes(iwb)); 408 __func__, isowbuf_freebytes(iwb));
409 return -EAGAIN; 409 return -EAGAIN;
@@ -457,7 +457,7 @@ static inline int trans_buildframe(struct isowbuf_t *iwb,
457 return iwb->write; 457 return iwb->write;
458 458
459 if (isowbuf_freebytes(iwb) < count || 459 if (isowbuf_freebytes(iwb) < count ||
460 !isowbuf_startwrite(iwb)) { 460 isowbuf_startwrite(iwb) < 0) {
461 gig_dbg(DEBUG_ISO, "can't put %d bytes", count); 461 gig_dbg(DEBUG_ISO, "can't put %d bytes", count);
462 return -EAGAIN; 462 return -EAGAIN;
463 } 463 }
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 6f3fd4cf4378..8c91fd5eb6fd 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -340,17 +340,16 @@ static int gigaset_initbcshw(struct bc_state *bcs)
340{ 340{
341 /* unused */ 341 /* unused */
342 bcs->hw.ser = NULL; 342 bcs->hw.ser = NULL;
343 return 1; 343 return 0;
344} 344}
345 345
346/* 346/*
347 * Free B channel structure 347 * Free B channel structure
348 * Called by "gigaset_freebcs" in common.c 348 * Called by "gigaset_freebcs" in common.c
349 */ 349 */
350static int gigaset_freebcshw(struct bc_state *bcs) 350static void gigaset_freebcshw(struct bc_state *bcs)
351{ 351{
352 /* unused */ 352 /* unused */
353 return 1;
354} 353}
355 354
356/* 355/*
@@ -398,7 +397,7 @@ static int gigaset_initcshw(struct cardstate *cs)
398 scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL); 397 scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
399 if (!scs) { 398 if (!scs) {
400 pr_err("out of memory\n"); 399 pr_err("out of memory\n");
401 return 0; 400 return -ENOMEM;
402 } 401 }
403 cs->hw.ser = scs; 402 cs->hw.ser = scs;
404 403
@@ -410,13 +409,13 @@ static int gigaset_initcshw(struct cardstate *cs)
410 pr_err("error %d registering platform device\n", rc); 409 pr_err("error %d registering platform device\n", rc);
411 kfree(cs->hw.ser); 410 kfree(cs->hw.ser);
412 cs->hw.ser = NULL; 411 cs->hw.ser = NULL;
413 return 0; 412 return rc;
414 } 413 }
415 dev_set_drvdata(&cs->hw.ser->dev.dev, cs); 414 dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
416 415
417 tasklet_init(&cs->write_tasklet, 416 tasklet_init(&cs->write_tasklet,
418 gigaset_modem_fill, (unsigned long) cs); 417 gigaset_modem_fill, (unsigned long) cs);
419 return 1; 418 return 0;
420} 419}
421 420
422/* 421/*
@@ -503,6 +502,7 @@ static int
503gigaset_tty_open(struct tty_struct *tty) 502gigaset_tty_open(struct tty_struct *tty)
504{ 503{
505 struct cardstate *cs; 504 struct cardstate *cs;
505 int rc;
506 506
507 gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101"); 507 gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101");
508 508
@@ -515,8 +515,10 @@ gigaset_tty_open(struct tty_struct *tty)
515 515
516 /* allocate memory for our device state and initialize it */ 516 /* allocate memory for our device state and initialize it */
517 cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME); 517 cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
518 if (!cs) 518 if (!cs) {
519 rc = -ENODEV;
519 goto error; 520 goto error;
521 }
520 522
521 cs->dev = &cs->hw.ser->dev.dev; 523 cs->dev = &cs->hw.ser->dev.dev;
522 cs->hw.ser->tty = tty; 524 cs->hw.ser->tty = tty;
@@ -530,7 +532,8 @@ gigaset_tty_open(struct tty_struct *tty)
530 */ 532 */
531 if (startmode == SM_LOCKED) 533 if (startmode == SM_LOCKED)
532 cs->mstate = MS_LOCKED; 534 cs->mstate = MS_LOCKED;
533 if (!gigaset_start(cs)) { 535 rc = gigaset_start(cs);
536 if (rc < 0) {
534 tasklet_kill(&cs->write_tasklet); 537 tasklet_kill(&cs->write_tasklet);
535 goto error; 538 goto error;
536 } 539 }
@@ -542,7 +545,7 @@ error:
542 gig_dbg(DEBUG_INIT, "Startup of HLL failed"); 545 gig_dbg(DEBUG_INIT, "Startup of HLL failed");
543 tty->disc_data = NULL; 546 tty->disc_data = NULL;
544 gigaset_freecs(cs); 547 gigaset_freecs(cs);
545 return -ENODEV; 548 return rc;
546} 549}
547 550
548/* 551/*
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 049da67f6392..bb12d8051732 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -549,10 +549,9 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
549 0, 0, &buf, 6, 2000); 549 0, 0, &buf, 6, 2000);
550} 550}
551 551
552static int gigaset_freebcshw(struct bc_state *bcs) 552static void gigaset_freebcshw(struct bc_state *bcs)
553{ 553{
554 /* unused */ 554 /* unused */
555 return 1;
556} 555}
557 556
558/* Initialize the b-channel structure */ 557/* Initialize the b-channel structure */
@@ -560,7 +559,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
560{ 559{
561 /* unused */ 560 /* unused */
562 bcs->hw.usb = NULL; 561 bcs->hw.usb = NULL;
563 return 1; 562 return 0;
564} 563}
565 564
566static void gigaset_reinitbcshw(struct bc_state *bcs) 565static void gigaset_reinitbcshw(struct bc_state *bcs)
@@ -582,7 +581,7 @@ static int gigaset_initcshw(struct cardstate *cs)
582 kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL); 581 kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
583 if (!ucs) { 582 if (!ucs) {
584 pr_err("out of memory\n"); 583 pr_err("out of memory\n");
585 return 0; 584 return -ENOMEM;
586 } 585 }
587 586
588 ucs->bchars[0] = 0; 587 ucs->bchars[0] = 0;
@@ -597,7 +596,7 @@ static int gigaset_initcshw(struct cardstate *cs)
597 tasklet_init(&cs->write_tasklet, 596 tasklet_init(&cs->write_tasklet,
598 gigaset_modem_fill, (unsigned long) cs); 597 gigaset_modem_fill, (unsigned long) cs);
599 598
600 return 1; 599 return 0;
601} 600}
602 601
603/* Send data from current skb to the device. */ 602/* Send data from current skb to the device. */
@@ -766,9 +765,9 @@ static int gigaset_probe(struct usb_interface *interface,
766 if (startmode == SM_LOCKED) 765 if (startmode == SM_LOCKED)
767 cs->mstate = MS_LOCKED; 766 cs->mstate = MS_LOCKED;
768 767
769 if (!gigaset_start(cs)) { 768 retval = gigaset_start(cs);
769 if (retval < 0) {
770 tasklet_kill(&cs->write_tasklet); 770 tasklet_kill(&cs->write_tasklet);
771 retval = -ENODEV;
772 goto error; 771 goto error;
773 } 772 }
774 return 0; 773 return 0;
@@ -898,8 +897,10 @@ static int __init usb_gigaset_init(void)
898 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 897 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
899 GIGASET_MODULENAME, GIGASET_DEVNAME, 898 GIGASET_MODULENAME, GIGASET_DEVNAME,
900 &ops, THIS_MODULE); 899 &ops, THIS_MODULE);
901 if (driver == NULL) 900 if (driver == NULL) {
901 result = -ENOMEM;
902 goto error; 902 goto error;
903 }
903 904
904 /* register this driver with the USB subsystem */ 905 /* register this driver with the USB subsystem */
905 result = usb_register(&gigaset_usb_driver); 906 result = usb_register(&gigaset_usb_driver);
@@ -915,7 +916,7 @@ error:
915 if (driver) 916 if (driver)
916 gigaset_freedriver(driver); 917 gigaset_freedriver(driver);
917 driver = NULL; 918 driver = NULL;
918 return -1; 919 return result;
919} 920}
920 921
921/* 922/*
diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
index a576f32e6635..7a0bdbdd87ea 100644
--- a/drivers/isdn/hardware/eicon/capifunc.c
+++ b/drivers/isdn/hardware/eicon/capifunc.c
@@ -1120,7 +1120,7 @@ int fax_head_line_time(char *buffer)
1120/* 1120/*
1121 * init (alloc) main structures 1121 * init (alloc) main structures
1122 */ 1122 */
1123static int DIVA_INIT_FUNCTION init_main_structs(void) 1123static int __init init_main_structs(void)
1124{ 1124{
1125 if (!(mapped_msg = (CAPI_MSG *) diva_os_malloc(0, MAX_MSG_SIZE))) { 1125 if (!(mapped_msg = (CAPI_MSG *) diva_os_malloc(0, MAX_MSG_SIZE))) {
1126 DBG_ERR(("init: failed alloc mapped_msg.")) 1126 DBG_ERR(("init: failed alloc mapped_msg."))
@@ -1181,7 +1181,7 @@ static void do_api_remove_start(void)
1181/* 1181/*
1182 * init 1182 * init
1183 */ 1183 */
1184int DIVA_INIT_FUNCTION init_capifunc(void) 1184int __init init_capifunc(void)
1185{ 1185{
1186 diva_os_initialize_spin_lock(&api_lock, "capifunc"); 1186 diva_os_initialize_spin_lock(&api_lock, "capifunc");
1187 memset(ControllerMap, 0, MAX_DESCRIPTORS + 1); 1187 memset(ControllerMap, 0, MAX_DESCRIPTORS + 1);
@@ -1209,7 +1209,7 @@ int DIVA_INIT_FUNCTION init_capifunc(void)
1209/* 1209/*
1210 * finit 1210 * finit
1211 */ 1211 */
1212void DIVA_EXIT_FUNCTION finit_capifunc(void) 1212void __exit finit_capifunc(void)
1213{ 1213{
1214 do_api_remove_start(); 1214 do_api_remove_start();
1215 divacapi_disconnect_didd(); 1215 divacapi_disconnect_didd();
diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c
index eabe0fa1b627..997d46abf5b2 100644
--- a/drivers/isdn/hardware/eicon/capimain.c
+++ b/drivers/isdn/hardware/eicon/capimain.c
@@ -118,7 +118,7 @@ void diva_os_set_controller_struct(struct capi_ctr *ctrl)
118/* 118/*
119 * module init 119 * module init
120 */ 120 */
121static int DIVA_INIT_FUNCTION divacapi_init(void) 121static int __init divacapi_init(void)
122{ 122{
123 char tmprev[32]; 123 char tmprev[32];
124 int ret = 0; 124 int ret = 0;
@@ -144,7 +144,7 @@ static int DIVA_INIT_FUNCTION divacapi_init(void)
144/* 144/*
145 * module exit 145 * module exit
146 */ 146 */
147static void DIVA_EXIT_FUNCTION divacapi_exit(void) 147static void __exit divacapi_exit(void)
148{ 148{
149 finit_capifunc(); 149 finit_capifunc();
150 printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME); 150 printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
index c4c8220c9d72..b0b23ed8b374 100644
--- a/drivers/isdn/hardware/eicon/diddfunc.c
+++ b/drivers/isdn/hardware/eicon/diddfunc.c
@@ -47,7 +47,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
47/* 47/*
48 * connect to didd 48 * connect to didd
49 */ 49 */
50static int DIVA_INIT_FUNCTION connect_didd(void) 50static int __init connect_didd(void)
51{ 51{
52 int x = 0; 52 int x = 0;
53 int dadapter = 0; 53 int dadapter = 0;
@@ -79,7 +79,7 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
79/* 79/*
80 * disconnect from didd 80 * disconnect from didd
81 */ 81 */
82static void DIVA_EXIT_FUNCTION disconnect_didd(void) 82static void __exit disconnect_didd(void)
83{ 83{
84 IDI_SYNC_REQ req; 84 IDI_SYNC_REQ req;
85 85
@@ -92,7 +92,7 @@ static void DIVA_EXIT_FUNCTION disconnect_didd(void)
92/* 92/*
93 * init 93 * init
94 */ 94 */
95int DIVA_INIT_FUNCTION diddfunc_init(void) 95int __init diddfunc_init(void)
96{ 96{
97 diva_didd_load_time_init(); 97 diva_didd_load_time_init();
98 98
@@ -107,7 +107,7 @@ int DIVA_INIT_FUNCTION diddfunc_init(void)
107/* 107/*
108 * finit 108 * finit
109 */ 109 */
110void DIVA_EXIT_FUNCTION diddfunc_finit(void) 110void __exit diddfunc_finit(void)
111{ 111{
112 DbgDeregister(); 112 DbgDeregister();
113 disconnect_didd(); 113 disconnect_didd();
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
index d1d3de03cced..fab6ccfb00d5 100644
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ b/drivers/isdn/hardware/eicon/diva_didd.c
@@ -91,7 +91,7 @@ static const struct file_operations divadidd_proc_fops = {
91 .release = single_release, 91 .release = single_release,
92}; 92};
93 93
94static int DIVA_INIT_FUNCTION create_proc(void) 94static int __init create_proc(void)
95{ 95{
96 proc_net_eicon = proc_mkdir("eicon", init_net.proc_net); 96 proc_net_eicon = proc_mkdir("eicon", init_net.proc_net);
97 97
@@ -109,7 +109,7 @@ static void remove_proc(void)
109 remove_proc_entry("eicon", init_net.proc_net); 109 remove_proc_entry("eicon", init_net.proc_net);
110} 110}
111 111
112static int DIVA_INIT_FUNCTION divadidd_init(void) 112static int __init divadidd_init(void)
113{ 113{
114 char tmprev[32]; 114 char tmprev[32];
115 int ret = 0; 115 int ret = 0;
@@ -141,7 +141,7 @@ out:
141 return (ret); 141 return (ret);
142} 142}
143 143
144static void DIVA_EXIT_FUNCTION divadidd_exit(void) 144static void __exit divadidd_exit(void)
145{ 145{
146 diddfunc_finit(); 146 diddfunc_finit();
147 remove_proc(); 147 remove_proc();
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index ffa0c31be745..48db08d0bb3d 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -184,7 +184,7 @@ static void divas_maint_unregister_chrdev(void)
184 unregister_chrdev(major, DEVNAME); 184 unregister_chrdev(major, DEVNAME);
185} 185}
186 186
187static int DIVA_INIT_FUNCTION divas_maint_register_chrdev(void) 187static int __init divas_maint_register_chrdev(void)
188{ 188{
189 if ((major = register_chrdev(0, DEVNAME, &divas_maint_fops)) < 0) 189 if ((major = register_chrdev(0, DEVNAME, &divas_maint_fops)) < 0)
190 { 190 {
@@ -207,7 +207,7 @@ void diva_maint_wakeup_read(void)
207/* 207/*
208 * Driver Load 208 * Driver Load
209 */ 209 */
210static int DIVA_INIT_FUNCTION maint_init(void) 210static int __init maint_init(void)
211{ 211{
212 char tmprev[50]; 212 char tmprev[50];
213 int ret = 0; 213 int ret = 0;
@@ -245,7 +245,7 @@ out:
245/* 245/*
246** Driver Unload 246** Driver Unload
247*/ 247*/
248static void DIVA_EXIT_FUNCTION maint_exit(void) 248static void __exit maint_exit(void)
249{ 249{
250 divas_maint_unregister_chrdev(); 250 divas_maint_unregister_chrdev();
251 mntfunc_finit(); 251 mntfunc_finit();
diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
index 60aaf9580956..4be5f8814777 100644
--- a/drivers/isdn/hardware/eicon/divasfunc.c
+++ b/drivers/isdn/hardware/eicon/divasfunc.c
@@ -153,7 +153,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
153/* 153/*
154 * connect to didd 154 * connect to didd
155 */ 155 */
156static int DIVA_INIT_FUNCTION connect_didd(void) 156static int __init connect_didd(void)
157{ 157{
158 int x = 0; 158 int x = 0;
159 int dadapter = 0; 159 int dadapter = 0;
@@ -209,7 +209,7 @@ static void disconnect_didd(void)
209/* 209/*
210 * init 210 * init
211 */ 211 */
212int DIVA_INIT_FUNCTION divasfunc_init(int dbgmask) 212int __init divasfunc_init(int dbgmask)
213{ 213{
214 char *version; 214 char *version;
215 215
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index a5c8f90b3b37..4103a8c178d7 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -114,7 +114,7 @@ static const struct file_operations um_idi_proc_fops = {
114 .release = single_release, 114 .release = single_release,
115}; 115};
116 116
117static int DIVA_INIT_FUNCTION create_um_idi_proc(void) 117static int __init create_um_idi_proc(void)
118{ 118{
119 um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon, 119 um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
120 &um_idi_proc_fops); 120 &um_idi_proc_fops);
@@ -146,7 +146,7 @@ static void divas_idi_unregister_chrdev(void)
146 unregister_chrdev(major, DEVNAME); 146 unregister_chrdev(major, DEVNAME);
147} 147}
148 148
149static int DIVA_INIT_FUNCTION divas_idi_register_chrdev(void) 149static int __init divas_idi_register_chrdev(void)
150{ 150{
151 if ((major = register_chrdev(0, DEVNAME, &divas_idi_fops)) < 0) 151 if ((major = register_chrdev(0, DEVNAME, &divas_idi_fops)) < 0)
152 { 152 {
@@ -161,7 +161,7 @@ static int DIVA_INIT_FUNCTION divas_idi_register_chrdev(void)
161/* 161/*
162** Driver Load 162** Driver Load
163*/ 163*/
164static int DIVA_INIT_FUNCTION divasi_init(void) 164static int __init divasi_init(void)
165{ 165{
166 char tmprev[50]; 166 char tmprev[50];
167 int ret = 0; 167 int ret = 0;
@@ -202,7 +202,7 @@ out:
202/* 202/*
203** Driver Unload 203** Driver Unload
204*/ 204*/
205static void DIVA_EXIT_FUNCTION divasi_exit(void) 205static void __exit divasi_exit(void)
206{ 206{
207 idifunc_finit(); 207 idifunc_finit();
208 remove_um_idi_proc(); 208 remove_um_idi_proc();
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 7eaab06276f9..ca6d276bb256 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -673,7 +673,7 @@ static void divas_unregister_chrdev(void)
673 unregister_chrdev(major, DEVNAME); 673 unregister_chrdev(major, DEVNAME);
674} 674}
675 675
676static int DIVA_INIT_FUNCTION divas_register_chrdev(void) 676static int __init divas_register_chrdev(void)
677{ 677{
678 if ((major = register_chrdev(0, DEVNAME, &divas_fops)) < 0) 678 if ((major = register_chrdev(0, DEVNAME, &divas_fops)) < 0)
679 { 679 {
@@ -767,7 +767,7 @@ static void __devexit divas_remove_one(struct pci_dev *pdev)
767/* -------------------------------------------------------------------------- 767/* --------------------------------------------------------------------------
768 Driver Load / Startup 768 Driver Load / Startup
769 -------------------------------------------------------------------------- */ 769 -------------------------------------------------------------------------- */
770static int DIVA_INIT_FUNCTION divas_init(void) 770static int __init divas_init(void)
771{ 771{
772 char tmprev[50]; 772 char tmprev[50];
773 int ret = 0; 773 int ret = 0;
@@ -831,7 +831,7 @@ out:
831/* -------------------------------------------------------------------------- 831/* --------------------------------------------------------------------------
832 Driver Unload 832 Driver Unload
833 -------------------------------------------------------------------------- */ 833 -------------------------------------------------------------------------- */
834static void DIVA_EXIT_FUNCTION divas_exit(void) 834static void __exit divas_exit(void)
835{ 835{
836 pci_unregister_driver(&diva_pci_driver); 836 pci_unregister_driver(&diva_pci_driver);
837 remove_divas_proc(); 837 remove_divas_proc();
diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
index d153e3cdecf7..fef6586fe5ac 100644
--- a/drivers/isdn/hardware/eicon/idifunc.c
+++ b/drivers/isdn/hardware/eicon/idifunc.c
@@ -133,7 +133,7 @@ static void um_remove_card(DESCRIPTOR *d)
133/* 133/*
134 * remove all adapter 134 * remove all adapter
135 */ 135 */
136static void DIVA_EXIT_FUNCTION remove_all_idi_proc(void) 136static void __exit remove_all_idi_proc(void)
137{ 137{
138 udiva_card *card; 138 udiva_card *card;
139 diva_os_spin_lock_magic_t old_irql; 139 diva_os_spin_lock_magic_t old_irql;
@@ -181,7 +181,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
181/* 181/*
182 * connect DIDD 182 * connect DIDD
183 */ 183 */
184static int DIVA_INIT_FUNCTION connect_didd(void) 184static int __init connect_didd(void)
185{ 185{
186 int x = 0; 186 int x = 0;
187 int dadapter = 0; 187 int dadapter = 0;
@@ -225,7 +225,7 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
225/* 225/*
226 * Disconnect from DIDD 226 * Disconnect from DIDD
227 */ 227 */
228static void DIVA_EXIT_FUNCTION disconnect_didd(void) 228static void __exit disconnect_didd(void)
229{ 229{
230 IDI_SYNC_REQ req; 230 IDI_SYNC_REQ req;
231 231
@@ -240,7 +240,7 @@ static void DIVA_EXIT_FUNCTION disconnect_didd(void)
240/* 240/*
241 * init 241 * init
242 */ 242 */
243int DIVA_INIT_FUNCTION idifunc_init(void) 243int __init idifunc_init(void)
244{ 244{
245 diva_os_initialize_spin_lock(&ll_lock, "idifunc"); 245 diva_os_initialize_spin_lock(&ll_lock, "idifunc");
246 246
@@ -260,7 +260,7 @@ int DIVA_INIT_FUNCTION idifunc_init(void)
260/* 260/*
261 * finit 261 * finit
262 */ 262 */
263void DIVA_EXIT_FUNCTION idifunc_finit(void) 263void __exit idifunc_finit(void)
264{ 264{
265 diva_user_mode_idi_finit(); 265 diva_user_mode_idi_finit();
266 disconnect_didd(); 266 disconnect_didd();
diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
index d6072607305c..1cd9affb6058 100644
--- a/drivers/isdn/hardware/eicon/mntfunc.c
+++ b/drivers/isdn/hardware/eicon/mntfunc.c
@@ -72,7 +72,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
72/* 72/*
73 * connect to didd 73 * connect to didd
74 */ 74 */
75static int DIVA_INIT_FUNCTION connect_didd(void) 75static int __init connect_didd(void)
76{ 76{
77 int x = 0; 77 int x = 0;
78 int dadapter = 0; 78 int dadapter = 0;
@@ -114,7 +114,7 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
114/* 114/*
115 * disconnect from didd 115 * disconnect from didd
116 */ 116 */
117static void DIVA_EXIT_FUNCTION disconnect_didd(void) 117static void __exit disconnect_didd(void)
118{ 118{
119 IDI_SYNC_REQ req; 119 IDI_SYNC_REQ req;
120 120
@@ -300,7 +300,7 @@ int maint_read_write(void __user *buf, int count)
300/* 300/*
301 * init 301 * init
302 */ 302 */
303int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer, 303int __init mntfunc_init(int *buffer_length, void **buffer,
304 unsigned long diva_dbg_mem) 304 unsigned long diva_dbg_mem)
305{ 305{
306 if (*buffer_length < 64) { 306 if (*buffer_length < 64) {
@@ -348,7 +348,7 @@ int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer,
348/* 348/*
349 * exit 349 * exit
350 */ 350 */
351void DIVA_EXIT_FUNCTION mntfunc_finit(void) 351void __exit mntfunc_finit(void)
352{ 352{
353 void *buffer; 353 void *buffer;
354 int i = 100; 354 int i = 100;
diff --git a/drivers/isdn/hardware/eicon/platform.h b/drivers/isdn/hardware/eicon/platform.h
index 7331c3b14a5f..b2edb7590dda 100644
--- a/drivers/isdn/hardware/eicon/platform.h
+++ b/drivers/isdn/hardware/eicon/platform.h
@@ -38,9 +38,6 @@
38#define DIVA_NO_DEBUGLIB 38#define DIVA_NO_DEBUGLIB
39#endif 39#endif
40 40
41#define DIVA_INIT_FUNCTION __init
42#define DIVA_EXIT_FUNCTION __exit
43
44#define DIVA_USER_MODE_CARD_CONFIG 1 41#define DIVA_USER_MODE_CARD_CONFIG 1
45#define USE_EXTENDED_DEBUGS 1 42#define USE_EXTENDED_DEBUGS 1
46 43
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index c0b8c960ee3f..c08fc605e56b 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -30,7 +30,7 @@
30#include "ipac.h" 30#include "ipac.h"
31 31
32 32
33#define AVMFRITZ_REV "2.1" 33#define AVMFRITZ_REV "2.3"
34 34
35static int AVM_cnt; 35static int AVM_cnt;
36static int debug; 36static int debug;
@@ -69,6 +69,7 @@ enum {
69#define HDLC_MODE_TRANS 0x02 69#define HDLC_MODE_TRANS 0x02
70#define HDLC_MODE_CCR_7 0x04 70#define HDLC_MODE_CCR_7 0x04
71#define HDLC_MODE_CCR_16 0x08 71#define HDLC_MODE_CCR_16 0x08
72#define HDLC_FIFO_SIZE_128 0x20
72#define HDLC_MODE_TESTLOOP 0x80 73#define HDLC_MODE_TESTLOOP 0x80
73 74
74#define HDLC_INT_XPR 0x80 75#define HDLC_INT_XPR 0x80
@@ -80,13 +81,16 @@ enum {
80#define HDLC_STAT_RDO 0x10 81#define HDLC_STAT_RDO 0x10
81#define HDLC_STAT_CRCVFRRAB 0x0E 82#define HDLC_STAT_CRCVFRRAB 0x0E
82#define HDLC_STAT_CRCVFR 0x06 83#define HDLC_STAT_CRCVFR 0x06
83#define HDLC_STAT_RML_MASK 0x3f00 84#define HDLC_STAT_RML_MASK_V1 0x3f00
85#define HDLC_STAT_RML_MASK_V2 0x7f00
84 86
85#define HDLC_CMD_XRS 0x80 87#define HDLC_CMD_XRS 0x80
86#define HDLC_CMD_XME 0x01 88#define HDLC_CMD_XME 0x01
87#define HDLC_CMD_RRS 0x20 89#define HDLC_CMD_RRS 0x20
88#define HDLC_CMD_XML_MASK 0x3f00 90#define HDLC_CMD_XML_MASK 0x3f00
89#define HDLC_FIFO_SIZE 32 91
92#define HDLC_FIFO_SIZE_V1 32
93#define HDLC_FIFO_SIZE_V2 128
90 94
91/* Fritz PCI v2.0 */ 95/* Fritz PCI v2.0 */
92 96
@@ -346,11 +350,14 @@ modehdlc(struct bchannel *bch, int protocol)
346{ 350{
347 struct fritzcard *fc = bch->hw; 351 struct fritzcard *fc = bch->hw;
348 struct hdlc_hw *hdlc; 352 struct hdlc_hw *hdlc;
353 u8 mode;
349 354
350 hdlc = &fc->hdlc[(bch->nr - 1) & 1]; 355 hdlc = &fc->hdlc[(bch->nr - 1) & 1];
351 pr_debug("%s: hdlc %c protocol %x-->%x ch %d\n", fc->name, 356 pr_debug("%s: hdlc %c protocol %x-->%x ch %d\n", fc->name,
352 '@' + bch->nr, bch->state, protocol, bch->nr); 357 '@' + bch->nr, bch->state, protocol, bch->nr);
353 hdlc->ctrl.ctrl = 0; 358 hdlc->ctrl.ctrl = 0;
359 mode = (fc->type == AVM_FRITZ_PCIV2) ? HDLC_FIFO_SIZE_128 : 0;
360
354 switch (protocol) { 361 switch (protocol) {
355 case -1: /* used for init */ 362 case -1: /* used for init */
356 bch->state = -1; 363 bch->state = -1;
@@ -358,7 +365,7 @@ modehdlc(struct bchannel *bch, int protocol)
358 if (bch->state == ISDN_P_NONE) 365 if (bch->state == ISDN_P_NONE)
359 break; 366 break;
360 hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS; 367 hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS;
361 hdlc->ctrl.sr.mode = HDLC_MODE_TRANS; 368 hdlc->ctrl.sr.mode = mode | HDLC_MODE_TRANS;
362 write_ctrl(bch, 5); 369 write_ctrl(bch, 5);
363 bch->state = ISDN_P_NONE; 370 bch->state = ISDN_P_NONE;
364 test_and_clear_bit(FLG_HDLC, &bch->Flags); 371 test_and_clear_bit(FLG_HDLC, &bch->Flags);
@@ -367,7 +374,7 @@ modehdlc(struct bchannel *bch, int protocol)
367 case ISDN_P_B_RAW: 374 case ISDN_P_B_RAW:
368 bch->state = protocol; 375 bch->state = protocol;
369 hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS; 376 hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS;
370 hdlc->ctrl.sr.mode = HDLC_MODE_TRANS; 377 hdlc->ctrl.sr.mode = mode | HDLC_MODE_TRANS;
371 write_ctrl(bch, 5); 378 write_ctrl(bch, 5);
372 hdlc->ctrl.sr.cmd = HDLC_CMD_XRS; 379 hdlc->ctrl.sr.cmd = HDLC_CMD_XRS;
373 write_ctrl(bch, 1); 380 write_ctrl(bch, 1);
@@ -377,7 +384,7 @@ modehdlc(struct bchannel *bch, int protocol)
377 case ISDN_P_B_HDLC: 384 case ISDN_P_B_HDLC:
378 bch->state = protocol; 385 bch->state = protocol;
379 hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS; 386 hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS;
380 hdlc->ctrl.sr.mode = HDLC_MODE_ITF_FLG; 387 hdlc->ctrl.sr.mode = mode | HDLC_MODE_ITF_FLG;
381 write_ctrl(bch, 5); 388 write_ctrl(bch, 5);
382 hdlc->ctrl.sr.cmd = HDLC_CMD_XRS; 389 hdlc->ctrl.sr.cmd = HDLC_CMD_XRS;
383 write_ctrl(bch, 1); 390 write_ctrl(bch, 1);
@@ -397,39 +404,40 @@ hdlc_empty_fifo(struct bchannel *bch, int count)
397 u32 *ptr; 404 u32 *ptr;
398 u8 *p; 405 u8 *p;
399 u32 val, addr; 406 u32 val, addr;
400 int cnt = 0; 407 int cnt;
401 struct fritzcard *fc = bch->hw; 408 struct fritzcard *fc = bch->hw;
402 409
403 pr_debug("%s: %s %d\n", fc->name, __func__, count); 410 pr_debug("%s: %s %d\n", fc->name, __func__, count);
404 if (!bch->rx_skb) { 411 if (test_bit(FLG_RX_OFF, &bch->Flags)) {
405 bch->rx_skb = mI_alloc_skb(bch->maxlen, GFP_ATOMIC); 412 p = NULL;
406 if (!bch->rx_skb) { 413 bch->dropcnt += count;
407 pr_info("%s: B receive out of memory\n", 414 } else {
408 fc->name); 415 cnt = bchannel_get_rxbuf(bch, count);
416 if (cnt < 0) {
417 pr_warning("%s.B%d: No bufferspace for %d bytes\n",
418 fc->name, bch->nr, count);
409 return; 419 return;
410 } 420 }
421 p = skb_put(bch->rx_skb, count);
411 } 422 }
412 if ((bch->rx_skb->len + count) > bch->maxlen) {
413 pr_debug("%s: overrun %d\n", fc->name,
414 bch->rx_skb->len + count);
415 return;
416 }
417 p = skb_put(bch->rx_skb, count);
418 ptr = (u32 *)p; 423 ptr = (u32 *)p;
419 if (AVM_FRITZ_PCIV2 == fc->type) 424 if (fc->type == AVM_FRITZ_PCIV2)
420 addr = fc->addr + (bch->nr == 2 ? 425 addr = fc->addr + (bch->nr == 2 ?
421 AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1); 426 AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1);
422 else { 427 else {
423 addr = fc->addr + CHIP_WINDOW; 428 addr = fc->addr + CHIP_WINDOW;
424 outl(bch->nr == 2 ? AVM_HDLC_2 : AVM_HDLC_1, fc->addr); 429 outl(bch->nr == 2 ? AVM_HDLC_2 : AVM_HDLC_1, fc->addr);
425 } 430 }
431 cnt = 0;
426 while (cnt < count) { 432 while (cnt < count) {
427 val = le32_to_cpu(inl(addr)); 433 val = le32_to_cpu(inl(addr));
428 put_unaligned(val, ptr); 434 if (p) {
429 ptr++; 435 put_unaligned(val, ptr);
436 ptr++;
437 }
430 cnt += 4; 438 cnt += 4;
431 } 439 }
432 if (debug & DEBUG_HW_BFIFO) { 440 if (p && (debug & DEBUG_HW_BFIFO)) {
433 snprintf(fc->log, LOG_SIZE, "B%1d-recv %s %d ", 441 snprintf(fc->log, LOG_SIZE, "B%1d-recv %s %d ",
434 bch->nr, fc->name, count); 442 bch->nr, fc->name, count);
435 print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count); 443 print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count);
@@ -441,30 +449,43 @@ hdlc_fill_fifo(struct bchannel *bch)
441{ 449{
442 struct fritzcard *fc = bch->hw; 450 struct fritzcard *fc = bch->hw;
443 struct hdlc_hw *hdlc; 451 struct hdlc_hw *hdlc;
444 int count, cnt = 0; 452 int count, fs, cnt = 0, idx, fillempty = 0;
445 u8 *p; 453 u8 *p;
446 u32 *ptr, val, addr; 454 u32 *ptr, val, addr;
447 455
448 hdlc = &fc->hdlc[(bch->nr - 1) & 1]; 456 idx = (bch->nr - 1) & 1;
449 if (!bch->tx_skb) 457 hdlc = &fc->hdlc[idx];
450 return; 458 fs = (fc->type == AVM_FRITZ_PCIV2) ?
451 count = bch->tx_skb->len - bch->tx_idx; 459 HDLC_FIFO_SIZE_V2 : HDLC_FIFO_SIZE_V1;
452 if (count <= 0) 460 if (!bch->tx_skb) {
453 return; 461 if (!test_bit(FLG_TX_EMPTY, &bch->Flags))
454 p = bch->tx_skb->data + bch->tx_idx; 462 return;
463 count = fs;
464 p = bch->fill;
465 fillempty = 1;
466 } else {
467 count = bch->tx_skb->len - bch->tx_idx;
468 if (count <= 0)
469 return;
470 p = bch->tx_skb->data + bch->tx_idx;
471 }
455 hdlc->ctrl.sr.cmd &= ~HDLC_CMD_XME; 472 hdlc->ctrl.sr.cmd &= ~HDLC_CMD_XME;
456 if (count > HDLC_FIFO_SIZE) { 473 if (count > fs) {
457 count = HDLC_FIFO_SIZE; 474 count = fs;
458 } else { 475 } else {
459 if (test_bit(FLG_HDLC, &bch->Flags)) 476 if (test_bit(FLG_HDLC, &bch->Flags))
460 hdlc->ctrl.sr.cmd |= HDLC_CMD_XME; 477 hdlc->ctrl.sr.cmd |= HDLC_CMD_XME;
461 } 478 }
462 pr_debug("%s: %s %d/%d/%d", fc->name, __func__, count,
463 bch->tx_idx, bch->tx_skb->len);
464 ptr = (u32 *)p; 479 ptr = (u32 *)p;
465 bch->tx_idx += count; 480 if (fillempty) {
466 hdlc->ctrl.sr.xml = ((count == HDLC_FIFO_SIZE) ? 0 : count); 481 pr_debug("%s.B%d: %d/%d/%d", fc->name, bch->nr, count,
467 if (AVM_FRITZ_PCIV2 == fc->type) { 482 bch->tx_idx, bch->tx_skb->len);
483 bch->tx_idx += count;
484 } else {
485 pr_debug("%s.B%d: fillempty %d\n", fc->name, bch->nr, count);
486 }
487 hdlc->ctrl.sr.xml = ((count == fs) ? 0 : count);
488 if (fc->type == AVM_FRITZ_PCIV2) {
468 __write_ctrl_pciv2(fc, hdlc, bch->nr); 489 __write_ctrl_pciv2(fc, hdlc, bch->nr);
469 addr = fc->addr + (bch->nr == 2 ? 490 addr = fc->addr + (bch->nr == 2 ?
470 AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1); 491 AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1);
@@ -472,13 +493,21 @@ hdlc_fill_fifo(struct bchannel *bch)
472 __write_ctrl_pci(fc, hdlc, bch->nr); 493 __write_ctrl_pci(fc, hdlc, bch->nr);
473 addr = fc->addr + CHIP_WINDOW; 494 addr = fc->addr + CHIP_WINDOW;
474 } 495 }
475 while (cnt < count) { 496 if (fillempty) {
476 val = get_unaligned(ptr); 497 while (cnt < count) {
477 outl(cpu_to_le32(val), addr); 498 /* all bytes the same - no worry about endian */
478 ptr++; 499 outl(*ptr, addr);
479 cnt += 4; 500 cnt += 4;
501 }
502 } else {
503 while (cnt < count) {
504 val = get_unaligned(ptr);
505 outl(cpu_to_le32(val), addr);
506 ptr++;
507 cnt += 4;
508 }
480 } 509 }
481 if (debug & DEBUG_HW_BFIFO) { 510 if ((debug & DEBUG_HW_BFIFO) && !fillempty) {
482 snprintf(fc->log, LOG_SIZE, "B%1d-send %s %d ", 511 snprintf(fc->log, LOG_SIZE, "B%1d-send %s %d ",
483 bch->nr, fc->name, count); 512 bch->nr, fc->name, count);
484 print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count); 513 print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count);
@@ -488,17 +517,17 @@ hdlc_fill_fifo(struct bchannel *bch)
488static void 517static void
489HDLC_irq_xpr(struct bchannel *bch) 518HDLC_irq_xpr(struct bchannel *bch)
490{ 519{
491 if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) 520 if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) {
492 hdlc_fill_fifo(bch); 521 hdlc_fill_fifo(bch);
493 else { 522 } else {
494 if (bch->tx_skb) { 523 if (bch->tx_skb)
495 /* send confirm, on trans, free on hdlc. */
496 if (test_bit(FLG_TRANSPARENT, &bch->Flags))
497 confirm_Bsend(bch);
498 dev_kfree_skb(bch->tx_skb); 524 dev_kfree_skb(bch->tx_skb);
499 } 525 if (get_next_bframe(bch)) {
500 if (get_next_bframe(bch))
501 hdlc_fill_fifo(bch); 526 hdlc_fill_fifo(bch);
527 test_and_clear_bit(FLG_TX_EMPTY, &bch->Flags);
528 } else if (test_bit(FLG_TX_EMPTY, &bch->Flags)) {
529 hdlc_fill_fifo(bch);
530 }
502 } 531 }
503} 532}
504 533
@@ -506,13 +535,23 @@ static void
506HDLC_irq(struct bchannel *bch, u32 stat) 535HDLC_irq(struct bchannel *bch, u32 stat)
507{ 536{
508 struct fritzcard *fc = bch->hw; 537 struct fritzcard *fc = bch->hw;
509 int len; 538 int len, fs;
539 u32 rmlMask;
510 struct hdlc_hw *hdlc; 540 struct hdlc_hw *hdlc;
511 541
512 hdlc = &fc->hdlc[(bch->nr - 1) & 1]; 542 hdlc = &fc->hdlc[(bch->nr - 1) & 1];
513 pr_debug("%s: ch%d stat %#x\n", fc->name, bch->nr, stat); 543 pr_debug("%s: ch%d stat %#x\n", fc->name, bch->nr, stat);
544 if (fc->type == AVM_FRITZ_PCIV2) {
545 rmlMask = HDLC_STAT_RML_MASK_V2;
546 fs = HDLC_FIFO_SIZE_V2;
547 } else {
548 rmlMask = HDLC_STAT_RML_MASK_V1;
549 fs = HDLC_FIFO_SIZE_V1;
550 }
514 if (stat & HDLC_INT_RPR) { 551 if (stat & HDLC_INT_RPR) {
515 if (stat & HDLC_STAT_RDO) { 552 if (stat & HDLC_STAT_RDO) {
553 pr_warning("%s: ch%d stat %x RDO\n",
554 fc->name, bch->nr, stat);
516 hdlc->ctrl.sr.xml = 0; 555 hdlc->ctrl.sr.xml = 0;
517 hdlc->ctrl.sr.cmd |= HDLC_CMD_RRS; 556 hdlc->ctrl.sr.cmd |= HDLC_CMD_RRS;
518 write_ctrl(bch, 1); 557 write_ctrl(bch, 1);
@@ -521,21 +560,21 @@ HDLC_irq(struct bchannel *bch, u32 stat)
521 if (bch->rx_skb) 560 if (bch->rx_skb)
522 skb_trim(bch->rx_skb, 0); 561 skb_trim(bch->rx_skb, 0);
523 } else { 562 } else {
524 len = (stat & HDLC_STAT_RML_MASK) >> 8; 563 len = (stat & rmlMask) >> 8;
525 if (!len) 564 if (!len)
526 len = 32; 565 len = fs;
527 hdlc_empty_fifo(bch, len); 566 hdlc_empty_fifo(bch, len);
528 if (!bch->rx_skb) 567 if (!bch->rx_skb)
529 goto handle_tx; 568 goto handle_tx;
530 if ((stat & HDLC_STAT_RME) || test_bit(FLG_TRANSPARENT, 569 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
531 &bch->Flags)) { 570 recv_Bchannel(bch, 0, false);
532 if (((stat & HDLC_STAT_CRCVFRRAB) == 571 } else if (stat & HDLC_STAT_RME) {
533 HDLC_STAT_CRCVFR) || 572 if ((stat & HDLC_STAT_CRCVFRRAB) ==
534 test_bit(FLG_TRANSPARENT, &bch->Flags)) { 573 HDLC_STAT_CRCVFR) {
535 recv_Bchannel(bch, 0); 574 recv_Bchannel(bch, 0, false);
536 } else { 575 } else {
537 pr_debug("%s: got invalid frame\n", 576 pr_warning("%s: got invalid frame\n",
538 fc->name); 577 fc->name);
539 skb_trim(bch->rx_skb, 0); 578 skb_trim(bch->rx_skb, 0);
540 } 579 }
541 } 580 }
@@ -547,16 +586,13 @@ handle_tx:
547 * restart transmitting the whole frame on HDLC 586 * restart transmitting the whole frame on HDLC
548 * in transparent mode we send the next data 587 * in transparent mode we send the next data
549 */ 588 */
550 if (bch->tx_skb) 589 pr_warning("%s: ch%d stat %x XDU %s\n", fc->name, bch->nr,
551 pr_debug("%s: ch%d XDU len(%d) idx(%d) Flags(%lx)\n", 590 stat, bch->tx_skb ? "tx_skb" : "no tx_skb");
552 fc->name, bch->nr, bch->tx_skb->len,
553 bch->tx_idx, bch->Flags);
554 else
555 pr_debug("%s: ch%d XDU no tx_skb Flags(%lx)\n",
556 fc->name, bch->nr, bch->Flags);
557 if (bch->tx_skb && bch->tx_skb->len) { 591 if (bch->tx_skb && bch->tx_skb->len) {
558 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) 592 if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
559 bch->tx_idx = 0; 593 bch->tx_idx = 0;
594 } else if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
595 test_and_set_bit(FLG_TX_EMPTY, &bch->Flags);
560 } 596 }
561 hdlc->ctrl.sr.xml = 0; 597 hdlc->ctrl.sr.xml = 0;
562 hdlc->ctrl.sr.cmd |= HDLC_CMD_XRS; 598 hdlc->ctrl.sr.cmd |= HDLC_CMD_XRS;
@@ -659,22 +695,17 @@ avm_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
659 struct fritzcard *fc = bch->hw; 695 struct fritzcard *fc = bch->hw;
660 int ret = -EINVAL; 696 int ret = -EINVAL;
661 struct mISDNhead *hh = mISDN_HEAD_P(skb); 697 struct mISDNhead *hh = mISDN_HEAD_P(skb);
662 u32 id; 698 unsigned long flags;
663 u_long flags;
664 699
665 switch (hh->prim) { 700 switch (hh->prim) {
666 case PH_DATA_REQ: 701 case PH_DATA_REQ:
667 spin_lock_irqsave(&fc->lock, flags); 702 spin_lock_irqsave(&fc->lock, flags);
668 ret = bchannel_senddata(bch, skb); 703 ret = bchannel_senddata(bch, skb);
669 if (ret > 0) { /* direct TX */ 704 if (ret > 0) { /* direct TX */
670 id = hh->id; /* skb can be freed */
671 hdlc_fill_fifo(bch); 705 hdlc_fill_fifo(bch);
672 ret = 0; 706 ret = 0;
673 spin_unlock_irqrestore(&fc->lock, flags); 707 }
674 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) 708 spin_unlock_irqrestore(&fc->lock, flags);
675 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
676 } else
677 spin_unlock_irqrestore(&fc->lock, flags);
678 return ret; 709 return ret;
679 case PH_ACTIVATE_REQ: 710 case PH_ACTIVATE_REQ:
680 spin_lock_irqsave(&fc->lock, flags); 711 spin_lock_irqsave(&fc->lock, flags);
@@ -783,7 +814,7 @@ init_card(struct fritzcard *fc)
783 inithdlc(fc); 814 inithdlc(fc);
784 enable_hwirq(fc); 815 enable_hwirq(fc);
785 /* RESET Receiver and Transmitter */ 816 /* RESET Receiver and Transmitter */
786 if (AVM_FRITZ_PCIV2 == fc->type) { 817 if (fc->type == AVM_FRITZ_PCIV2) {
787 WriteISAC_V2(fc, ISACX_MASK, 0); 818 WriteISAC_V2(fc, ISACX_MASK, 0);
788 WriteISAC_V2(fc, ISACX_CMDRD, 0x41); 819 WriteISAC_V2(fc, ISACX_CMDRD, 0x41);
789 } else { 820 } else {
@@ -810,21 +841,7 @@ init_card(struct fritzcard *fc)
810static int 841static int
811channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) 842channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
812{ 843{
813 int ret = 0; 844 return mISDN_ctrl_bchannel(bch, cq);
814 struct fritzcard *fc = bch->hw;
815
816 switch (cq->op) {
817 case MISDN_CTRL_GETOP:
818 cq->op = 0;
819 break;
820 /* Nothing implemented yet */
821 case MISDN_CTRL_FILL_EMPTY:
822 default:
823 pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op);
824 ret = -EINVAL;
825 break;
826 }
827 return ret;
828} 845}
829 846
830static int 847static int
@@ -839,14 +856,10 @@ avm_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
839 switch (cmd) { 856 switch (cmd) {
840 case CLOSE_CHANNEL: 857 case CLOSE_CHANNEL:
841 test_and_clear_bit(FLG_OPEN, &bch->Flags); 858 test_and_clear_bit(FLG_OPEN, &bch->Flags);
842 if (test_bit(FLG_ACTIVE, &bch->Flags)) { 859 spin_lock_irqsave(&fc->lock, flags);
843 spin_lock_irqsave(&fc->lock, flags); 860 mISDN_freebchannel(bch);
844 mISDN_freebchannel(bch); 861 modehdlc(bch, ISDN_P_NONE);
845 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags); 862 spin_unlock_irqrestore(&fc->lock, flags);
846 test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
847 modehdlc(bch, ISDN_P_NONE);
848 spin_unlock_irqrestore(&fc->lock, flags);
849 }
850 ch->protocol = ISDN_P_NONE; 863 ch->protocol = ISDN_P_NONE;
851 ch->peer = NULL; 864 ch->peer = NULL;
852 module_put(THIS_MODULE); 865 module_put(THIS_MODULE);
@@ -868,7 +881,7 @@ channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq)
868 881
869 switch (cq->op) { 882 switch (cq->op) {
870 case MISDN_CTRL_GETOP: 883 case MISDN_CTRL_GETOP:
871 cq->op = MISDN_CTRL_LOOP; 884 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
872 break; 885 break;
873 case MISDN_CTRL_LOOP: 886 case MISDN_CTRL_LOOP:
874 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ 887 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -878,6 +891,9 @@ channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq)
878 } 891 }
879 ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel); 892 ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel);
880 break; 893 break;
894 case MISDN_CTRL_L1_TIMER3:
895 ret = fc->isac.ctrl(&fc->isac, HW_TIMER3_VALUE, cq->p1);
896 break;
881 default: 897 default:
882 pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op); 898 pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op);
883 ret = -EINVAL; 899 ret = -EINVAL;
@@ -898,7 +914,6 @@ open_bchannel(struct fritzcard *fc, struct channel_req *rq)
898 bch = &fc->bch[rq->adr.channel - 1]; 914 bch = &fc->bch[rq->adr.channel - 1];
899 if (test_and_set_bit(FLG_OPEN, &bch->Flags)) 915 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
900 return -EBUSY; /* b-channel can be only open once */ 916 return -EBUSY; /* b-channel can be only open once */
901 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
902 bch->ch.protocol = rq->protocol; 917 bch->ch.protocol = rq->protocol;
903 rq->ch = &bch->ch; 918 rq->ch = &bch->ch;
904 return 0; 919 return 0;
@@ -1021,6 +1036,7 @@ static int __devinit
1021setup_instance(struct fritzcard *card) 1036setup_instance(struct fritzcard *card)
1022{ 1037{
1023 int i, err; 1038 int i, err;
1039 unsigned short minsize;
1024 u_long flags; 1040 u_long flags;
1025 1041
1026 snprintf(card->name, MISDN_MAX_IDLEN - 1, "AVM.%d", AVM_cnt + 1); 1042 snprintf(card->name, MISDN_MAX_IDLEN - 1, "AVM.%d", AVM_cnt + 1);
@@ -1040,7 +1056,11 @@ setup_instance(struct fritzcard *card)
1040 for (i = 0; i < 2; i++) { 1056 for (i = 0; i < 2; i++) {
1041 card->bch[i].nr = i + 1; 1057 card->bch[i].nr = i + 1;
1042 set_channelmap(i + 1, card->isac.dch.dev.channelmap); 1058 set_channelmap(i + 1, card->isac.dch.dev.channelmap);
1043 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM); 1059 if (AVM_FRITZ_PCIV2 == card->type)
1060 minsize = HDLC_FIFO_SIZE_V2;
1061 else
1062 minsize = HDLC_FIFO_SIZE_V1;
1063 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, minsize);
1044 card->bch[i].hw = card; 1064 card->bch[i].hw = card;
1045 card->bch[i].ch.send = avm_l2l1B; 1065 card->bch[i].ch.send = avm_l2l1B;
1046 card->bch[i].ch.ctrl = avm_bctrl; 1066 card->bch[i].ch.ctrl = avm_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi.h b/drivers/isdn/hardware/mISDN/hfc_multi.h
index b0588acbb47d..c601f880141e 100644
--- a/drivers/isdn/hardware/mISDN/hfc_multi.h
+++ b/drivers/isdn/hardware/mISDN/hfc_multi.h
@@ -205,18 +205,22 @@ struct hfc_multi {
205 205
206 u_int slots; /* number of PCM slots */ 206 u_int slots; /* number of PCM slots */
207 u_int leds; /* type of leds */ 207 u_int leds; /* type of leds */
208 u_int ledcount; /* used to animate leds */
209 u_long ledstate; /* save last state of leds */ 208 u_long ledstate; /* save last state of leds */
210 int opticalsupport; /* has the e1 board */ 209 int opticalsupport; /* has the e1 board */
211 /* an optical Interface */ 210 /* an optical Interface */
212 int dslot; /* channel # of d-channel (E1) default 16 */ 211
212 u_int bmask[32]; /* bitmask of bchannels for port */
213 u_char dnum[32]; /* array of used dchannel numbers for port */
214 u_char created[32]; /* what port is created */
215 u_int activity_tx; /* if there is data TX / RX */
216 u_int activity_rx; /* bitmask according to port number */
217 /* (will be cleared after */
218 /* showing led-states) */
219 u_int flash[8]; /* counter for flashing 8 leds on activity */
213 220
214 u_long wdcount; /* every 500 ms we need to */ 221 u_long wdcount; /* every 500 ms we need to */
215 /* send the watchdog a signal */ 222 /* send the watchdog a signal */
216 u_char wdbyte; /* watchdog toggle byte */ 223 u_char wdbyte; /* watchdog toggle byte */
217 u_int activity[8]; /* if there is any action on this */
218 /* port (will be cleared after */
219 /* showing led-states) */
220 int e1_state; /* keep track of last state */ 224 int e1_state; /* keep track of last state */
221 int e1_getclock; /* if sync is retrieved from interface */ 225 int e1_getclock; /* if sync is retrieved from interface */
222 int syncronized; /* keep track of existing sync interface */ 226 int syncronized; /* keep track of existing sync interface */
@@ -233,7 +237,6 @@ struct hfc_multi {
233 * the bch->channel is equvalent to the hfc-channel 237 * the bch->channel is equvalent to the hfc-channel
234 */ 238 */
235 struct hfc_chan chan[32]; 239 struct hfc_chan chan[32];
236 u_char created[8]; /* what port is created */
237 signed char slot_owner[256]; /* owner channel of slot */ 240 signed char slot_owner[256]; /* owner channel of slot */
238}; 241};
239 242
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 033223180b55..5e402cf2e795 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -103,14 +103,26 @@
103 * Omit this value, if all cards are interconnected or none is connected. 103 * Omit this value, if all cards are interconnected or none is connected.
104 * If unsure, don't give this parameter. 104 * If unsure, don't give this parameter.
105 * 105 *
106 * dslot: 106 * dmask and bmask:
107 * NOTE: only one dslot value must be given for every card. 107 * NOTE: One dmask value must be given for every HFC-E1 card.
108 * Also this value must be given for non-E1 cards. If omitted, the E1 108 * If omitted, the E1 card has D-channel on time slot 16, which is default.
109 * card has D-channel on time slot 16, which is default. 109 * dmask is a 32 bit mask. The bit must be set for an alternate time slot.
110 * If 1..15 or 17..31, an alternate time slot is used for D-channel. 110 * If multiple bits are set, multiple virtual card fragments are created.
111 * In this case, the application must be able to handle this. 111 * For each bit set, a bmask value must be given. Each bit on the bmask
112 * If -1 is given, the D-channel is disabled and all 31 slots can be used 112 * value stands for a B-channel. The bmask may not overlap with dmask or
113 * for B-channel. (only for specific applications) 113 * with other bmask values for that card.
114 * Example: dmask=0x00020002 bmask=0x0000fffc,0xfffc0000
115 * This will create one fragment with D-channel on slot 1 with
116 * B-channels on slots 2..15, and a second fragment with D-channel
117 * on slot 17 with B-channels on slot 18..31. Slot 16 is unused.
118 * If bit 0 is set (dmask=0x00000001) the D-channel is on slot 0 and will
119 * not function.
120 * Example: dmask=0x00000001 bmask=0xfffffffe
121 * This will create a port with all 31 usable timeslots as
122 * B-channels.
123 * If no bits are set on bmask, no B-channel is created for that fragment.
124 * Example: dmask=0xfffffffe bmask=0,0,0,0.... (31 0-values for bmask)
125 * This will create 31 ports with one D-channel only.
114 * If you don't know how to use it, you don't need it! 126 * If you don't know how to use it, you don't need it!
115 * 127 *
116 * iomode: 128 * iomode:
@@ -172,6 +184,7 @@
172 184
173#define MAX_CARDS 8 185#define MAX_CARDS 8
174#define MAX_PORTS (8 * MAX_CARDS) 186#define MAX_PORTS (8 * MAX_CARDS)
187#define MAX_FRAGS (32 * MAX_CARDS)
175 188
176static LIST_HEAD(HFClist); 189static LIST_HEAD(HFClist);
177static spinlock_t HFClock; /* global hfc list lock */ 190static spinlock_t HFClock; /* global hfc list lock */
@@ -203,7 +216,8 @@ static int nt_t1_count[] = { 3840, 1920, 960, 480, 240, 120, 60, 30 };
203 216
204static uint type[MAX_CARDS]; 217static uint type[MAX_CARDS];
205static int pcm[MAX_CARDS]; 218static int pcm[MAX_CARDS];
206static int dslot[MAX_CARDS]; 219static uint dmask[MAX_CARDS];
220static uint bmask[MAX_FRAGS];
207static uint iomode[MAX_CARDS]; 221static uint iomode[MAX_CARDS];
208static uint port[MAX_PORTS]; 222static uint port[MAX_PORTS];
209static uint debug; 223static uint debug;
@@ -218,7 +232,7 @@ static uint clockdelay_nt = CLKDEL_NT;
218#define HWID_MINIP16 3 232#define HWID_MINIP16 3
219static uint hwid = HWID_NONE; 233static uint hwid = HWID_NONE;
220 234
221static int HFC_cnt, Port_cnt, PCM_cnt = 99; 235static int HFC_cnt, E1_cnt, bmask_cnt, Port_cnt, PCM_cnt = 99;
222 236
223MODULE_AUTHOR("Andreas Eversberg"); 237MODULE_AUTHOR("Andreas Eversberg");
224MODULE_LICENSE("GPL"); 238MODULE_LICENSE("GPL");
@@ -231,7 +245,8 @@ module_param(clockdelay_te, uint, S_IRUGO | S_IWUSR);
231module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR); 245module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR);
232module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR); 246module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
233module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR); 247module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR);
234module_param_array(dslot, int, NULL, S_IRUGO | S_IWUSR); 248module_param_array(dmask, uint, NULL, S_IRUGO | S_IWUSR);
249module_param_array(bmask, uint, NULL, S_IRUGO | S_IWUSR);
235module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR); 250module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR);
236module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR); 251module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
237module_param(hwid, uint, S_IRUGO | S_IWUSR); /* The hardware ID */ 252module_param(hwid, uint, S_IRUGO | S_IWUSR); /* The hardware ID */
@@ -1156,7 +1171,7 @@ init_chip(struct hfc_multi *hc)
1156 hc->DTMFbase = 0x1000; 1171 hc->DTMFbase = 0x1000;
1157 if (test_bit(HFC_CHIP_EXRAM_128, &hc->chip)) { 1172 if (test_bit(HFC_CHIP_EXRAM_128, &hc->chip)) {
1158 if (debug & DEBUG_HFCMULTI_INIT) 1173 if (debug & DEBUG_HFCMULTI_INIT)
1159 printk(KERN_DEBUG "%s: changing to 128K extenal RAM\n", 1174 printk(KERN_DEBUG "%s: changing to 128K external RAM\n",
1160 __func__); 1175 __func__);
1161 hc->hw.r_ctrl |= V_EXT_RAM; 1176 hc->hw.r_ctrl |= V_EXT_RAM;
1162 hc->hw.r_ram_sz = 1; 1177 hc->hw.r_ram_sz = 1;
@@ -1167,7 +1182,7 @@ init_chip(struct hfc_multi *hc)
1167 } 1182 }
1168 if (test_bit(HFC_CHIP_EXRAM_512, &hc->chip)) { 1183 if (test_bit(HFC_CHIP_EXRAM_512, &hc->chip)) {
1169 if (debug & DEBUG_HFCMULTI_INIT) 1184 if (debug & DEBUG_HFCMULTI_INIT)
1170 printk(KERN_DEBUG "%s: changing to 512K extenal RAM\n", 1185 printk(KERN_DEBUG "%s: changing to 512K external RAM\n",
1171 __func__); 1186 __func__);
1172 hc->hw.r_ctrl |= V_EXT_RAM; 1187 hc->hw.r_ctrl |= V_EXT_RAM;
1173 hc->hw.r_ram_sz = 2; 1188 hc->hw.r_ram_sz = 2;
@@ -1607,40 +1622,46 @@ hfcmulti_leds(struct hfc_multi *hc)
1607 struct dchannel *dch; 1622 struct dchannel *dch;
1608 int led[4]; 1623 int led[4];
1609 1624
1610 hc->ledcount += poll;
1611 if (hc->ledcount > 4096) {
1612 hc->ledcount -= 4096;
1613 hc->ledstate = 0xAFFEAFFE;
1614 }
1615
1616 switch (hc->leds) { 1625 switch (hc->leds) {
1617 case 1: /* HFC-E1 OEM */ 1626 case 1: /* HFC-E1 OEM */
1618 /* 2 red blinking: NT mode deactivate 1627 /* 2 red steady: LOS
1619 * 2 red steady: TE mode deactivate 1628 * 1 red steady: L1 not active
1620 * left green: L1 active 1629 * 2 green steady: L1 active
1621 * left red: frame sync, but no L1 1630 * 1st green flashing: activity on TX
1622 * right green: L2 active 1631 * 2nd green flashing: activity on RX
1623 */ 1632 */
1624 if (hc->chan[hc->dslot].sync != 2) { /* no frame sync */ 1633 led[0] = 0;
1625 if (hc->chan[hc->dslot].dch->dev.D.protocol 1634 led[1] = 0;
1626 != ISDN_P_NT_E1) { 1635 led[2] = 0;
1627 led[0] = 1; 1636 led[3] = 0;
1637 dch = hc->chan[hc->dnum[0]].dch;
1638 if (dch) {
1639 if (hc->chan[hc->dnum[0]].los)
1628 led[1] = 1; 1640 led[1] = 1;
1629 } else if (hc->ledcount >> 11) { 1641 if (hc->e1_state != 1) {
1630 led[0] = 1; 1642 led[0] = 1;
1631 led[1] = 1; 1643 hc->flash[2] = 0;
1644 hc->flash[3] = 0;
1632 } else { 1645 } else {
1633 led[0] = 0; 1646 led[2] = 1;
1634 led[1] = 0; 1647 led[3] = 1;
1648 if (!hc->flash[2] && hc->activity_tx)
1649 hc->flash[2] = poll;
1650 if (!hc->flash[3] && hc->activity_rx)
1651 hc->flash[3] = poll;
1652 if (hc->flash[2] && hc->flash[2] < 1024)
1653 led[2] = 0;
1654 if (hc->flash[3] && hc->flash[3] < 1024)
1655 led[3] = 0;
1656 if (hc->flash[2] >= 2048)
1657 hc->flash[2] = 0;
1658 if (hc->flash[3] >= 2048)
1659 hc->flash[3] = 0;
1660 if (hc->flash[2])
1661 hc->flash[2] += poll;
1662 if (hc->flash[3])
1663 hc->flash[3] += poll;
1635 } 1664 }
1636 led[2] = 0;
1637 led[3] = 0;
1638 } else { /* with frame sync */
1639 /* TODO make it work */
1640 led[0] = 0;
1641 led[1] = 0;
1642 led[2] = 0;
1643 led[3] = 1;
1644 } 1665 }
1645 leds = (led[0] | (led[1]<<2) | (led[2]<<1) | (led[3]<<3))^0xF; 1666 leds = (led[0] | (led[1]<<2) | (led[2]<<1) | (led[3]<<3))^0xF;
1646 /* leds are inverted */ 1667 /* leds are inverted */
@@ -1651,9 +1672,9 @@ hfcmulti_leds(struct hfc_multi *hc)
1651 break; 1672 break;
1652 1673
1653 case 2: /* HFC-4S OEM */ 1674 case 2: /* HFC-4S OEM */
1654 /* red blinking = PH_DEACTIVATE NT Mode 1675 /* red steady: PH_DEACTIVATE
1655 * red steady = PH_DEACTIVATE TE Mode 1676 * green steady: PH_ACTIVATE
1656 * green steady = PH_ACTIVATE 1677 * green flashing: activity on TX
1657 */ 1678 */
1658 for (i = 0; i < 4; i++) { 1679 for (i = 0; i < 4; i++) {
1659 state = 0; 1680 state = 0;
@@ -1669,17 +1690,20 @@ hfcmulti_leds(struct hfc_multi *hc)
1669 if (state) { 1690 if (state) {
1670 if (state == active) { 1691 if (state == active) {
1671 led[i] = 1; /* led green */ 1692 led[i] = 1; /* led green */
1672 } else 1693 hc->activity_tx |= hc->activity_rx;
1673 if (dch->dev.D.protocol == ISDN_P_TE_S0) 1694 if (!hc->flash[i] &&
1674 /* TE mode: led red */ 1695 (hc->activity_tx & (1 << i)))
1675 led[i] = 2; 1696 hc->flash[i] = poll;
1676 else 1697 if (hc->flash[i] && hc->flash[i] < 1024)
1677 if (hc->ledcount >> 11) 1698 led[i] = 0; /* led off */
1678 /* led red */ 1699 if (hc->flash[i] >= 2048)
1679 led[i] = 2; 1700 hc->flash[i] = 0;
1680 else 1701 if (hc->flash[i])
1681 /* led off */ 1702 hc->flash[i] += poll;
1682 led[i] = 0; 1703 } else {
1704 led[i] = 2; /* led red */
1705 hc->flash[i] = 0;
1706 }
1683 } else 1707 } else
1684 led[i] = 0; /* led off */ 1708 led[i] = 0; /* led off */
1685 } 1709 }
@@ -1712,9 +1736,9 @@ hfcmulti_leds(struct hfc_multi *hc)
1712 break; 1736 break;
1713 1737
1714 case 3: /* HFC 1S/2S Beronet */ 1738 case 3: /* HFC 1S/2S Beronet */
1715 /* red blinking = PH_DEACTIVATE NT Mode 1739 /* red steady: PH_DEACTIVATE
1716 * red steady = PH_DEACTIVATE TE Mode 1740 * green steady: PH_ACTIVATE
1717 * green steady = PH_ACTIVATE 1741 * green flashing: activity on TX
1718 */ 1742 */
1719 for (i = 0; i < 2; i++) { 1743 for (i = 0; i < 2; i++) {
1720 state = 0; 1744 state = 0;
@@ -1730,22 +1754,23 @@ hfcmulti_leds(struct hfc_multi *hc)
1730 if (state) { 1754 if (state) {
1731 if (state == active) { 1755 if (state == active) {
1732 led[i] = 1; /* led green */ 1756 led[i] = 1; /* led green */
1733 } else 1757 hc->activity_tx |= hc->activity_rx;
1734 if (dch->dev.D.protocol == ISDN_P_TE_S0) 1758 if (!hc->flash[i] &&
1735 /* TE mode: led red */ 1759 (hc->activity_tx & (1 << i)))
1736 led[i] = 2; 1760 hc->flash[i] = poll;
1737 else 1761 if (hc->flash[i] < 1024)
1738 if (hc->ledcount >> 11) 1762 led[i] = 0; /* led off */
1739 /* led red */ 1763 if (hc->flash[i] >= 2048)
1740 led[i] = 2; 1764 hc->flash[i] = 0;
1741 else 1765 if (hc->flash[i])
1742 /* led off */ 1766 hc->flash[i] += poll;
1743 led[i] = 0; 1767 } else {
1768 led[i] = 2; /* led red */
1769 hc->flash[i] = 0;
1770 }
1744 } else 1771 } else
1745 led[i] = 0; /* led off */ 1772 led[i] = 0; /* led off */
1746 } 1773 }
1747
1748
1749 leds = (led[0] > 0) | ((led[1] > 0) << 1) | ((led[0]&1) << 2) 1774 leds = (led[0] > 0) | ((led[1] > 0) << 1) | ((led[0]&1) << 2)
1750 | ((led[1]&1) << 3); 1775 | ((led[1]&1) << 3);
1751 if (leds != (int)hc->ledstate) { 1776 if (leds != (int)hc->ledstate) {
@@ -1757,8 +1782,11 @@ hfcmulti_leds(struct hfc_multi *hc)
1757 } 1782 }
1758 break; 1783 break;
1759 case 8: /* HFC 8S+ Beronet */ 1784 case 8: /* HFC 8S+ Beronet */
1760 lled = 0; 1785 /* off: PH_DEACTIVATE
1761 1786 * steady: PH_ACTIVATE
1787 * flashing: activity on TX
1788 */
1789 lled = 0xff; /* leds off */
1762 for (i = 0; i < 8; i++) { 1790 for (i = 0; i < 8; i++) {
1763 state = 0; 1791 state = 0;
1764 active = -1; 1792 active = -1;
@@ -1772,14 +1800,20 @@ hfcmulti_leds(struct hfc_multi *hc)
1772 } 1800 }
1773 if (state) { 1801 if (state) {
1774 if (state == active) { 1802 if (state == active) {
1775 lled |= 0 << i; 1803 lled &= ~(1 << i); /* led on */
1804 hc->activity_tx |= hc->activity_rx;
1805 if (!hc->flash[i] &&
1806 (hc->activity_tx & (1 << i)))
1807 hc->flash[i] = poll;
1808 if (hc->flash[i] < 1024)
1809 lled |= 1 << i; /* led off */
1810 if (hc->flash[i] >= 2048)
1811 hc->flash[i] = 0;
1812 if (hc->flash[i])
1813 hc->flash[i] += poll;
1776 } else 1814 } else
1777 if (hc->ledcount >> 11) 1815 hc->flash[i] = 0;
1778 lled |= 0 << i; 1816 }
1779 else
1780 lled |= 1 << i;
1781 } else
1782 lled |= 1 << i;
1783 } 1817 }
1784 leddw = lled << 24 | lled << 16 | lled << 8 | lled; 1818 leddw = lled << 24 | lled << 16 | lled << 8 | lled;
1785 if (leddw != hc->ledstate) { 1819 if (leddw != hc->ledstate) {
@@ -1794,6 +1828,8 @@ hfcmulti_leds(struct hfc_multi *hc)
1794 } 1828 }
1795 break; 1829 break;
1796 } 1830 }
1831 hc->activity_tx = 0;
1832 hc->activity_rx = 0;
1797} 1833}
1798/* 1834/*
1799 * read dtmf coefficients 1835 * read dtmf coefficients
@@ -2093,7 +2129,8 @@ next_frame:
2093 *txpending = 1; 2129 *txpending = 1;
2094 2130
2095 /* show activity */ 2131 /* show activity */
2096 hc->activity[hc->chan[ch].port] = 1; 2132 if (dch)
2133 hc->activity_tx |= 1 << hc->chan[ch].port;
2097 2134
2098 /* fill fifo to what we have left */ 2135 /* fill fifo to what we have left */
2099 ii = len; 2136 ii = len;
@@ -2129,13 +2166,9 @@ next_frame:
2129 HFC_wait_nodebug(hc); 2166 HFC_wait_nodebug(hc);
2130 } 2167 }
2131 2168
2132 /* send confirm, since get_net_bframe will not do it with trans */
2133 if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
2134 confirm_Bsend(bch);
2135
2136 /* check for next frame */
2137 dev_kfree_skb(*sp); 2169 dev_kfree_skb(*sp);
2138 if (bch && get_next_bframe(bch)) { /* hdlc is confirmed here */ 2170 /* check for next frame */
2171 if (bch && get_next_bframe(bch)) {
2139 len = (*sp)->len; 2172 len = (*sp)->len;
2140 goto next_frame; 2173 goto next_frame;
2141 } 2174 }
@@ -2163,24 +2196,20 @@ hfcmulti_rx(struct hfc_multi *hc, int ch)
2163 int f1 = 0, f2 = 0; /* = 0, to make GCC happy */ 2196 int f1 = 0, f2 = 0; /* = 0, to make GCC happy */
2164 int again = 0; 2197 int again = 0;
2165 struct bchannel *bch; 2198 struct bchannel *bch;
2166 struct dchannel *dch; 2199 struct dchannel *dch = NULL;
2167 struct sk_buff *skb, **sp = NULL; 2200 struct sk_buff *skb, **sp = NULL;
2168 int maxlen; 2201 int maxlen;
2169 2202
2170 bch = hc->chan[ch].bch; 2203 bch = hc->chan[ch].bch;
2171 dch = hc->chan[ch].dch; 2204 if (bch) {
2172 if ((!dch) && (!bch)) 2205 if (!test_bit(FLG_ACTIVE, &bch->Flags))
2173 return; 2206 return;
2174 if (dch) { 2207 } else if (hc->chan[ch].dch) {
2208 dch = hc->chan[ch].dch;
2175 if (!test_bit(FLG_ACTIVE, &dch->Flags)) 2209 if (!test_bit(FLG_ACTIVE, &dch->Flags))
2176 return; 2210 return;
2177 sp = &dch->rx_skb;
2178 maxlen = dch->maxlen;
2179 } else { 2211 } else {
2180 if (!test_bit(FLG_ACTIVE, &bch->Flags)) 2212 return;
2181 return;
2182 sp = &bch->rx_skb;
2183 maxlen = bch->maxlen;
2184 } 2213 }
2185next_frame: 2214next_frame:
2186 /* on first AND before getting next valid frame, R_FIFO must be written 2215 /* on first AND before getting next valid frame, R_FIFO must be written
@@ -2195,8 +2224,11 @@ next_frame:
2195 HFC_wait_nodebug(hc); 2224 HFC_wait_nodebug(hc);
2196 2225
2197 /* ignore if rx is off BUT change fifo (above) to start pending TX */ 2226 /* ignore if rx is off BUT change fifo (above) to start pending TX */
2198 if (hc->chan[ch].rx_off) 2227 if (hc->chan[ch].rx_off) {
2228 if (bch)
2229 bch->dropcnt += poll; /* not exact but fair enough */
2199 return; 2230 return;
2231 }
2200 2232
2201 if (dch || test_bit(FLG_HDLC, &bch->Flags)) { 2233 if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
2202 f1 = HFC_inb_nodebug(hc, A_F1); 2234 f1 = HFC_inb_nodebug(hc, A_F1);
@@ -2227,16 +2259,30 @@ next_frame:
2227 if (Zsize <= 0) 2259 if (Zsize <= 0)
2228 return; 2260 return;
2229 2261
2230 if (*sp == NULL) { 2262 if (bch) {
2231 *sp = mI_alloc_skb(maxlen + 3, GFP_ATOMIC); 2263 maxlen = bchannel_get_rxbuf(bch, Zsize);
2232 if (*sp == NULL) { 2264 if (maxlen < 0) {
2233 printk(KERN_DEBUG "%s: No mem for rx_skb\n", 2265 pr_warning("card%d.B%d: No bufferspace for %d bytes\n",
2234 __func__); 2266 hc->id + 1, bch->nr, Zsize);
2235 return; 2267 return;
2236 } 2268 }
2269 sp = &bch->rx_skb;
2270 maxlen = bch->maxlen;
2271 } else { /* Dchannel */
2272 sp = &dch->rx_skb;
2273 maxlen = dch->maxlen + 3;
2274 if (*sp == NULL) {
2275 *sp = mI_alloc_skb(maxlen, GFP_ATOMIC);
2276 if (*sp == NULL) {
2277 pr_warning("card%d: No mem for dch rx_skb\n",
2278 hc->id + 1);
2279 return;
2280 }
2281 }
2237 } 2282 }
2238 /* show activity */ 2283 /* show activity */
2239 hc->activity[hc->chan[ch].port] = 1; 2284 if (dch)
2285 hc->activity_rx |= 1 << hc->chan[ch].port;
2240 2286
2241 /* empty fifo with what we have */ 2287 /* empty fifo with what we have */
2242 if (dch || test_bit(FLG_HDLC, &bch->Flags)) { 2288 if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
@@ -2247,7 +2293,7 @@ next_frame:
2247 Zsize, z1, z2, (f1 == f2) ? "fragment" : "COMPLETE", 2293 Zsize, z1, z2, (f1 == f2) ? "fragment" : "COMPLETE",
2248 f1, f2, Zsize + (*sp)->len, again); 2294 f1, f2, Zsize + (*sp)->len, again);
2249 /* HDLC */ 2295 /* HDLC */
2250 if ((Zsize + (*sp)->len) > (maxlen + 3)) { 2296 if ((Zsize + (*sp)->len) > maxlen) {
2251 if (debug & DEBUG_HFCMULTI_FIFO) 2297 if (debug & DEBUG_HFCMULTI_FIFO)
2252 printk(KERN_DEBUG 2298 printk(KERN_DEBUG
2253 "%s(card %d): hdlc-frame too large.\n", 2299 "%s(card %d): hdlc-frame too large.\n",
@@ -2309,7 +2355,7 @@ next_frame:
2309 if (dch) 2355 if (dch)
2310 recv_Dchannel(dch); 2356 recv_Dchannel(dch);
2311 else 2357 else
2312 recv_Bchannel(bch, MISDN_ID_ANY); 2358 recv_Bchannel(bch, MISDN_ID_ANY, false);
2313 *sp = skb; 2359 *sp = skb;
2314 again++; 2360 again++;
2315 goto next_frame; 2361 goto next_frame;
@@ -2317,32 +2363,14 @@ next_frame:
2317 /* there is an incomplete frame */ 2363 /* there is an incomplete frame */
2318 } else { 2364 } else {
2319 /* transparent */ 2365 /* transparent */
2320 if (Zsize > skb_tailroom(*sp))
2321 Zsize = skb_tailroom(*sp);
2322 hc->read_fifo(hc, skb_put(*sp, Zsize), Zsize); 2366 hc->read_fifo(hc, skb_put(*sp, Zsize), Zsize);
2323 if (((*sp)->len) < MISDN_COPY_SIZE) {
2324 skb = *sp;
2325 *sp = mI_alloc_skb(skb->len, GFP_ATOMIC);
2326 if (*sp) {
2327 memcpy(skb_put(*sp, skb->len),
2328 skb->data, skb->len);
2329 skb_trim(skb, 0);
2330 } else {
2331 printk(KERN_DEBUG "%s: No mem\n", __func__);
2332 *sp = skb;
2333 skb = NULL;
2334 }
2335 } else {
2336 skb = NULL;
2337 }
2338 if (debug & DEBUG_HFCMULTI_FIFO) 2367 if (debug & DEBUG_HFCMULTI_FIFO)
2339 printk(KERN_DEBUG 2368 printk(KERN_DEBUG
2340 "%s(card %d): fifo(%d) reading %d bytes " 2369 "%s(card %d): fifo(%d) reading %d bytes "
2341 "(z1=%04x, z2=%04x) TRANS\n", 2370 "(z1=%04x, z2=%04x) TRANS\n",
2342 __func__, hc->id + 1, ch, Zsize, z1, z2); 2371 __func__, hc->id + 1, ch, Zsize, z1, z2);
2343 /* only bch is transparent */ 2372 /* only bch is transparent */
2344 recv_Bchannel(bch, hc->chan[ch].Zfill); 2373 recv_Bchannel(bch, hc->chan[ch].Zfill, false);
2345 *sp = skb;
2346 } 2374 }
2347} 2375}
2348 2376
@@ -2430,55 +2458,55 @@ handle_timer_irq(struct hfc_multi *hc)
2430 } 2458 }
2431 } 2459 }
2432 if (hc->ctype == HFC_TYPE_E1 && hc->created[0]) { 2460 if (hc->ctype == HFC_TYPE_E1 && hc->created[0]) {
2433 dch = hc->chan[hc->dslot].dch; 2461 dch = hc->chan[hc->dnum[0]].dch;
2434 if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) { 2462 /* LOS */
2435 /* LOS */ 2463 temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS;
2436 temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS; 2464 hc->chan[hc->dnum[0]].los = temp;
2437 if (!temp && hc->chan[hc->dslot].los) 2465 if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dnum[0]].cfg)) {
2466 if (!temp && hc->chan[hc->dnum[0]].los)
2438 signal_state_up(dch, L1_SIGNAL_LOS_ON, 2467 signal_state_up(dch, L1_SIGNAL_LOS_ON,
2439 "LOS detected"); 2468 "LOS detected");
2440 if (temp && !hc->chan[hc->dslot].los) 2469 if (temp && !hc->chan[hc->dnum[0]].los)
2441 signal_state_up(dch, L1_SIGNAL_LOS_OFF, 2470 signal_state_up(dch, L1_SIGNAL_LOS_OFF,
2442 "LOS gone"); 2471 "LOS gone");
2443 hc->chan[hc->dslot].los = temp;
2444 } 2472 }
2445 if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dslot].cfg)) { 2473 if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dnum[0]].cfg)) {
2446 /* AIS */ 2474 /* AIS */
2447 temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_AIS; 2475 temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_AIS;
2448 if (!temp && hc->chan[hc->dslot].ais) 2476 if (!temp && hc->chan[hc->dnum[0]].ais)
2449 signal_state_up(dch, L1_SIGNAL_AIS_ON, 2477 signal_state_up(dch, L1_SIGNAL_AIS_ON,
2450 "AIS detected"); 2478 "AIS detected");
2451 if (temp && !hc->chan[hc->dslot].ais) 2479 if (temp && !hc->chan[hc->dnum[0]].ais)
2452 signal_state_up(dch, L1_SIGNAL_AIS_OFF, 2480 signal_state_up(dch, L1_SIGNAL_AIS_OFF,
2453 "AIS gone"); 2481 "AIS gone");
2454 hc->chan[hc->dslot].ais = temp; 2482 hc->chan[hc->dnum[0]].ais = temp;
2455 } 2483 }
2456 if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dslot].cfg)) { 2484 if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dnum[0]].cfg)) {
2457 /* SLIP */ 2485 /* SLIP */
2458 temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_RX; 2486 temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_RX;
2459 if (!temp && hc->chan[hc->dslot].slip_rx) 2487 if (!temp && hc->chan[hc->dnum[0]].slip_rx)
2460 signal_state_up(dch, L1_SIGNAL_SLIP_RX, 2488 signal_state_up(dch, L1_SIGNAL_SLIP_RX,
2461 " bit SLIP detected RX"); 2489 " bit SLIP detected RX");
2462 hc->chan[hc->dslot].slip_rx = temp; 2490 hc->chan[hc->dnum[0]].slip_rx = temp;
2463 temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_TX; 2491 temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_TX;
2464 if (!temp && hc->chan[hc->dslot].slip_tx) 2492 if (!temp && hc->chan[hc->dnum[0]].slip_tx)
2465 signal_state_up(dch, L1_SIGNAL_SLIP_TX, 2493 signal_state_up(dch, L1_SIGNAL_SLIP_TX,
2466 " bit SLIP detected TX"); 2494 " bit SLIP detected TX");
2467 hc->chan[hc->dslot].slip_tx = temp; 2495 hc->chan[hc->dnum[0]].slip_tx = temp;
2468 } 2496 }
2469 if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dslot].cfg)) { 2497 if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dnum[0]].cfg)) {
2470 /* RDI */ 2498 /* RDI */
2471 temp = HFC_inb_nodebug(hc, R_RX_SL0_0) & V_A; 2499 temp = HFC_inb_nodebug(hc, R_RX_SL0_0) & V_A;
2472 if (!temp && hc->chan[hc->dslot].rdi) 2500 if (!temp && hc->chan[hc->dnum[0]].rdi)
2473 signal_state_up(dch, L1_SIGNAL_RDI_ON, 2501 signal_state_up(dch, L1_SIGNAL_RDI_ON,
2474 "RDI detected"); 2502 "RDI detected");
2475 if (temp && !hc->chan[hc->dslot].rdi) 2503 if (temp && !hc->chan[hc->dnum[0]].rdi)
2476 signal_state_up(dch, L1_SIGNAL_RDI_OFF, 2504 signal_state_up(dch, L1_SIGNAL_RDI_OFF,
2477 "RDI gone"); 2505 "RDI gone");
2478 hc->chan[hc->dslot].rdi = temp; 2506 hc->chan[hc->dnum[0]].rdi = temp;
2479 } 2507 }
2480 temp = HFC_inb_nodebug(hc, R_JATT_DIR); 2508 temp = HFC_inb_nodebug(hc, R_JATT_DIR);
2481 switch (hc->chan[hc->dslot].sync) { 2509 switch (hc->chan[hc->dnum[0]].sync) {
2482 case 0: 2510 case 0:
2483 if ((temp & 0x60) == 0x60) { 2511 if ((temp & 0x60) == 0x60) {
2484 if (debug & DEBUG_HFCMULTI_SYNC) 2512 if (debug & DEBUG_HFCMULTI_SYNC)
@@ -2487,10 +2515,10 @@ handle_timer_irq(struct hfc_multi *hc)
2487 "in clock sync\n", 2515 "in clock sync\n",
2488 __func__, hc->id); 2516 __func__, hc->id);
2489 HFC_outb(hc, R_RX_OFF, 2517 HFC_outb(hc, R_RX_OFF,
2490 hc->chan[hc->dslot].jitter | V_RX_INIT); 2518 hc->chan[hc->dnum[0]].jitter | V_RX_INIT);
2491 HFC_outb(hc, R_TX_OFF, 2519 HFC_outb(hc, R_TX_OFF,
2492 hc->chan[hc->dslot].jitter | V_RX_INIT); 2520 hc->chan[hc->dnum[0]].jitter | V_RX_INIT);
2493 hc->chan[hc->dslot].sync = 1; 2521 hc->chan[hc->dnum[0]].sync = 1;
2494 goto check_framesync; 2522 goto check_framesync;
2495 } 2523 }
2496 break; 2524 break;
@@ -2501,7 +2529,7 @@ handle_timer_irq(struct hfc_multi *hc)
2501 "%s: (id=%d) E1 " 2529 "%s: (id=%d) E1 "
2502 "lost clock sync\n", 2530 "lost clock sync\n",
2503 __func__, hc->id); 2531 __func__, hc->id);
2504 hc->chan[hc->dslot].sync = 0; 2532 hc->chan[hc->dnum[0]].sync = 0;
2505 break; 2533 break;
2506 } 2534 }
2507 check_framesync: 2535 check_framesync:
@@ -2512,7 +2540,7 @@ handle_timer_irq(struct hfc_multi *hc)
2512 "%s: (id=%d) E1 " 2540 "%s: (id=%d) E1 "
2513 "now in frame sync\n", 2541 "now in frame sync\n",
2514 __func__, hc->id); 2542 __func__, hc->id);
2515 hc->chan[hc->dslot].sync = 2; 2543 hc->chan[hc->dnum[0]].sync = 2;
2516 } 2544 }
2517 break; 2545 break;
2518 case 2: 2546 case 2:
@@ -2522,7 +2550,7 @@ handle_timer_irq(struct hfc_multi *hc)
2522 "%s: (id=%d) E1 lost " 2550 "%s: (id=%d) E1 lost "
2523 "clock & frame sync\n", 2551 "clock & frame sync\n",
2524 __func__, hc->id); 2552 __func__, hc->id);
2525 hc->chan[hc->dslot].sync = 0; 2553 hc->chan[hc->dnum[0]].sync = 0;
2526 break; 2554 break;
2527 } 2555 }
2528 temp = HFC_inb_nodebug(hc, R_SYNC_STA); 2556 temp = HFC_inb_nodebug(hc, R_SYNC_STA);
@@ -2532,7 +2560,7 @@ handle_timer_irq(struct hfc_multi *hc)
2532 "%s: (id=%d) E1 " 2560 "%s: (id=%d) E1 "
2533 "lost frame sync\n", 2561 "lost frame sync\n",
2534 __func__, hc->id); 2562 __func__, hc->id);
2535 hc->chan[hc->dslot].sync = 1; 2563 hc->chan[hc->dnum[0]].sync = 1;
2536 } 2564 }
2537 break; 2565 break;
2538 } 2566 }
@@ -2673,7 +2701,7 @@ hfcmulti_interrupt(int intno, void *dev_id)
2673 int i; 2701 int i;
2674 void __iomem *plx_acc; 2702 void __iomem *plx_acc;
2675 u_short wval; 2703 u_short wval;
2676 u_char e1_syncsta, temp; 2704 u_char e1_syncsta, temp, temp2;
2677 u_long flags; 2705 u_long flags;
2678 2706
2679 if (!hc) { 2707 if (!hc) {
@@ -2748,7 +2776,7 @@ hfcmulti_interrupt(int intno, void *dev_id)
2748 if (r_irq_misc & V_STA_IRQ) { 2776 if (r_irq_misc & V_STA_IRQ) {
2749 if (hc->ctype == HFC_TYPE_E1) { 2777 if (hc->ctype == HFC_TYPE_E1) {
2750 /* state machine */ 2778 /* state machine */
2751 dch = hc->chan[hc->dslot].dch; 2779 dch = hc->chan[hc->dnum[0]].dch;
2752 e1_syncsta = HFC_inb_nodebug(hc, R_SYNC_STA); 2780 e1_syncsta = HFC_inb_nodebug(hc, R_SYNC_STA);
2753 if (test_bit(HFC_CHIP_PLXSD, &hc->chip) 2781 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)
2754 && hc->e1_getclock) { 2782 && hc->e1_getclock) {
@@ -2758,23 +2786,26 @@ hfcmulti_interrupt(int intno, void *dev_id)
2758 hc->syncronized = 0; 2786 hc->syncronized = 0;
2759 } 2787 }
2760 /* undocumented: status changes during read */ 2788 /* undocumented: status changes during read */
2761 dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA); 2789 temp = HFC_inb_nodebug(hc, R_E1_RD_STA);
2762 while (dch->state != (temp = 2790 while (temp != (temp2 =
2763 HFC_inb_nodebug(hc, R_E1_RD_STA))) { 2791 HFC_inb_nodebug(hc, R_E1_RD_STA))) {
2764 if (debug & DEBUG_HFCMULTI_STATE) 2792 if (debug & DEBUG_HFCMULTI_STATE)
2765 printk(KERN_DEBUG "%s: reread " 2793 printk(KERN_DEBUG "%s: reread "
2766 "STATE because %d!=%d\n", 2794 "STATE because %d!=%d\n",
2767 __func__, temp, 2795 __func__, temp, temp2);
2768 dch->state); 2796 temp = temp2; /* repeat */
2769 dch->state = temp; /* repeat */
2770 } 2797 }
2771 dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA) 2798 /* broadcast state change to all fragments */
2772 & 0x7;
2773 schedule_event(dch, FLG_PHCHANGE);
2774 if (debug & DEBUG_HFCMULTI_STATE) 2799 if (debug & DEBUG_HFCMULTI_STATE)
2775 printk(KERN_DEBUG 2800 printk(KERN_DEBUG
2776 "%s: E1 (id=%d) newstate %x\n", 2801 "%s: E1 (id=%d) newstate %x\n",
2777 __func__, hc->id, dch->state); 2802 __func__, hc->id, temp & 0x7);
2803 for (i = 0; i < hc->ports; i++) {
2804 dch = hc->chan[hc->dnum[i]].dch;
2805 dch->state = temp & 0x7;
2806 schedule_event(dch, FLG_PHCHANGE);
2807 }
2808
2778 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) 2809 if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
2779 plxsd_checksync(hc, 0); 2810 plxsd_checksync(hc, 0);
2780 } 2811 }
@@ -3018,8 +3049,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
3018 HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF); 3049 HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
3019 HFC_outb(hc, A_SUBCH_CFG, 0); 3050 HFC_outb(hc, A_SUBCH_CFG, 0);
3020 HFC_outb(hc, A_IRQ_MSK, 0); 3051 HFC_outb(hc, A_IRQ_MSK, 0);
3021 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); 3052 if (hc->chan[ch].protocol != protocol) {
3022 HFC_wait(hc); 3053 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
3054 HFC_wait(hc);
3055 }
3023 HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) + 3056 HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) +
3024 ((ch % 4) * 4) + 1) << 1) | 1); 3057 ((ch % 4) * 4) + 1) << 1) | 1);
3025 HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1) | 1); 3058 HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1) | 1);
@@ -3039,8 +3072,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
3039 HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF); 3072 HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
3040 HFC_outb(hc, A_SUBCH_CFG, 0); 3073 HFC_outb(hc, A_SUBCH_CFG, 0);
3041 HFC_outb(hc, A_IRQ_MSK, 0); 3074 HFC_outb(hc, A_IRQ_MSK, 0);
3042 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); 3075 if (hc->chan[ch].protocol != protocol) {
3043 HFC_wait(hc); 3076 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
3077 HFC_wait(hc);
3078 }
3044 /* tx silence */ 3079 /* tx silence */
3045 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence); 3080 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
3046 HFC_outb(hc, R_SLOT, (((ch / 4) * 8) + 3081 HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
@@ -3059,8 +3094,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
3059 V_HDLC_TRP | V_IFF); 3094 V_HDLC_TRP | V_IFF);
3060 HFC_outb(hc, A_SUBCH_CFG, 0); 3095 HFC_outb(hc, A_SUBCH_CFG, 0);
3061 HFC_outb(hc, A_IRQ_MSK, 0); 3096 HFC_outb(hc, A_IRQ_MSK, 0);
3062 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); 3097 if (hc->chan[ch].protocol != protocol) {
3063 HFC_wait(hc); 3098 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
3099 HFC_wait(hc);
3100 }
3064 /* tx silence */ 3101 /* tx silence */
3065 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence); 3102 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
3066 /* enable RX fifo */ 3103 /* enable RX fifo */
@@ -3075,8 +3112,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
3075 V_HDLC_TRP); 3112 V_HDLC_TRP);
3076 HFC_outb(hc, A_SUBCH_CFG, 0); 3113 HFC_outb(hc, A_SUBCH_CFG, 0);
3077 HFC_outb(hc, A_IRQ_MSK, 0); 3114 HFC_outb(hc, A_IRQ_MSK, 0);
3078 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); 3115 if (hc->chan[ch].protocol != protocol) {
3079 HFC_wait(hc); 3116 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
3117 HFC_wait(hc);
3118 }
3080 } 3119 }
3081 if (hc->ctype != HFC_TYPE_E1) { 3120 if (hc->ctype != HFC_TYPE_E1) {
3082 hc->hw.a_st_ctrl0[hc->chan[ch].port] |= 3121 hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
@@ -3433,8 +3472,7 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
3433 struct hfc_multi *hc = bch->hw; 3472 struct hfc_multi *hc = bch->hw;
3434 int ret = -EINVAL; 3473 int ret = -EINVAL;
3435 struct mISDNhead *hh = mISDN_HEAD_P(skb); 3474 struct mISDNhead *hh = mISDN_HEAD_P(skb);
3436 unsigned int id; 3475 unsigned long flags;
3437 u_long flags;
3438 3476
3439 switch (hh->prim) { 3477 switch (hh->prim) {
3440 case PH_DATA_REQ: 3478 case PH_DATA_REQ:
@@ -3443,19 +3481,13 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
3443 spin_lock_irqsave(&hc->lock, flags); 3481 spin_lock_irqsave(&hc->lock, flags);
3444 ret = bchannel_senddata(bch, skb); 3482 ret = bchannel_senddata(bch, skb);
3445 if (ret > 0) { /* direct TX */ 3483 if (ret > 0) { /* direct TX */
3446 id = hh->id; /* skb can be freed */
3447 hfcmulti_tx(hc, bch->slot); 3484 hfcmulti_tx(hc, bch->slot);
3448 ret = 0; 3485 ret = 0;
3449 /* start fifo */ 3486 /* start fifo */
3450 HFC_outb_nodebug(hc, R_FIFO, 0); 3487 HFC_outb_nodebug(hc, R_FIFO, 0);
3451 HFC_wait_nodebug(hc); 3488 HFC_wait_nodebug(hc);
3452 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) { 3489 }
3453 spin_unlock_irqrestore(&hc->lock, flags); 3490 spin_unlock_irqrestore(&hc->lock, flags);
3454 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
3455 } else
3456 spin_unlock_irqrestore(&hc->lock, flags);
3457 } else
3458 spin_unlock_irqrestore(&hc->lock, flags);
3459 return ret; 3491 return ret;
3460 case PH_ACTIVATE_REQ: 3492 case PH_ACTIVATE_REQ:
3461 if (debug & DEBUG_HFCMULTI_MSG) 3493 if (debug & DEBUG_HFCMULTI_MSG)
@@ -3545,10 +3577,11 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
3545 3577
3546 switch (cq->op) { 3578 switch (cq->op) {
3547 case MISDN_CTRL_GETOP: 3579 case MISDN_CTRL_GETOP:
3548 cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP 3580 ret = mISDN_ctrl_bchannel(bch, cq);
3549 | MISDN_CTRL_RX_OFF | MISDN_CTRL_FILL_EMPTY; 3581 cq->op |= MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP;
3550 break; 3582 break;
3551 case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */ 3583 case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */
3584 ret = mISDN_ctrl_bchannel(bch, cq);
3552 hc->chan[bch->slot].rx_off = !!cq->p1; 3585 hc->chan[bch->slot].rx_off = !!cq->p1;
3553 if (!hc->chan[bch->slot].rx_off) { 3586 if (!hc->chan[bch->slot].rx_off) {
3554 /* reset fifo on rx on */ 3587 /* reset fifo on rx on */
@@ -3561,11 +3594,10 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
3561 printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n", 3594 printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n",
3562 __func__, bch->nr, hc->chan[bch->slot].rx_off); 3595 __func__, bch->nr, hc->chan[bch->slot].rx_off);
3563 break; 3596 break;
3564 case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */ 3597 case MISDN_CTRL_FILL_EMPTY:
3565 test_and_set_bit(FLG_FILLEMPTY, &bch->Flags); 3598 ret = mISDN_ctrl_bchannel(bch, cq);
3566 if (debug & DEBUG_HFCMULTI_MSG) 3599 hc->silence = bch->fill[0];
3567 printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d " 3600 memset(hc->silence_data, hc->silence, sizeof(hc->silence_data));
3568 "off=%d)\n", __func__, bch->nr, !!cq->p1);
3569 break; 3601 break;
3570 case MISDN_CTRL_HW_FEATURES: /* fill features structure */ 3602 case MISDN_CTRL_HW_FEATURES: /* fill features structure */
3571 if (debug & DEBUG_HFCMULTI_MSG) 3603 if (debug & DEBUG_HFCMULTI_MSG)
@@ -3654,9 +3686,7 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
3654 ret = -EINVAL; 3686 ret = -EINVAL;
3655 break; 3687 break;
3656 default: 3688 default:
3657 printk(KERN_WARNING "%s: unknown Op %x\n", 3689 ret = mISDN_ctrl_bchannel(bch, cq);
3658 __func__, cq->op);
3659 ret = -EINVAL;
3660 break; 3690 break;
3661 } 3691 }
3662 return ret; 3692 return ret;
@@ -3676,8 +3706,7 @@ hfcm_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
3676 switch (cmd) { 3706 switch (cmd) {
3677 case CLOSE_CHANNEL: 3707 case CLOSE_CHANNEL:
3678 test_and_clear_bit(FLG_OPEN, &bch->Flags); 3708 test_and_clear_bit(FLG_OPEN, &bch->Flags);
3679 if (test_bit(FLG_ACTIVE, &bch->Flags)) 3709 deactivate_bchannel(bch); /* locked there */
3680 deactivate_bchannel(bch); /* locked there */
3681 ch->protocol = ISDN_P_NONE; 3710 ch->protocol = ISDN_P_NONE;
3682 ch->peer = NULL; 3711 ch->peer = NULL;
3683 module_put(THIS_MODULE); 3712 module_put(THIS_MODULE);
@@ -3839,31 +3868,37 @@ hfcmulti_initmode(struct dchannel *dch)
3839 if (debug & DEBUG_HFCMULTI_INIT) 3868 if (debug & DEBUG_HFCMULTI_INIT)
3840 printk(KERN_DEBUG "%s: entered\n", __func__); 3869 printk(KERN_DEBUG "%s: entered\n", __func__);
3841 3870
3871 i = dch->slot;
3872 pt = hc->chan[i].port;
3842 if (hc->ctype == HFC_TYPE_E1) { 3873 if (hc->ctype == HFC_TYPE_E1) {
3843 hc->chan[hc->dslot].slot_tx = -1; 3874 /* E1 */
3844 hc->chan[hc->dslot].slot_rx = -1; 3875 hc->chan[hc->dnum[pt]].slot_tx = -1;
3845 hc->chan[hc->dslot].conf = -1; 3876 hc->chan[hc->dnum[pt]].slot_rx = -1;
3846 if (hc->dslot) { 3877 hc->chan[hc->dnum[pt]].conf = -1;
3847 mode_hfcmulti(hc, hc->dslot, dch->dev.D.protocol, 3878 if (hc->dnum[pt]) {
3879 mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
3848 -1, 0, -1, 0); 3880 -1, 0, -1, 0);
3849 dch->timer.function = (void *) hfcmulti_dbusy_timer; 3881 dch->timer.function = (void *) hfcmulti_dbusy_timer;
3850 dch->timer.data = (long) dch; 3882 dch->timer.data = (long) dch;
3851 init_timer(&dch->timer); 3883 init_timer(&dch->timer);
3852 } 3884 }
3853 for (i = 1; i <= 31; i++) { 3885 for (i = 1; i <= 31; i++) {
3854 if (i == hc->dslot) 3886 if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
3855 continue; 3887 continue;
3856 hc->chan[i].slot_tx = -1; 3888 hc->chan[i].slot_tx = -1;
3857 hc->chan[i].slot_rx = -1; 3889 hc->chan[i].slot_rx = -1;
3858 hc->chan[i].conf = -1; 3890 hc->chan[i].conf = -1;
3859 mode_hfcmulti(hc, i, ISDN_P_NONE, -1, 0, -1, 0); 3891 mode_hfcmulti(hc, i, ISDN_P_NONE, -1, 0, -1, 0);
3860 } 3892 }
3861 /* E1 */ 3893 }
3862 if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) { 3894 if (hc->ctype == HFC_TYPE_E1 && pt == 0) {
3895 /* E1, port 0 */
3896 dch = hc->chan[hc->dnum[0]].dch;
3897 if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dnum[0]].cfg)) {
3863 HFC_outb(hc, R_LOS0, 255); /* 2 ms */ 3898 HFC_outb(hc, R_LOS0, 255); /* 2 ms */
3864 HFC_outb(hc, R_LOS1, 255); /* 512 ms */ 3899 HFC_outb(hc, R_LOS1, 255); /* 512 ms */
3865 } 3900 }
3866 if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dslot].cfg)) { 3901 if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dnum[0]].cfg)) {
3867 HFC_outb(hc, R_RX0, 0); 3902 HFC_outb(hc, R_RX0, 0);
3868 hc->hw.r_tx0 = 0 | V_OUT_EN; 3903 hc->hw.r_tx0 = 0 | V_OUT_EN;
3869 } else { 3904 } else {
@@ -3876,12 +3911,12 @@ hfcmulti_initmode(struct dchannel *dch)
3876 HFC_outb(hc, R_TX_FR0, 0x00); 3911 HFC_outb(hc, R_TX_FR0, 0x00);
3877 HFC_outb(hc, R_TX_FR1, 0xf8); 3912 HFC_outb(hc, R_TX_FR1, 0xf8);
3878 3913
3879 if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg)) 3914 if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dnum[0]].cfg))
3880 HFC_outb(hc, R_TX_FR2, V_TX_MF | V_TX_E | V_NEG_E); 3915 HFC_outb(hc, R_TX_FR2, V_TX_MF | V_TX_E | V_NEG_E);
3881 3916
3882 HFC_outb(hc, R_RX_FR0, V_AUTO_RESYNC | V_AUTO_RECO | 0); 3917 HFC_outb(hc, R_RX_FR0, V_AUTO_RESYNC | V_AUTO_RECO | 0);
3883 3918
3884 if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg)) 3919 if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dnum[0]].cfg))
3885 HFC_outb(hc, R_RX_FR1, V_RX_MF | V_RX_MF_SYNC); 3920 HFC_outb(hc, R_RX_FR1, V_RX_MF | V_RX_MF_SYNC);
3886 3921
3887 if (dch->dev.D.protocol == ISDN_P_NT_E1) { 3922 if (dch->dev.D.protocol == ISDN_P_NT_E1) {
@@ -3944,13 +3979,14 @@ hfcmulti_initmode(struct dchannel *dch)
3944 hc->syncronized = 0; 3979 hc->syncronized = 0;
3945 plxsd_checksync(hc, 0); 3980 plxsd_checksync(hc, 0);
3946 } 3981 }
3947 } else { 3982 }
3948 i = dch->slot; 3983 if (hc->ctype != HFC_TYPE_E1) {
3984 /* ST */
3949 hc->chan[i].slot_tx = -1; 3985 hc->chan[i].slot_tx = -1;
3950 hc->chan[i].slot_rx = -1; 3986 hc->chan[i].slot_rx = -1;
3951 hc->chan[i].conf = -1; 3987 hc->chan[i].conf = -1;
3952 mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0); 3988 mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
3953 dch->timer.function = (void *)hfcmulti_dbusy_timer; 3989 dch->timer.function = (void *) hfcmulti_dbusy_timer;
3954 dch->timer.data = (long) dch; 3990 dch->timer.data = (long) dch;
3955 init_timer(&dch->timer); 3991 init_timer(&dch->timer);
3956 hc->chan[i - 2].slot_tx = -1; 3992 hc->chan[i - 2].slot_tx = -1;
@@ -3961,8 +3997,6 @@ hfcmulti_initmode(struct dchannel *dch)
3961 hc->chan[i - 1].slot_rx = -1; 3997 hc->chan[i - 1].slot_rx = -1;
3962 hc->chan[i - 1].conf = -1; 3998 hc->chan[i - 1].conf = -1;
3963 mode_hfcmulti(hc, i - 1, ISDN_P_NONE, -1, 0, -1, 0); 3999 mode_hfcmulti(hc, i - 1, ISDN_P_NONE, -1, 0, -1, 0);
3964 /* ST */
3965 pt = hc->chan[i].port;
3966 /* select interface */ 4000 /* select interface */
3967 HFC_outb(hc, R_ST_SEL, pt); 4001 HFC_outb(hc, R_ST_SEL, pt);
3968 /* undocumented: delay after R_ST_SEL */ 4002 /* undocumented: delay after R_ST_SEL */
@@ -4054,14 +4088,9 @@ open_dchannel(struct hfc_multi *hc, struct dchannel *dch,
4054 hfcmulti_initmode(dch); 4088 hfcmulti_initmode(dch);
4055 spin_unlock_irqrestore(&hc->lock, flags); 4089 spin_unlock_irqrestore(&hc->lock, flags);
4056 } 4090 }
4057 4091 if (test_bit(FLG_ACTIVE, &dch->Flags))
4058 if (((rq->protocol == ISDN_P_NT_S0) && (dch->state == 3)) ||
4059 ((rq->protocol == ISDN_P_TE_S0) && (dch->state == 7)) ||
4060 ((rq->protocol == ISDN_P_NT_E1) && (dch->state == 1)) ||
4061 ((rq->protocol == ISDN_P_TE_E1) && (dch->state == 1))) {
4062 _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 4092 _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY,
4063 0, NULL, GFP_KERNEL); 4093 0, NULL, GFP_KERNEL);
4064 }
4065 rq->ch = &dch->dev.D; 4094 rq->ch = &dch->dev.D;
4066 if (!try_module_get(THIS_MODULE)) 4095 if (!try_module_get(THIS_MODULE))
4067 printk(KERN_WARNING "%s:cannot get module\n", __func__); 4096 printk(KERN_WARNING "%s:cannot get module\n", __func__);
@@ -4091,7 +4120,6 @@ open_bchannel(struct hfc_multi *hc, struct dchannel *dch,
4091 } 4120 }
4092 if (test_and_set_bit(FLG_OPEN, &bch->Flags)) 4121 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
4093 return -EBUSY; /* b-channel can be only open once */ 4122 return -EBUSY; /* b-channel can be only open once */
4094 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
4095 bch->ch.protocol = rq->protocol; 4123 bch->ch.protocol = rq->protocol;
4096 hc->chan[ch].rx_off = 0; 4124 hc->chan[ch].rx_off = 0;
4097 rq->ch = &bch->ch; 4125 rq->ch = &bch->ch;
@@ -4112,7 +4140,7 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
4112 4140
4113 switch (cq->op) { 4141 switch (cq->op) {
4114 case MISDN_CTRL_GETOP: 4142 case MISDN_CTRL_GETOP:
4115 cq->op = MISDN_CTRL_HFC_OP; 4143 cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_L1_TIMER3;
4116 break; 4144 break;
4117 case MISDN_CTRL_HFC_WD_INIT: /* init the watchdog */ 4145 case MISDN_CTRL_HFC_WD_INIT: /* init the watchdog */
4118 wd_cnt = cq->p1 & 0xf; 4146 wd_cnt = cq->p1 & 0xf;
@@ -4142,6 +4170,9 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
4142 __func__); 4170 __func__);
4143 HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES); 4171 HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES);
4144 break; 4172 break;
4173 case MISDN_CTRL_L1_TIMER3:
4174 ret = l1_event(dch->l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
4175 break;
4145 default: 4176 default:
4146 printk(KERN_WARNING "%s: unknown Op %x\n", 4177 printk(KERN_WARNING "%s: unknown Op %x\n",
4147 __func__, cq->op); 4178 __func__, cq->op);
@@ -4545,6 +4576,8 @@ release_port(struct hfc_multi *hc, struct dchannel *dch)
4545 } 4576 }
4546 /* free channels */ 4577 /* free channels */
4547 for (i = 0; i <= 31; i++) { 4578 for (i = 0; i <= 31; i++) {
4579 if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
4580 continue;
4548 if (hc->chan[i].bch) { 4581 if (hc->chan[i].bch) {
4549 if (debug & DEBUG_HFCMULTI_INIT) 4582 if (debug & DEBUG_HFCMULTI_INIT)
4550 printk(KERN_DEBUG 4583 printk(KERN_DEBUG
@@ -4600,7 +4633,8 @@ release_port(struct hfc_multi *hc, struct dchannel *dch)
4600 spin_unlock_irqrestore(&hc->lock, flags); 4633 spin_unlock_irqrestore(&hc->lock, flags);
4601 4634
4602 if (debug & DEBUG_HFCMULTI_INIT) 4635 if (debug & DEBUG_HFCMULTI_INIT)
4603 printk(KERN_DEBUG "%s: free port %d channel D\n", __func__, pt); 4636 printk(KERN_DEBUG "%s: free port %d channel D(%d)\n", __func__,
4637 pt+1, ci);
4604 mISDN_freedchannel(dch); 4638 mISDN_freedchannel(dch);
4605 kfree(dch); 4639 kfree(dch);
4606 4640
@@ -4622,15 +4656,19 @@ release_card(struct hfc_multi *hc)
4622 if (hc->iclock) 4656 if (hc->iclock)
4623 mISDN_unregister_clock(hc->iclock); 4657 mISDN_unregister_clock(hc->iclock);
4624 4658
4625 /* disable irq */ 4659 /* disable and free irq */
4626 spin_lock_irqsave(&hc->lock, flags); 4660 spin_lock_irqsave(&hc->lock, flags);
4627 disable_hwirq(hc); 4661 disable_hwirq(hc);
4628 spin_unlock_irqrestore(&hc->lock, flags); 4662 spin_unlock_irqrestore(&hc->lock, flags);
4629 udelay(1000); 4663 udelay(1000);
4664 if (hc->irq) {
4665 if (debug & DEBUG_HFCMULTI_INIT)
4666 printk(KERN_DEBUG "%s: free irq %d (hc=%p)\n",
4667 __func__, hc->irq, hc);
4668 free_irq(hc->irq, hc);
4669 hc->irq = 0;
4630 4670
4631 /* dimm leds */ 4671 }
4632 if (hc->leds)
4633 hfcmulti_leds(hc);
4634 4672
4635 /* disable D-channels & B-channels */ 4673 /* disable D-channels & B-channels */
4636 if (debug & DEBUG_HFCMULTI_INIT) 4674 if (debug & DEBUG_HFCMULTI_INIT)
@@ -4641,15 +4679,11 @@ release_card(struct hfc_multi *hc)
4641 release_port(hc, hc->chan[ch].dch); 4679 release_port(hc, hc->chan[ch].dch);
4642 } 4680 }
4643 4681
4644 /* release hardware & irq */ 4682 /* dimm leds */
4645 if (hc->irq) { 4683 if (hc->leds)
4646 if (debug & DEBUG_HFCMULTI_INIT) 4684 hfcmulti_leds(hc);
4647 printk(KERN_DEBUG "%s: free irq %d\n",
4648 __func__, hc->irq);
4649 free_irq(hc->irq, hc);
4650 hc->irq = 0;
4651 4685
4652 } 4686 /* release hardware */
4653 release_io_hfcmulti(hc); 4687 release_io_hfcmulti(hc);
4654 4688
4655 if (debug & DEBUG_HFCMULTI_INIT) 4689 if (debug & DEBUG_HFCMULTI_INIT)
@@ -4667,61 +4701,9 @@ release_card(struct hfc_multi *hc)
4667 __func__); 4701 __func__);
4668} 4702}
4669 4703
4670static int 4704static void
4671init_e1_port(struct hfc_multi *hc, struct hm_map *m) 4705init_e1_port_hw(struct hfc_multi *hc, struct hm_map *m)
4672{ 4706{
4673 struct dchannel *dch;
4674 struct bchannel *bch;
4675 int ch, ret = 0;
4676 char name[MISDN_MAX_IDLEN];
4677
4678 dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
4679 if (!dch)
4680 return -ENOMEM;
4681 dch->debug = debug;
4682 mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
4683 dch->hw = hc;
4684 dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
4685 dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
4686 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
4687 dch->dev.D.send = handle_dmsg;
4688 dch->dev.D.ctrl = hfcm_dctrl;
4689 dch->dev.nrbchan = (hc->dslot) ? 30 : 31;
4690 dch->slot = hc->dslot;
4691 hc->chan[hc->dslot].dch = dch;
4692 hc->chan[hc->dslot].port = 0;
4693 hc->chan[hc->dslot].nt_timer = -1;
4694 for (ch = 1; ch <= 31; ch++) {
4695 if (ch == hc->dslot) /* skip dchannel */
4696 continue;
4697 bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
4698 if (!bch) {
4699 printk(KERN_ERR "%s: no memory for bchannel\n",
4700 __func__);
4701 ret = -ENOMEM;
4702 goto free_chan;
4703 }
4704 hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL);
4705 if (!hc->chan[ch].coeff) {
4706 printk(KERN_ERR "%s: no memory for coeffs\n",
4707 __func__);
4708 ret = -ENOMEM;
4709 kfree(bch);
4710 goto free_chan;
4711 }
4712 bch->nr = ch;
4713 bch->slot = ch;
4714 bch->debug = debug;
4715 mISDN_initbchannel(bch, MAX_DATA_MEM);
4716 bch->hw = hc;
4717 bch->ch.send = handle_bmsg;
4718 bch->ch.ctrl = hfcm_bctrl;
4719 bch->ch.nr = ch;
4720 list_add(&bch->ch.list, &dch->dev.bchannels);
4721 hc->chan[ch].bch = bch;
4722 hc->chan[ch].port = 0;
4723 set_channelmap(bch->nr, dch->dev.channelmap);
4724 }
4725 /* set optical line type */ 4707 /* set optical line type */
4726 if (port[Port_cnt] & 0x001) { 4708 if (port[Port_cnt] & 0x001) {
4727 if (!m->opticalsupport) { 4709 if (!m->opticalsupport) {
@@ -4737,7 +4719,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4737 __func__, 4719 __func__,
4738 HFC_cnt + 1, 1); 4720 HFC_cnt + 1, 1);
4739 test_and_set_bit(HFC_CFG_OPTICAL, 4721 test_and_set_bit(HFC_CFG_OPTICAL,
4740 &hc->chan[hc->dslot].cfg); 4722 &hc->chan[hc->dnum[0]].cfg);
4741 } 4723 }
4742 } 4724 }
4743 /* set LOS report */ 4725 /* set LOS report */
@@ -4747,7 +4729,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4747 "LOS report: card(%d) port(%d)\n", 4729 "LOS report: card(%d) port(%d)\n",
4748 __func__, HFC_cnt + 1, 1); 4730 __func__, HFC_cnt + 1, 1);
4749 test_and_set_bit(HFC_CFG_REPORT_LOS, 4731 test_and_set_bit(HFC_CFG_REPORT_LOS,
4750 &hc->chan[hc->dslot].cfg); 4732 &hc->chan[hc->dnum[0]].cfg);
4751 } 4733 }
4752 /* set AIS report */ 4734 /* set AIS report */
4753 if (port[Port_cnt] & 0x008) { 4735 if (port[Port_cnt] & 0x008) {
@@ -4756,7 +4738,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4756 "AIS report: card(%d) port(%d)\n", 4738 "AIS report: card(%d) port(%d)\n",
4757 __func__, HFC_cnt + 1, 1); 4739 __func__, HFC_cnt + 1, 1);
4758 test_and_set_bit(HFC_CFG_REPORT_AIS, 4740 test_and_set_bit(HFC_CFG_REPORT_AIS,
4759 &hc->chan[hc->dslot].cfg); 4741 &hc->chan[hc->dnum[0]].cfg);
4760 } 4742 }
4761 /* set SLIP report */ 4743 /* set SLIP report */
4762 if (port[Port_cnt] & 0x010) { 4744 if (port[Port_cnt] & 0x010) {
@@ -4766,7 +4748,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4766 "card(%d) port(%d)\n", 4748 "card(%d) port(%d)\n",
4767 __func__, HFC_cnt + 1, 1); 4749 __func__, HFC_cnt + 1, 1);
4768 test_and_set_bit(HFC_CFG_REPORT_SLIP, 4750 test_and_set_bit(HFC_CFG_REPORT_SLIP,
4769 &hc->chan[hc->dslot].cfg); 4751 &hc->chan[hc->dnum[0]].cfg);
4770 } 4752 }
4771 /* set RDI report */ 4753 /* set RDI report */
4772 if (port[Port_cnt] & 0x020) { 4754 if (port[Port_cnt] & 0x020) {
@@ -4776,7 +4758,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4776 "card(%d) port(%d)\n", 4758 "card(%d) port(%d)\n",
4777 __func__, HFC_cnt + 1, 1); 4759 __func__, HFC_cnt + 1, 1);
4778 test_and_set_bit(HFC_CFG_REPORT_RDI, 4760 test_and_set_bit(HFC_CFG_REPORT_RDI,
4779 &hc->chan[hc->dslot].cfg); 4761 &hc->chan[hc->dnum[0]].cfg);
4780 } 4762 }
4781 /* set CRC-4 Mode */ 4763 /* set CRC-4 Mode */
4782 if (!(port[Port_cnt] & 0x100)) { 4764 if (!(port[Port_cnt] & 0x100)) {
@@ -4785,7 +4767,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4785 " card(%d) port(%d)\n", 4767 " card(%d) port(%d)\n",
4786 __func__, HFC_cnt + 1, 1); 4768 __func__, HFC_cnt + 1, 1);
4787 test_and_set_bit(HFC_CFG_CRC4, 4769 test_and_set_bit(HFC_CFG_CRC4,
4788 &hc->chan[hc->dslot].cfg); 4770 &hc->chan[hc->dnum[0]].cfg);
4789 } else { 4771 } else {
4790 if (debug & DEBUG_HFCMULTI_INIT) 4772 if (debug & DEBUG_HFCMULTI_INIT)
4791 printk(KERN_DEBUG "%s: PORT turn off CRC4" 4773 printk(KERN_DEBUG "%s: PORT turn off CRC4"
@@ -4817,20 +4799,85 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4817 } 4799 }
4818 /* set elastic jitter buffer */ 4800 /* set elastic jitter buffer */
4819 if (port[Port_cnt] & 0x3000) { 4801 if (port[Port_cnt] & 0x3000) {
4820 hc->chan[hc->dslot].jitter = (port[Port_cnt]>>12) & 0x3; 4802 hc->chan[hc->dnum[0]].jitter = (port[Port_cnt]>>12) & 0x3;
4821 if (debug & DEBUG_HFCMULTI_INIT) 4803 if (debug & DEBUG_HFCMULTI_INIT)
4822 printk(KERN_DEBUG 4804 printk(KERN_DEBUG
4823 "%s: PORT set elastic " 4805 "%s: PORT set elastic "
4824 "buffer to %d: card(%d) port(%d)\n", 4806 "buffer to %d: card(%d) port(%d)\n",
4825 __func__, hc->chan[hc->dslot].jitter, 4807 __func__, hc->chan[hc->dnum[0]].jitter,
4826 HFC_cnt + 1, 1); 4808 HFC_cnt + 1, 1);
4827 } else 4809 } else
4828 hc->chan[hc->dslot].jitter = 2; /* default */ 4810 hc->chan[hc->dnum[0]].jitter = 2; /* default */
4829 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1); 4811}
4812
4813static int
4814init_e1_port(struct hfc_multi *hc, struct hm_map *m, int pt)
4815{
4816 struct dchannel *dch;
4817 struct bchannel *bch;
4818 int ch, ret = 0;
4819 char name[MISDN_MAX_IDLEN];
4820 int bcount = 0;
4821
4822 dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
4823 if (!dch)
4824 return -ENOMEM;
4825 dch->debug = debug;
4826 mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
4827 dch->hw = hc;
4828 dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
4829 dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
4830 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
4831 dch->dev.D.send = handle_dmsg;
4832 dch->dev.D.ctrl = hfcm_dctrl;
4833 dch->slot = hc->dnum[pt];
4834 hc->chan[hc->dnum[pt]].dch = dch;
4835 hc->chan[hc->dnum[pt]].port = pt;
4836 hc->chan[hc->dnum[pt]].nt_timer = -1;
4837 for (ch = 1; ch <= 31; ch++) {
4838 if (!((1 << ch) & hc->bmask[pt])) /* skip unused channel */
4839 continue;
4840 bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
4841 if (!bch) {
4842 printk(KERN_ERR "%s: no memory for bchannel\n",
4843 __func__);
4844 ret = -ENOMEM;
4845 goto free_chan;
4846 }
4847 hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL);
4848 if (!hc->chan[ch].coeff) {
4849 printk(KERN_ERR "%s: no memory for coeffs\n",
4850 __func__);
4851 ret = -ENOMEM;
4852 kfree(bch);
4853 goto free_chan;
4854 }
4855 bch->nr = ch;
4856 bch->slot = ch;
4857 bch->debug = debug;
4858 mISDN_initbchannel(bch, MAX_DATA_MEM, poll >> 1);
4859 bch->hw = hc;
4860 bch->ch.send = handle_bmsg;
4861 bch->ch.ctrl = hfcm_bctrl;
4862 bch->ch.nr = ch;
4863 list_add(&bch->ch.list, &dch->dev.bchannels);
4864 hc->chan[ch].bch = bch;
4865 hc->chan[ch].port = pt;
4866 set_channelmap(bch->nr, dch->dev.channelmap);
4867 bcount++;
4868 }
4869 dch->dev.nrbchan = bcount;
4870 if (pt == 0)
4871 init_e1_port_hw(hc, m);
4872 if (hc->ports > 1)
4873 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d-%d",
4874 HFC_cnt + 1, pt+1);
4875 else
4876 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1);
4830 ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name); 4877 ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
4831 if (ret) 4878 if (ret)
4832 goto free_chan; 4879 goto free_chan;
4833 hc->created[0] = 1; 4880 hc->created[pt] = 1;
4834 return ret; 4881 return ret;
4835free_chan: 4882free_chan:
4836 release_port(hc, dch); 4883 release_port(hc, dch);
@@ -4881,7 +4928,7 @@ init_multi_port(struct hfc_multi *hc, int pt)
4881 bch->nr = ch + 1; 4928 bch->nr = ch + 1;
4882 bch->slot = i + ch; 4929 bch->slot = i + ch;
4883 bch->debug = debug; 4930 bch->debug = debug;
4884 mISDN_initbchannel(bch, MAX_DATA_MEM); 4931 mISDN_initbchannel(bch, MAX_DATA_MEM, poll >> 1);
4885 bch->hw = hc; 4932 bch->hw = hc;
4886 bch->ch.send = handle_bmsg; 4933 bch->ch.send = handle_bmsg;
4887 bch->ch.ctrl = hfcm_bctrl; 4934 bch->ch.ctrl = hfcm_bctrl;
@@ -4963,7 +5010,8 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev,
4963 struct hfc_multi *hc; 5010 struct hfc_multi *hc;
4964 u_long flags; 5011 u_long flags;
4965 u_char dips = 0, pmj = 0; /* dip settings, port mode Jumpers */ 5012 u_char dips = 0, pmj = 0; /* dip settings, port mode Jumpers */
4966 int i; 5013 int i, ch;
5014 u_int maskcheck;
4967 5015
4968 if (HFC_cnt >= MAX_CARDS) { 5016 if (HFC_cnt >= MAX_CARDS) {
4969 printk(KERN_ERR "too many cards (max=%d).\n", 5017 printk(KERN_ERR "too many cards (max=%d).\n",
@@ -4997,18 +5045,36 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev,
4997 hc->id = HFC_cnt; 5045 hc->id = HFC_cnt;
4998 hc->pcm = pcm[HFC_cnt]; 5046 hc->pcm = pcm[HFC_cnt];
4999 hc->io_mode = iomode[HFC_cnt]; 5047 hc->io_mode = iomode[HFC_cnt];
5000 if (dslot[HFC_cnt] < 0 && hc->ctype == HFC_TYPE_E1) { 5048 if (hc->ctype == HFC_TYPE_E1 && dmask[E1_cnt]) {
5001 hc->dslot = 0; 5049 /* fragment card */
5002 printk(KERN_INFO "HFC-E1 card has disabled D-channel, but " 5050 pt = 0;
5003 "31 B-channels\n"); 5051 maskcheck = 0;
5004 } 5052 for (ch = 0; ch <= 31; ch++) {
5005 if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32 5053 if (!((1 << ch) & dmask[E1_cnt]))
5006 && hc->ctype == HFC_TYPE_E1) { 5054 continue;
5007 hc->dslot = dslot[HFC_cnt]; 5055 hc->dnum[pt] = ch;
5008 printk(KERN_INFO "HFC-E1 card has alternating D-channel on " 5056 hc->bmask[pt] = bmask[bmask_cnt++];
5009 "time slot %d\n", dslot[HFC_cnt]); 5057 if ((maskcheck & hc->bmask[pt])
5010 } else 5058 || (dmask[E1_cnt] & hc->bmask[pt])) {
5011 hc->dslot = 16; 5059 printk(KERN_INFO
5060 "HFC-E1 #%d has overlapping B-channels on fragment #%d\n",
5061 E1_cnt + 1, pt);
5062 return -EINVAL;
5063 }
5064 maskcheck |= hc->bmask[pt];
5065 printk(KERN_INFO
5066 "HFC-E1 #%d uses D-channel on slot %d and a B-channel map of 0x%08x\n",
5067 E1_cnt + 1, ch, hc->bmask[pt]);
5068 pt++;
5069 }
5070 hc->ports = pt;
5071 }
5072 if (hc->ctype == HFC_TYPE_E1 && !dmask[E1_cnt]) {
5073 /* default card layout */
5074 hc->dnum[0] = 16;
5075 hc->bmask[0] = 0xfffefffe;
5076 hc->ports = 1;
5077 }
5012 5078
5013 /* set chip specific features */ 5079 /* set chip specific features */
5014 hc->masterclk = -1; 5080 hc->masterclk = -1;
@@ -5091,23 +5157,33 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev,
5091 goto free_card; 5157 goto free_card;
5092 } 5158 }
5093 if (hc->ctype == HFC_TYPE_E1) 5159 if (hc->ctype == HFC_TYPE_E1)
5094 ret_err = init_e1_port(hc, m); 5160 ret_err = init_e1_port(hc, m, pt);
5095 else 5161 else
5096 ret_err = init_multi_port(hc, pt); 5162 ret_err = init_multi_port(hc, pt);
5097 if (debug & DEBUG_HFCMULTI_INIT) 5163 if (debug & DEBUG_HFCMULTI_INIT)
5098 printk(KERN_DEBUG 5164 printk(KERN_DEBUG
5099 "%s: Registering D-channel, card(%d) port(%d)" 5165 "%s: Registering D-channel, card(%d) port(%d) "
5100 "result %d\n", 5166 "result %d\n",
5101 __func__, HFC_cnt + 1, pt, ret_err); 5167 __func__, HFC_cnt + 1, pt + 1, ret_err);
5102 5168
5103 if (ret_err) { 5169 if (ret_err) {
5104 while (pt) { /* release already registered ports */ 5170 while (pt) { /* release already registered ports */
5105 pt--; 5171 pt--;
5106 release_port(hc, hc->chan[(pt << 2) + 2].dch); 5172 if (hc->ctype == HFC_TYPE_E1)
5173 release_port(hc,
5174 hc->chan[hc->dnum[pt]].dch);
5175 else
5176 release_port(hc,
5177 hc->chan[(pt << 2) + 2].dch);
5107 } 5178 }
5108 goto free_card; 5179 goto free_card;
5109 } 5180 }
5110 Port_cnt++; 5181 if (hc->ctype != HFC_TYPE_E1)
5182 Port_cnt++; /* for each S0 port */
5183 }
5184 if (hc->ctype == HFC_TYPE_E1) {
5185 Port_cnt++; /* for each E1 port */
5186 E1_cnt++;
5111 } 5187 }
5112 5188
5113 /* disp switches */ 5189 /* disp switches */
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e2c83a2d7691..81363ffa5357 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -453,7 +453,7 @@ hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
453 } 453 }
454 bz->za[new_f2].z2 = cpu_to_le16(new_z2); 454 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
455 bz->f2 = new_f2; /* next buffer */ 455 bz->f2 = new_f2; /* next buffer */
456 recv_Bchannel(bch, MISDN_ID_ANY); 456 recv_Bchannel(bch, MISDN_ID_ANY, false);
457 } 457 }
458} 458}
459 459
@@ -565,11 +565,6 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
565 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL)) 565 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
566 new_z2 -= B_FIFO_SIZE; /* buffer wrap */ 566 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
567 567
568 if (fcnt_rx > MAX_DATA_SIZE) { /* flush, if oversized */
569 *z2r = cpu_to_le16(new_z2); /* new position */
570 return;
571 }
572
573 fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t); 568 fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
574 if (fcnt_tx <= 0) 569 if (fcnt_tx <= 0)
575 fcnt_tx += B_FIFO_SIZE; 570 fcnt_tx += B_FIFO_SIZE;
@@ -577,8 +572,16 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
577 fcnt_tx = B_FIFO_SIZE - fcnt_tx; 572 fcnt_tx = B_FIFO_SIZE - fcnt_tx;
578 /* remaining bytes to send (bytes in tx-fifo) */ 573 /* remaining bytes to send (bytes in tx-fifo) */
579 574
580 bch->rx_skb = mI_alloc_skb(fcnt_rx, GFP_ATOMIC); 575 if (test_bit(FLG_RX_OFF, &bch->Flags)) {
581 if (bch->rx_skb) { 576 bch->dropcnt += fcnt_rx;
577 *z2r = cpu_to_le16(new_z2);
578 return;
579 }
580 maxlen = bchannel_get_rxbuf(bch, fcnt_rx);
581 if (maxlen < 0) {
582 pr_warning("B%d: No bufferspace for %d bytes\n",
583 bch->nr, fcnt_rx);
584 } else {
582 ptr = skb_put(bch->rx_skb, fcnt_rx); 585 ptr = skb_put(bch->rx_skb, fcnt_rx);
583 if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL) 586 if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
584 maxlen = fcnt_rx; /* complete transfer */ 587 maxlen = fcnt_rx; /* complete transfer */
@@ -596,10 +599,8 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
596 ptr1 = bdata; /* start of buffer */ 599 ptr1 = bdata; /* start of buffer */
597 memcpy(ptr, ptr1, fcnt_rx); /* rest */ 600 memcpy(ptr, ptr1, fcnt_rx); /* rest */
598 } 601 }
599 recv_Bchannel(bch, fcnt_tx); /* bch, id */ 602 recv_Bchannel(bch, fcnt_tx, false); /* bch, id, !force */
600 } else 603 }
601 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
602
603 *z2r = cpu_to_le16(new_z2); /* new position */ 604 *z2r = cpu_to_le16(new_z2); /* new position */
604} 605}
605 606
@@ -760,9 +761,14 @@ hfcpci_fill_fifo(struct bchannel *bch)
760 761
761 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO)) 762 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
762 printk(KERN_DEBUG "%s\n", __func__); 763 printk(KERN_DEBUG "%s\n", __func__);
763 if ((!bch->tx_skb) || bch->tx_skb->len <= 0) 764 if ((!bch->tx_skb) || bch->tx_skb->len == 0) {
764 return; 765 if (!test_bit(FLG_FILLEMPTY, &bch->Flags) &&
765 count = bch->tx_skb->len - bch->tx_idx; 766 !test_bit(FLG_TRANSPARENT, &bch->Flags))
767 return;
768 count = HFCPCI_FILLEMPTY;
769 } else {
770 count = bch->tx_skb->len - bch->tx_idx;
771 }
766 if ((bch->nr & 2) && (!hc->hw.bswapped)) { 772 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
767 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2; 773 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
768 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2; 774 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
@@ -781,16 +787,10 @@ hfcpci_fill_fifo(struct bchannel *bch)
781 fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t); 787 fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
782 if (fcnt <= 0) 788 if (fcnt <= 0)
783 fcnt += B_FIFO_SIZE; 789 fcnt += B_FIFO_SIZE;
784 /* fcnt contains available bytes in fifo */ 790 if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
785 fcnt = B_FIFO_SIZE - fcnt; 791 /* fcnt contains available bytes in fifo */
786 /* remaining bytes to send (bytes in fifo) */ 792 if (count > fcnt)
787 793 count = fcnt;
788 /* "fill fifo if empty" feature */
789 if (test_bit(FLG_FILLEMPTY, &bch->Flags) && !fcnt) {
790 /* printk(KERN_DEBUG "%s: buffer empty, so we have "
791 "underrun\n", __func__); */
792 /* fill buffer, to prevent future underrun */
793 count = HFCPCI_FILLEMPTY;
794 new_z1 = le16_to_cpu(*z1t) + count; 794 new_z1 = le16_to_cpu(*z1t) + count;
795 /* new buffer Position */ 795 /* new buffer Position */
796 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) 796 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
@@ -802,17 +802,20 @@ hfcpci_fill_fifo(struct bchannel *bch)
802 printk(KERN_DEBUG "hfcpci_FFt fillempty " 802 printk(KERN_DEBUG "hfcpci_FFt fillempty "
803 "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n", 803 "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
804 fcnt, maxlen, new_z1, dst); 804 fcnt, maxlen, new_z1, dst);
805 fcnt += count;
806 if (maxlen > count) 805 if (maxlen > count)
807 maxlen = count; /* limit size */ 806 maxlen = count; /* limit size */
808 memset(dst, 0x2a, maxlen); /* first copy */ 807 memset(dst, bch->fill[0], maxlen); /* first copy */
809 count -= maxlen; /* remaining bytes */ 808 count -= maxlen; /* remaining bytes */
810 if (count) { 809 if (count) {
811 dst = bdata; /* start of buffer */ 810 dst = bdata; /* start of buffer */
812 memset(dst, 0x2a, count); 811 memset(dst, bch->fill[0], count);
813 } 812 }
814 *z1t = cpu_to_le16(new_z1); /* now send data */ 813 *z1t = cpu_to_le16(new_z1); /* now send data */
814 return;
815 } 815 }
816 /* fcnt contains available bytes in fifo */
817 fcnt = B_FIFO_SIZE - fcnt;
818 /* remaining bytes to send (bytes in fifo) */
816 819
817 next_t_frame: 820 next_t_frame:
818 count = bch->tx_skb->len - bch->tx_idx; 821 count = bch->tx_skb->len - bch->tx_idx;
@@ -849,9 +852,6 @@ hfcpci_fill_fifo(struct bchannel *bch)
849 *z1t = cpu_to_le16(new_z1); /* now send data */ 852 *z1t = cpu_to_le16(new_z1); /* now send data */
850 if (bch->tx_idx < bch->tx_skb->len) 853 if (bch->tx_idx < bch->tx_skb->len)
851 return; 854 return;
852 /* send confirm, on trans, free on hdlc. */
853 if (test_bit(FLG_TRANSPARENT, &bch->Flags))
854 confirm_Bsend(bch);
855 dev_kfree_skb(bch->tx_skb); 855 dev_kfree_skb(bch->tx_skb);
856 if (get_next_bframe(bch)) 856 if (get_next_bframe(bch))
857 goto next_t_frame; 857 goto next_t_frame;
@@ -1533,24 +1533,7 @@ deactivate_bchannel(struct bchannel *bch)
1533static int 1533static int
1534channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) 1534channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1535{ 1535{
1536 int ret = 0; 1536 return mISDN_ctrl_bchannel(bch, cq);
1537
1538 switch (cq->op) {
1539 case MISDN_CTRL_GETOP:
1540 cq->op = MISDN_CTRL_FILL_EMPTY;
1541 break;
1542 case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
1543 test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
1544 if (debug & DEBUG_HW_OPEN)
1545 printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
1546 "off=%d)\n", __func__, bch->nr, !!cq->p1);
1547 break;
1548 default:
1549 printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
1550 ret = -EINVAL;
1551 break;
1552 }
1553 return ret;
1554} 1537}
1555static int 1538static int
1556hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) 1539hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
@@ -1581,8 +1564,7 @@ hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1581 break; 1564 break;
1582 case CLOSE_CHANNEL: 1565 case CLOSE_CHANNEL:
1583 test_and_clear_bit(FLG_OPEN, &bch->Flags); 1566 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1584 if (test_bit(FLG_ACTIVE, &bch->Flags)) 1567 deactivate_bchannel(bch);
1585 deactivate_bchannel(bch);
1586 ch->protocol = ISDN_P_NONE; 1568 ch->protocol = ISDN_P_NONE;
1587 ch->peer = NULL; 1569 ch->peer = NULL;
1588 module_put(THIS_MODULE); 1570 module_put(THIS_MODULE);
@@ -1692,22 +1674,17 @@ hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1692 struct hfc_pci *hc = bch->hw; 1674 struct hfc_pci *hc = bch->hw;
1693 int ret = -EINVAL; 1675 int ret = -EINVAL;
1694 struct mISDNhead *hh = mISDN_HEAD_P(skb); 1676 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1695 unsigned int id; 1677 unsigned long flags;
1696 u_long flags;
1697 1678
1698 switch (hh->prim) { 1679 switch (hh->prim) {
1699 case PH_DATA_REQ: 1680 case PH_DATA_REQ:
1700 spin_lock_irqsave(&hc->lock, flags); 1681 spin_lock_irqsave(&hc->lock, flags);
1701 ret = bchannel_senddata(bch, skb); 1682 ret = bchannel_senddata(bch, skb);
1702 if (ret > 0) { /* direct TX */ 1683 if (ret > 0) { /* direct TX */
1703 id = hh->id; /* skb can be freed */
1704 hfcpci_fill_fifo(bch); 1684 hfcpci_fill_fifo(bch);
1705 ret = 0; 1685 ret = 0;
1706 spin_unlock_irqrestore(&hc->lock, flags); 1686 }
1707 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) 1687 spin_unlock_irqrestore(&hc->lock, flags);
1708 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1709 } else
1710 spin_unlock_irqrestore(&hc->lock, flags);
1711 return ret; 1688 return ret;
1712 case PH_ACTIVATE_REQ: 1689 case PH_ACTIVATE_REQ:
1713 spin_lock_irqsave(&hc->lock, flags); 1690 spin_lock_irqsave(&hc->lock, flags);
@@ -1819,7 +1796,7 @@ channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1819 switch (cq->op) { 1796 switch (cq->op) {
1820 case MISDN_CTRL_GETOP: 1797 case MISDN_CTRL_GETOP:
1821 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT | 1798 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1822 MISDN_CTRL_DISCONNECT; 1799 MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3;
1823 break; 1800 break;
1824 case MISDN_CTRL_LOOP: 1801 case MISDN_CTRL_LOOP:
1825 /* channel 0 disabled loop */ 1802 /* channel 0 disabled loop */
@@ -1896,6 +1873,9 @@ channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1896 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); 1873 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1897 hc->hw.trm &= 0x7f; /* disable IOM-loop */ 1874 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1898 break; 1875 break;
1876 case MISDN_CTRL_L1_TIMER3:
1877 ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
1878 break;
1899 default: 1879 default:
1900 printk(KERN_WARNING "%s: unknown Op %x\n", 1880 printk(KERN_WARNING "%s: unknown Op %x\n",
1901 __func__, cq->op); 1881 __func__, cq->op);
@@ -1969,7 +1949,6 @@ open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
1969 bch = &hc->bch[rq->adr.channel - 1]; 1949 bch = &hc->bch[rq->adr.channel - 1];
1970 if (test_and_set_bit(FLG_OPEN, &bch->Flags)) 1950 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1971 return -EBUSY; /* b-channel can be only open once */ 1951 return -EBUSY; /* b-channel can be only open once */
1972 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
1973 bch->ch.protocol = rq->protocol; 1952 bch->ch.protocol = rq->protocol;
1974 rq->ch = &bch->ch; /* TODO: E-channel */ 1953 rq->ch = &bch->ch; /* TODO: E-channel */
1975 if (!try_module_get(THIS_MODULE)) 1954 if (!try_module_get(THIS_MODULE))
@@ -2121,7 +2100,7 @@ setup_card(struct hfc_pci *card)
2121 card->bch[i].nr = i + 1; 2100 card->bch[i].nr = i + 1;
2122 set_channelmap(i + 1, card->dch.dev.channelmap); 2101 set_channelmap(i + 1, card->dch.dev.channelmap);
2123 card->bch[i].debug = debug; 2102 card->bch[i].debug = debug;
2124 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM); 2103 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, poll >> 1);
2125 card->bch[i].hw = card; 2104 card->bch[i].hw = card;
2126 card->bch[i].ch.send = hfcpci_l2l1B; 2105 card->bch[i].ch.send = hfcpci_l2l1B;
2127 card->bch[i].ch.ctrl = hfc_bctrl; 2106 card->bch[i].ch.ctrl = hfc_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 8cde2a0538ab..83206e453d4e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -226,19 +226,12 @@ hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
226 if (debug & DBG_HFC_CALL_TRACE) 226 if (debug & DBG_HFC_CALL_TRACE)
227 printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n", 227 printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n",
228 hw->name, __func__, ret); 228 hw->name, __func__, ret);
229 if (ret > 0) { 229 if (ret > 0)
230 /*
231 * other l1 drivers don't send early confirms on
232 * transp data, but hfcsusb does because tx_next
233 * skb is needed in tx_iso_complete()
234 */
235 queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
236 ret = 0; 230 ret = 0;
237 }
238 return ret; 231 return ret;
239 case PH_ACTIVATE_REQ: 232 case PH_ACTIVATE_REQ:
240 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) { 233 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
241 hfcsusb_start_endpoint(hw, bch->nr); 234 hfcsusb_start_endpoint(hw, bch->nr - 1);
242 ret = hfcsusb_setup_bch(bch, ch->protocol); 235 ret = hfcsusb_setup_bch(bch, ch->protocol);
243 } else 236 } else
244 ret = 0; 237 ret = 0;
@@ -498,16 +491,9 @@ open_bchannel(struct hfcsusb *hw, struct channel_req *rq)
498 bch = &hw->bch[rq->adr.channel - 1]; 491 bch = &hw->bch[rq->adr.channel - 1];
499 if (test_and_set_bit(FLG_OPEN, &bch->Flags)) 492 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
500 return -EBUSY; /* b-channel can be only open once */ 493 return -EBUSY; /* b-channel can be only open once */
501 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
502 bch->ch.protocol = rq->protocol; 494 bch->ch.protocol = rq->protocol;
503 rq->ch = &bch->ch; 495 rq->ch = &bch->ch;
504 496
505 /* start USB endpoint for bchannel */
506 if (rq->adr.channel == 1)
507 hfcsusb_start_endpoint(hw, HFC_CHAN_B1);
508 else
509 hfcsusb_start_endpoint(hw, HFC_CHAN_B2);
510
511 if (!try_module_get(THIS_MODULE)) 497 if (!try_module_get(THIS_MODULE))
512 printk(KERN_WARNING "%s: %s:cannot get module\n", 498 printk(KERN_WARNING "%s: %s:cannot get module\n",
513 hw->name, __func__); 499 hw->name, __func__);
@@ -819,24 +805,7 @@ hfcsusb_ph_command(struct hfcsusb *hw, u_char command)
819static int 805static int
820channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) 806channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
821{ 807{
822 int ret = 0; 808 return mISDN_ctrl_bchannel(bch, cq);
823
824 switch (cq->op) {
825 case MISDN_CTRL_GETOP:
826 cq->op = MISDN_CTRL_FILL_EMPTY;
827 break;
828 case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
829 test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
830 if (debug & DEBUG_HW_OPEN)
831 printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
832 "off=%d)\n", __func__, bch->nr, !!cq->p1);
833 break;
834 default:
835 printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
836 ret = -EINVAL;
837 break;
838 }
839 return ret;
840} 809}
841 810
842/* collect data from incoming interrupt or isochron USB data */ 811/* collect data from incoming interrupt or isochron USB data */
@@ -873,7 +842,21 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
873 hdlc = 1; 842 hdlc = 1;
874 } 843 }
875 if (fifo->bch) { 844 if (fifo->bch) {
845 if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) {
846 fifo->bch->dropcnt += len;
847 spin_unlock(&hw->lock);
848 return;
849 }
850 maxlen = bchannel_get_rxbuf(fifo->bch, len);
876 rx_skb = fifo->bch->rx_skb; 851 rx_skb = fifo->bch->rx_skb;
852 if (maxlen < 0) {
853 if (rx_skb)
854 skb_trim(rx_skb, 0);
855 pr_warning("%s.B%d: No bufferspace for %d bytes\n",
856 hw->name, fifo->bch->nr, len);
857 spin_unlock(&hw->lock);
858 return;
859 }
877 maxlen = fifo->bch->maxlen; 860 maxlen = fifo->bch->maxlen;
878 hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags); 861 hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
879 } 862 }
@@ -883,25 +866,22 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
883 hdlc = 1; 866 hdlc = 1;
884 } 867 }
885 868
886 if (!rx_skb) {
887 rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC);
888 if (rx_skb) {
889 if (fifo->dch)
890 fifo->dch->rx_skb = rx_skb;
891 if (fifo->bch)
892 fifo->bch->rx_skb = rx_skb;
893 if (fifo->ech)
894 fifo->ech->rx_skb = rx_skb;
895 skb_trim(rx_skb, 0);
896 } else {
897 printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
898 hw->name, __func__);
899 spin_unlock(&hw->lock);
900 return;
901 }
902 }
903
904 if (fifo->dch || fifo->ech) { 869 if (fifo->dch || fifo->ech) {
870 if (!rx_skb) {
871 rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC);
872 if (rx_skb) {
873 if (fifo->dch)
874 fifo->dch->rx_skb = rx_skb;
875 if (fifo->ech)
876 fifo->ech->rx_skb = rx_skb;
877 skb_trim(rx_skb, 0);
878 } else {
879 printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
880 hw->name, __func__);
881 spin_unlock(&hw->lock);
882 return;
883 }
884 }
905 /* D/E-Channel SKB range check */ 885 /* D/E-Channel SKB range check */
906 if ((rx_skb->len + len) >= MAX_DFRAME_LEN_L1) { 886 if ((rx_skb->len + len) >= MAX_DFRAME_LEN_L1) {
907 printk(KERN_DEBUG "%s: %s: sbk mem exceeded " 887 printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
@@ -911,16 +891,6 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
911 spin_unlock(&hw->lock); 891 spin_unlock(&hw->lock);
912 return; 892 return;
913 } 893 }
914 } else if (fifo->bch) {
915 /* B-Channel SKB range check */
916 if ((rx_skb->len + len) >= (MAX_BCH_SIZE + 3)) {
917 printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
918 "for fifo(%d) HFCUSB_B_RX\n",
919 hw->name, __func__, fifon);
920 skb_trim(rx_skb, 0);
921 spin_unlock(&hw->lock);
922 return;
923 }
924 } 894 }
925 895
926 memcpy(skb_put(rx_skb, len), data, len); 896 memcpy(skb_put(rx_skb, len), data, len);
@@ -948,7 +918,8 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
948 if (fifo->dch) 918 if (fifo->dch)
949 recv_Dchannel(fifo->dch); 919 recv_Dchannel(fifo->dch);
950 if (fifo->bch) 920 if (fifo->bch)
951 recv_Bchannel(fifo->bch, MISDN_ID_ANY); 921 recv_Bchannel(fifo->bch, MISDN_ID_ANY,
922 0);
952 if (fifo->ech) 923 if (fifo->ech)
953 recv_Echannel(fifo->ech, 924 recv_Echannel(fifo->ech,
954 &hw->dch); 925 &hw->dch);
@@ -969,8 +940,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
969 } 940 }
970 } else { 941 } else {
971 /* deliver transparent data to layer2 */ 942 /* deliver transparent data to layer2 */
972 if (rx_skb->len >= poll) 943 recv_Bchannel(fifo->bch, MISDN_ID_ANY, false);
973 recv_Bchannel(fifo->bch, MISDN_ID_ANY);
974 } 944 }
975 spin_unlock(&hw->lock); 945 spin_unlock(&hw->lock);
976} 946}
@@ -1200,8 +1170,8 @@ tx_iso_complete(struct urb *urb)
1200 int k, tx_offset, num_isoc_packets, sink, remain, current_len, 1170 int k, tx_offset, num_isoc_packets, sink, remain, current_len,
1201 errcode, hdlc, i; 1171 errcode, hdlc, i;
1202 int *tx_idx; 1172 int *tx_idx;
1203 int frame_complete, fifon, status; 1173 int frame_complete, fifon, status, fillempty = 0;
1204 __u8 threshbit; 1174 __u8 threshbit, *p;
1205 1175
1206 spin_lock(&hw->lock); 1176 spin_lock(&hw->lock);
1207 if (fifo->stop_gracefull) { 1177 if (fifo->stop_gracefull) {
@@ -1219,6 +1189,9 @@ tx_iso_complete(struct urb *urb)
1219 tx_skb = fifo->bch->tx_skb; 1189 tx_skb = fifo->bch->tx_skb;
1220 tx_idx = &fifo->bch->tx_idx; 1190 tx_idx = &fifo->bch->tx_idx;
1221 hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags); 1191 hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
1192 if (!tx_skb && !hdlc &&
1193 test_bit(FLG_FILLEMPTY, &fifo->bch->Flags))
1194 fillempty = 1;
1222 } else { 1195 } else {
1223 printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n", 1196 printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
1224 hw->name, __func__); 1197 hw->name, __func__);
@@ -1277,6 +1250,8 @@ tx_iso_complete(struct urb *urb)
1277 /* Generate next ISO Packets */ 1250 /* Generate next ISO Packets */
1278 if (tx_skb) 1251 if (tx_skb)
1279 remain = tx_skb->len - *tx_idx; 1252 remain = tx_skb->len - *tx_idx;
1253 else if (fillempty)
1254 remain = 15; /* > not complete */
1280 else 1255 else
1281 remain = 0; 1256 remain = 0;
1282 1257
@@ -1307,15 +1282,20 @@ tx_iso_complete(struct urb *urb)
1307 } 1282 }
1308 1283
1309 /* copy tx data to iso-urb buffer */ 1284 /* copy tx data to iso-urb buffer */
1310 memcpy(context_iso_urb->buffer + tx_offset + 1, 1285 p = context_iso_urb->buffer + tx_offset + 1;
1311 (tx_skb->data + *tx_idx), current_len); 1286 if (fillempty) {
1312 *tx_idx += current_len; 1287 memset(p, fifo->bch->fill[0],
1313 1288 current_len);
1289 } else {
1290 memcpy(p, (tx_skb->data + *tx_idx),
1291 current_len);
1292 *tx_idx += current_len;
1293 }
1314 urb->iso_frame_desc[k].offset = tx_offset; 1294 urb->iso_frame_desc[k].offset = tx_offset;
1315 urb->iso_frame_desc[k].length = current_len + 1; 1295 urb->iso_frame_desc[k].length = current_len + 1;
1316 1296
1317 /* USB data log for every D ISO out */ 1297 /* USB data log for every D ISO out */
1318 if ((fifon == HFCUSB_D_RX) && 1298 if ((fifon == HFCUSB_D_RX) && !fillempty &&
1319 (debug & DBG_HFC_USB_VERBOSE)) { 1299 (debug & DBG_HFC_USB_VERBOSE)) {
1320 printk(KERN_DEBUG 1300 printk(KERN_DEBUG
1321 "%s: %s (%d/%d) offs(%d) len(%d) ", 1301 "%s: %s (%d/%d) offs(%d) len(%d) ",
@@ -1365,12 +1345,8 @@ tx_iso_complete(struct urb *urb)
1365 if (fifo->dch && get_next_dframe(fifo->dch)) 1345 if (fifo->dch && get_next_dframe(fifo->dch))
1366 tx_skb = fifo->dch->tx_skb; 1346 tx_skb = fifo->dch->tx_skb;
1367 else if (fifo->bch && 1347 else if (fifo->bch &&
1368 get_next_bframe(fifo->bch)) { 1348 get_next_bframe(fifo->bch))
1369 if (test_bit(FLG_TRANSPARENT,
1370 &fifo->bch->Flags))
1371 confirm_Bsend(fifo->bch);
1372 tx_skb = fifo->bch->tx_skb; 1349 tx_skb = fifo->bch->tx_skb;
1373 }
1374 } 1350 }
1375 } 1351 }
1376 errcode = usb_submit_urb(urb, GFP_ATOMIC); 1352 errcode = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1812,7 +1788,7 @@ deactivate_bchannel(struct bchannel *bch)
1812 mISDN_clear_bchannel(bch); 1788 mISDN_clear_bchannel(bch);
1813 spin_unlock_irqrestore(&hw->lock, flags); 1789 spin_unlock_irqrestore(&hw->lock, flags);
1814 hfcsusb_setup_bch(bch, ISDN_P_NONE); 1790 hfcsusb_setup_bch(bch, ISDN_P_NONE);
1815 hfcsusb_stop_endpoint(hw, bch->nr); 1791 hfcsusb_stop_endpoint(hw, bch->nr - 1);
1816} 1792}
1817 1793
1818/* 1794/*
@@ -1836,8 +1812,7 @@ hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1836 1812
1837 case CLOSE_CHANNEL: 1813 case CLOSE_CHANNEL:
1838 test_and_clear_bit(FLG_OPEN, &bch->Flags); 1814 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1839 if (test_bit(FLG_ACTIVE, &bch->Flags)) 1815 deactivate_bchannel(bch);
1840 deactivate_bchannel(bch);
1841 ch->protocol = ISDN_P_NONE; 1816 ch->protocol = ISDN_P_NONE;
1842 ch->peer = NULL; 1817 ch->peer = NULL;
1843 module_put(THIS_MODULE); 1818 module_put(THIS_MODULE);
@@ -1883,7 +1858,7 @@ setup_instance(struct hfcsusb *hw, struct device *parent)
1883 hw->bch[i].nr = i + 1; 1858 hw->bch[i].nr = i + 1;
1884 set_channelmap(i + 1, hw->dch.dev.channelmap); 1859 set_channelmap(i + 1, hw->dch.dev.channelmap);
1885 hw->bch[i].debug = debug; 1860 hw->bch[i].debug = debug;
1886 mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM); 1861 mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM, poll >> 1);
1887 hw->bch[i].hw = hw; 1862 hw->bch[i].hw = hw;
1888 hw->bch[i].ch.send = hfcusb_l2l1B; 1863 hw->bch[i].ch.send = hfcusb_l2l1B;
1889 hw->bch[i].ch.ctrl = hfc_bctrl; 1864 hw->bch[i].ch.ctrl = hfc_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 884369f09cad..752e0825591f 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -603,10 +603,11 @@ isac_l1hw(struct mISDNchannel *ch, struct sk_buff *skb)
603} 603}
604 604
605static int 605static int
606isac_ctrl(struct isac_hw *isac, u32 cmd, u_long para) 606isac_ctrl(struct isac_hw *isac, u32 cmd, unsigned long para)
607{ 607{
608 u8 tl = 0; 608 u8 tl = 0;
609 u_long flags; 609 unsigned long flags;
610 int ret = 0;
610 611
611 switch (cmd) { 612 switch (cmd) {
612 case HW_TESTLOOP: 613 case HW_TESTLOOP:
@@ -626,12 +627,15 @@ isac_ctrl(struct isac_hw *isac, u32 cmd, u_long para)
626 } 627 }
627 spin_unlock_irqrestore(isac->hwlock, flags); 628 spin_unlock_irqrestore(isac->hwlock, flags);
628 break; 629 break;
630 case HW_TIMER3_VALUE:
631 ret = l1_event(isac->dch.l1, HW_TIMER3_VALUE | (para & 0xff));
632 break;
629 default: 633 default:
630 pr_debug("%s: %s unknown command %x %lx\n", isac->name, 634 pr_debug("%s: %s unknown command %x %lx\n", isac->name,
631 __func__, cmd, para); 635 __func__, cmd, para);
632 return -1; 636 ret = -1;
633 } 637 }
634 return 0; 638 return ret;
635} 639}
636 640
637static int 641static int
@@ -929,22 +933,21 @@ static void
929hscx_empty_fifo(struct hscx_hw *hscx, u8 count) 933hscx_empty_fifo(struct hscx_hw *hscx, u8 count)
930{ 934{
931 u8 *p; 935 u8 *p;
936 int maxlen;
932 937
933 pr_debug("%s: B%1d %d\n", hscx->ip->name, hscx->bch.nr, count); 938 pr_debug("%s: B%1d %d\n", hscx->ip->name, hscx->bch.nr, count);
934 if (!hscx->bch.rx_skb) { 939 if (test_bit(FLG_RX_OFF, &hscx->bch.Flags)) {
935 hscx->bch.rx_skb = mI_alloc_skb(hscx->bch.maxlen, GFP_ATOMIC); 940 hscx->bch.dropcnt += count;
936 if (!hscx->bch.rx_skb) { 941 hscx_cmdr(hscx, 0x80); /* RMC */
937 pr_info("%s: B receive out of memory\n", 942 return;
938 hscx->ip->name);
939 hscx_cmdr(hscx, 0x80); /* RMC */
940 return;
941 }
942 } 943 }
943 if ((hscx->bch.rx_skb->len + count) > hscx->bch.maxlen) { 944 maxlen = bchannel_get_rxbuf(&hscx->bch, count);
944 pr_debug("%s: overrun %d\n", hscx->ip->name, 945 if (maxlen < 0) {
945 hscx->bch.rx_skb->len + count);
946 skb_trim(hscx->bch.rx_skb, 0);
947 hscx_cmdr(hscx, 0x80); /* RMC */ 946 hscx_cmdr(hscx, 0x80); /* RMC */
947 if (hscx->bch.rx_skb)
948 skb_trim(hscx->bch.rx_skb, 0);
949 pr_warning("%s.B%d: No bufferspace for %d bytes\n",
950 hscx->ip->name, hscx->bch.nr, count);
948 return; 951 return;
949 } 952 }
950 p = skb_put(hscx->bch.rx_skb, count); 953 p = skb_put(hscx->bch.rx_skb, count);
@@ -971,22 +974,28 @@ hscx_fill_fifo(struct hscx_hw *hscx)
971 int count, more; 974 int count, more;
972 u8 *p; 975 u8 *p;
973 976
974 if (!hscx->bch.tx_skb) 977 if (!hscx->bch.tx_skb) {
975 return; 978 if (!test_bit(FLG_TX_EMPTY, &hscx->bch.Flags))
976 count = hscx->bch.tx_skb->len - hscx->bch.tx_idx; 979 return;
977 if (count <= 0)
978 return;
979 p = hscx->bch.tx_skb->data + hscx->bch.tx_idx;
980
981 more = test_bit(FLG_TRANSPARENT, &hscx->bch.Flags) ? 1 : 0;
982 if (count > hscx->fifo_size) {
983 count = hscx->fifo_size; 980 count = hscx->fifo_size;
984 more = 1; 981 more = 1;
985 } 982 p = hscx->log;
986 pr_debug("%s: B%1d %d/%d/%d\n", hscx->ip->name, hscx->bch.nr, count, 983 memset(p, hscx->bch.fill[0], count);
987 hscx->bch.tx_idx, hscx->bch.tx_skb->len); 984 } else {
988 hscx->bch.tx_idx += count; 985 count = hscx->bch.tx_skb->len - hscx->bch.tx_idx;
986 if (count <= 0)
987 return;
988 p = hscx->bch.tx_skb->data + hscx->bch.tx_idx;
989 989
990 more = test_bit(FLG_TRANSPARENT, &hscx->bch.Flags) ? 1 : 0;
991 if (count > hscx->fifo_size) {
992 count = hscx->fifo_size;
993 more = 1;
994 }
995 pr_debug("%s: B%1d %d/%d/%d\n", hscx->ip->name, hscx->bch.nr,
996 count, hscx->bch.tx_idx, hscx->bch.tx_skb->len);
997 hscx->bch.tx_idx += count;
998 }
990 if (hscx->ip->type & IPAC_TYPE_IPACX) 999 if (hscx->ip->type & IPAC_TYPE_IPACX)
991 hscx->ip->write_fifo(hscx->ip->hw, 1000 hscx->ip->write_fifo(hscx->ip->hw,
992 hscx->off + IPACX_XFIFOB, p, count); 1001 hscx->off + IPACX_XFIFOB, p, count);
@@ -997,7 +1006,7 @@ hscx_fill_fifo(struct hscx_hw *hscx)
997 } 1006 }
998 hscx_cmdr(hscx, more ? 0x08 : 0x0a); 1007 hscx_cmdr(hscx, more ? 0x08 : 0x0a);
999 1008
1000 if (hscx->bch.debug & DEBUG_HW_BFIFO) { 1009 if (hscx->bch.tx_skb && (hscx->bch.debug & DEBUG_HW_BFIFO)) {
1001 snprintf(hscx->log, 64, "B%1d-send %s %d ", 1010 snprintf(hscx->log, 64, "B%1d-send %s %d ",
1002 hscx->bch.nr, hscx->ip->name, count); 1011 hscx->bch.nr, hscx->ip->name, count);
1003 print_hex_dump_bytes(hscx->log, DUMP_PREFIX_OFFSET, p, count); 1012 print_hex_dump_bytes(hscx->log, DUMP_PREFIX_OFFSET, p, count);
@@ -1007,17 +1016,17 @@ hscx_fill_fifo(struct hscx_hw *hscx)
1007static void 1016static void
1008hscx_xpr(struct hscx_hw *hx) 1017hscx_xpr(struct hscx_hw *hx)
1009{ 1018{
1010 if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len) 1019 if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len) {
1011 hscx_fill_fifo(hx); 1020 hscx_fill_fifo(hx);
1012 else { 1021 } else {
1013 if (hx->bch.tx_skb) { 1022 if (hx->bch.tx_skb)
1014 /* send confirm, on trans, free on hdlc. */
1015 if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags))
1016 confirm_Bsend(&hx->bch);
1017 dev_kfree_skb(hx->bch.tx_skb); 1023 dev_kfree_skb(hx->bch.tx_skb);
1018 } 1024 if (get_next_bframe(&hx->bch)) {
1019 if (get_next_bframe(&hx->bch)) 1025 hscx_fill_fifo(hx);
1026 test_and_clear_bit(FLG_TX_EMPTY, &hx->bch.Flags);
1027 } else if (test_bit(FLG_TX_EMPTY, &hx->bch.Flags)) {
1020 hscx_fill_fifo(hx); 1028 hscx_fill_fifo(hx);
1029 }
1021 } 1030 }
1022} 1031}
1023 1032
@@ -1069,7 +1078,7 @@ ipac_rme(struct hscx_hw *hx)
1069 skb_trim(hx->bch.rx_skb, 0); 1078 skb_trim(hx->bch.rx_skb, 0);
1070 } else { 1079 } else {
1071 skb_trim(hx->bch.rx_skb, hx->bch.rx_skb->len - 1); 1080 skb_trim(hx->bch.rx_skb, hx->bch.rx_skb->len - 1);
1072 recv_Bchannel(&hx->bch, 0); 1081 recv_Bchannel(&hx->bch, 0, false);
1073 } 1082 }
1074} 1083}
1075 1084
@@ -1120,11 +1129,8 @@ ipac_irq(struct hscx_hw *hx, u8 ista)
1120 1129
1121 if (istab & IPACX_B_RPF) { 1130 if (istab & IPACX_B_RPF) {
1122 hscx_empty_fifo(hx, hx->fifo_size); 1131 hscx_empty_fifo(hx, hx->fifo_size);
1123 if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags)) { 1132 if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags))
1124 /* receive transparent audio data */ 1133 recv_Bchannel(&hx->bch, 0, false);
1125 if (hx->bch.rx_skb)
1126 recv_Bchannel(&hx->bch, 0);
1127 }
1128 } 1134 }
1129 1135
1130 if (istab & IPACX_B_RFO) { 1136 if (istab & IPACX_B_RFO) {
@@ -1137,7 +1143,9 @@ ipac_irq(struct hscx_hw *hx, u8 ista)
1137 1143
1138 if (istab & IPACX_B_XDU) { 1144 if (istab & IPACX_B_XDU) {
1139 if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags)) { 1145 if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags)) {
1140 hscx_fill_fifo(hx); 1146 if (test_bit(FLG_FILLEMPTY, &hx->bch.Flags))
1147 test_and_set_bit(FLG_TX_EMPTY, &hx->bch.Flags);
1148 hscx_xpr(hx);
1141 return; 1149 return;
1142 } 1150 }
1143 pr_debug("%s: B%1d XDU error at len %d\n", hx->ip->name, 1151 pr_debug("%s: B%1d XDU error at len %d\n", hx->ip->name,
@@ -1338,22 +1346,17 @@ hscx_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
1338 struct hscx_hw *hx = container_of(bch, struct hscx_hw, bch); 1346 struct hscx_hw *hx = container_of(bch, struct hscx_hw, bch);
1339 int ret = -EINVAL; 1347 int ret = -EINVAL;
1340 struct mISDNhead *hh = mISDN_HEAD_P(skb); 1348 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1341 u32 id; 1349 unsigned long flags;
1342 u_long flags;
1343 1350
1344 switch (hh->prim) { 1351 switch (hh->prim) {
1345 case PH_DATA_REQ: 1352 case PH_DATA_REQ:
1346 spin_lock_irqsave(hx->ip->hwlock, flags); 1353 spin_lock_irqsave(hx->ip->hwlock, flags);
1347 ret = bchannel_senddata(bch, skb); 1354 ret = bchannel_senddata(bch, skb);
1348 if (ret > 0) { /* direct TX */ 1355 if (ret > 0) { /* direct TX */
1349 id = hh->id; /* skb can be freed */
1350 ret = 0; 1356 ret = 0;
1351 hscx_fill_fifo(hx); 1357 hscx_fill_fifo(hx);
1352 spin_unlock_irqrestore(hx->ip->hwlock, flags); 1358 }
1353 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) 1359 spin_unlock_irqrestore(hx->ip->hwlock, flags);
1354 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1355 } else
1356 spin_unlock_irqrestore(hx->ip->hwlock, flags);
1357 return ret; 1360 return ret;
1358 case PH_ACTIVATE_REQ: 1361 case PH_ACTIVATE_REQ:
1359 spin_lock_irqsave(hx->ip->hwlock, flags); 1362 spin_lock_irqsave(hx->ip->hwlock, flags);
@@ -1388,20 +1391,7 @@ hscx_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
1388static int 1391static int
1389channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) 1392channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1390{ 1393{
1391 int ret = 0; 1394 return mISDN_ctrl_bchannel(bch, cq);
1392
1393 switch (cq->op) {
1394 case MISDN_CTRL_GETOP:
1395 cq->op = 0;
1396 break;
1397 /* Nothing implemented yet */
1398 case MISDN_CTRL_FILL_EMPTY:
1399 default:
1400 pr_info("%s: unknown Op %x\n", __func__, cq->op);
1401 ret = -EINVAL;
1402 break;
1403 }
1404 return ret;
1405} 1395}
1406 1396
1407static int 1397static int
@@ -1416,15 +1406,10 @@ hscx_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
1416 switch (cmd) { 1406 switch (cmd) {
1417 case CLOSE_CHANNEL: 1407 case CLOSE_CHANNEL:
1418 test_and_clear_bit(FLG_OPEN, &bch->Flags); 1408 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1419 if (test_bit(FLG_ACTIVE, &bch->Flags)) { 1409 spin_lock_irqsave(hx->ip->hwlock, flags);
1420 spin_lock_irqsave(hx->ip->hwlock, flags); 1410 mISDN_freebchannel(bch);
1421 mISDN_freebchannel(bch); 1411 hscx_mode(hx, ISDN_P_NONE);
1422 hscx_mode(hx, ISDN_P_NONE); 1412 spin_unlock_irqrestore(hx->ip->hwlock, flags);
1423 spin_unlock_irqrestore(hx->ip->hwlock, flags);
1424 } else {
1425 skb_queue_purge(&bch->rqueue);
1426 bch->rcount = 0;
1427 }
1428 ch->protocol = ISDN_P_NONE; 1413 ch->protocol = ISDN_P_NONE;
1429 ch->peer = NULL; 1414 ch->peer = NULL;
1430 module_put(hx->ip->owner); 1415 module_put(hx->ip->owner);
@@ -1526,7 +1511,7 @@ channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq)
1526 1511
1527 switch (cq->op) { 1512 switch (cq->op) {
1528 case MISDN_CTRL_GETOP: 1513 case MISDN_CTRL_GETOP:
1529 cq->op = MISDN_CTRL_LOOP; 1514 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
1530 break; 1515 break;
1531 case MISDN_CTRL_LOOP: 1516 case MISDN_CTRL_LOOP:
1532 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ 1517 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -1536,6 +1521,9 @@ channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq)
1536 } 1521 }
1537 ret = ipac->ctrl(ipac, HW_TESTLOOP, cq->channel); 1522 ret = ipac->ctrl(ipac, HW_TESTLOOP, cq->channel);
1538 break; 1523 break;
1524 case MISDN_CTRL_L1_TIMER3:
1525 ret = ipac->isac.ctrl(&ipac->isac, HW_TIMER3_VALUE, cq->p1);
1526 break;
1539 default: 1527 default:
1540 pr_info("%s: unknown CTRL OP %x\n", ipac->name, cq->op); 1528 pr_info("%s: unknown CTRL OP %x\n", ipac->name, cq->op);
1541 ret = -EINVAL; 1529 ret = -EINVAL;
@@ -1621,7 +1609,8 @@ mISDNipac_init(struct ipac_hw *ipac, void *hw)
1621 set_channelmap(i + 1, ipac->isac.dch.dev.channelmap); 1609 set_channelmap(i + 1, ipac->isac.dch.dev.channelmap);
1622 list_add(&ipac->hscx[i].bch.ch.list, 1610 list_add(&ipac->hscx[i].bch.ch.list,
1623 &ipac->isac.dch.dev.bchannels); 1611 &ipac->isac.dch.dev.bchannels);
1624 mISDN_initbchannel(&ipac->hscx[i].bch, MAX_DATA_MEM); 1612 mISDN_initbchannel(&ipac->hscx[i].bch, MAX_DATA_MEM,
1613 ipac->hscx[i].fifo_size);
1625 ipac->hscx[i].bch.ch.nr = i + 1; 1614 ipac->hscx[i].bch.ch.nr = i + 1;
1626 ipac->hscx[i].bch.ch.send = &hscx_l2l1; 1615 ipac->hscx[i].bch.ch.send = &hscx_l2l1;
1627 ipac->hscx[i].bch.ch.ctrl = hscx_bctrl; 1616 ipac->hscx[i].bch.ch.ctrl = hscx_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 9a6da6edcfa8..be5973ded6d6 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -421,13 +421,19 @@ deliver_status(struct isar_ch *ch, int status)
421static inline void 421static inline void
422isar_rcv_frame(struct isar_ch *ch) 422isar_rcv_frame(struct isar_ch *ch)
423{ 423{
424 u8 *ptr; 424 u8 *ptr;
425 int maxlen;
425 426
426 if (!ch->is->clsb) { 427 if (!ch->is->clsb) {
427 pr_debug("%s; ISAR zero len frame\n", ch->is->name); 428 pr_debug("%s; ISAR zero len frame\n", ch->is->name);
428 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); 429 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
429 return; 430 return;
430 } 431 }
432 if (test_bit(FLG_RX_OFF, &ch->bch.Flags)) {
433 ch->bch.dropcnt += ch->is->clsb;
434 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
435 return;
436 }
431 switch (ch->bch.state) { 437 switch (ch->bch.state) {
432 case ISDN_P_NONE: 438 case ISDN_P_NONE:
433 pr_debug("%s: ISAR protocol 0 spurious IIS_RDATA %x/%x/%x\n", 439 pr_debug("%s: ISAR protocol 0 spurious IIS_RDATA %x/%x/%x\n",
@@ -437,36 +443,22 @@ isar_rcv_frame(struct isar_ch *ch)
437 case ISDN_P_B_RAW: 443 case ISDN_P_B_RAW:
438 case ISDN_P_B_L2DTMF: 444 case ISDN_P_B_L2DTMF:
439 case ISDN_P_B_MODEM_ASYNC: 445 case ISDN_P_B_MODEM_ASYNC:
440 if (!ch->bch.rx_skb) { 446 maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb);
441 ch->bch.rx_skb = mI_alloc_skb(ch->bch.maxlen, 447 if (maxlen < 0) {
442 GFP_ATOMIC); 448 pr_warning("%s.B%d: No bufferspace for %d bytes\n",
443 if (unlikely(!ch->bch.rx_skb)) { 449 ch->is->name, ch->bch.nr, ch->is->clsb);
444 pr_info("%s: B receive out of memory\n", 450 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
445 ch->is->name); 451 break;
446 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
447 break;
448 }
449 } 452 }
450 rcv_mbox(ch->is, skb_put(ch->bch.rx_skb, ch->is->clsb)); 453 rcv_mbox(ch->is, skb_put(ch->bch.rx_skb, ch->is->clsb));
451 recv_Bchannel(&ch->bch, 0); 454 recv_Bchannel(&ch->bch, 0, false);
452 break; 455 break;
453 case ISDN_P_B_HDLC: 456 case ISDN_P_B_HDLC:
454 if (!ch->bch.rx_skb) { 457 maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb);
455 ch->bch.rx_skb = mI_alloc_skb(ch->bch.maxlen, 458 if (maxlen < 0) {
456 GFP_ATOMIC); 459 pr_warning("%s.B%d: No bufferspace for %d bytes\n",
457 if (unlikely(!ch->bch.rx_skb)) { 460 ch->is->name, ch->bch.nr, ch->is->clsb);
458 pr_info("%s: B receive out of memory\n",
459 ch->is->name);
460 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
461 break;
462 }
463 }
464 if ((ch->bch.rx_skb->len + ch->is->clsb) >
465 (ch->bch.maxlen + 2)) {
466 pr_debug("%s: incoming packet too large\n",
467 ch->is->name);
468 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); 461 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
469 skb_trim(ch->bch.rx_skb, 0);
470 break; 462 break;
471 } 463 }
472 if (ch->is->cmsb & HDLC_ERROR) { 464 if (ch->is->cmsb & HDLC_ERROR) {
@@ -494,7 +486,7 @@ isar_rcv_frame(struct isar_ch *ch)
494 break; 486 break;
495 } 487 }
496 skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2); 488 skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2);
497 recv_Bchannel(&ch->bch, 0); 489 recv_Bchannel(&ch->bch, 0, false);
498 } 490 }
499 break; 491 break;
500 case ISDN_P_B_T30_FAX: 492 case ISDN_P_B_T30_FAX:
@@ -530,7 +522,7 @@ isar_rcv_frame(struct isar_ch *ch)
530 ch->state = STFAX_ESCAPE; 522 ch->state = STFAX_ESCAPE;
531 /* set_skb_flag(skb, DF_NOMOREDATA); */ 523 /* set_skb_flag(skb, DF_NOMOREDATA); */
532 } 524 }
533 recv_Bchannel(&ch->bch, 0); 525 recv_Bchannel(&ch->bch, 0, false);
534 if (ch->is->cmsb & SART_NMD) 526 if (ch->is->cmsb & SART_NMD)
535 deliver_status(ch, HW_MOD_NOCARR); 527 deliver_status(ch, HW_MOD_NOCARR);
536 break; 528 break;
@@ -570,7 +562,7 @@ isar_rcv_frame(struct isar_ch *ch)
570 break; 562 break;
571 } 563 }
572 skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2); 564 skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2);
573 recv_Bchannel(&ch->bch, 0); 565 recv_Bchannel(&ch->bch, 0, false);
574 } 566 }
575 if (ch->is->cmsb & SART_NMD) { /* ABORT */ 567 if (ch->is->cmsb & SART_NMD) { /* ABORT */
576 pr_debug("%s: isar_rcv_frame: no more data\n", 568 pr_debug("%s: isar_rcv_frame: no more data\n",
@@ -598,16 +590,25 @@ isar_fill_fifo(struct isar_ch *ch)
598 u8 msb; 590 u8 msb;
599 u8 *ptr; 591 u8 *ptr;
600 592
601 pr_debug("%s: ch%d tx_skb %p tx_idx %d\n", 593 pr_debug("%s: ch%d tx_skb %d tx_idx %d\n", ch->is->name, ch->bch.nr,
602 ch->is->name, ch->bch.nr, ch->bch.tx_skb, ch->bch.tx_idx); 594 ch->bch.tx_skb ? ch->bch.tx_skb->len : -1, ch->bch.tx_idx);
603 if (!ch->bch.tx_skb) 595 if (!(ch->is->bstat &
596 (ch->dpath == 1 ? BSTAT_RDM1 : BSTAT_RDM2)))
597 return;
598 if (!ch->bch.tx_skb) {
599 if (!test_bit(FLG_TX_EMPTY, &ch->bch.Flags) ||
600 (ch->bch.state != ISDN_P_B_RAW))
601 return;
602 count = ch->mml;
603 /* use the card buffer */
604 memset(ch->is->buf, ch->bch.fill[0], count);
605 send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA,
606 0, count, ch->is->buf);
604 return; 607 return;
608 }
605 count = ch->bch.tx_skb->len - ch->bch.tx_idx; 609 count = ch->bch.tx_skb->len - ch->bch.tx_idx;
606 if (count <= 0) 610 if (count <= 0)
607 return; 611 return;
608 if (!(ch->is->bstat &
609 (ch->dpath == 1 ? BSTAT_RDM1 : BSTAT_RDM2)))
610 return;
611 if (count > ch->mml) { 612 if (count > ch->mml) {
612 msb = 0; 613 msb = 0;
613 count = ch->mml; 614 count = ch->mml;
@@ -686,9 +687,9 @@ sel_bch_isar(struct isar_hw *isar, u8 dpath)
686static void 687static void
687send_next(struct isar_ch *ch) 688send_next(struct isar_ch *ch)
688{ 689{
689 pr_debug("%s: %s ch%d tx_skb %p tx_idx %d\n", 690 pr_debug("%s: %s ch%d tx_skb %d tx_idx %d\n", ch->is->name, __func__,
690 ch->is->name, __func__, ch->bch.nr, 691 ch->bch.nr, ch->bch.tx_skb ? ch->bch.tx_skb->len : -1,
691 ch->bch.tx_skb, ch->bch.tx_idx); 692 ch->bch.tx_idx);
692 if (ch->bch.state == ISDN_P_B_T30_FAX) { 693 if (ch->bch.state == ISDN_P_B_T30_FAX) {
693 if (ch->cmd == PCTRL_CMD_FTH) { 694 if (ch->cmd == PCTRL_CMD_FTH) {
694 if (test_bit(FLG_LASTDATA, &ch->bch.Flags)) { 695 if (test_bit(FLG_LASTDATA, &ch->bch.Flags)) {
@@ -702,15 +703,14 @@ send_next(struct isar_ch *ch)
702 } 703 }
703 } 704 }
704 } 705 }
705 if (ch->bch.tx_skb) { 706 if (ch->bch.tx_skb)
706 /* send confirm, on trans, free on hdlc. */
707 if (test_bit(FLG_TRANSPARENT, &ch->bch.Flags))
708 confirm_Bsend(&ch->bch);
709 dev_kfree_skb(ch->bch.tx_skb); 707 dev_kfree_skb(ch->bch.tx_skb);
710 } 708 if (get_next_bframe(&ch->bch)) {
711 if (get_next_bframe(&ch->bch))
712 isar_fill_fifo(ch); 709 isar_fill_fifo(ch);
713 else { 710 test_and_clear_bit(FLG_TX_EMPTY, &ch->bch.Flags);
711 } else if (test_bit(FLG_TX_EMPTY, &ch->bch.Flags)) {
712 isar_fill_fifo(ch);
713 } else {
714 if (test_and_clear_bit(FLG_DLEETX, &ch->bch.Flags)) { 714 if (test_and_clear_bit(FLG_DLEETX, &ch->bch.Flags)) {
715 if (test_and_clear_bit(FLG_LASTDATA, 715 if (test_and_clear_bit(FLG_LASTDATA,
716 &ch->bch.Flags)) { 716 &ch->bch.Flags)) {
@@ -724,6 +724,8 @@ send_next(struct isar_ch *ch)
724 } else { 724 } else {
725 deliver_status(ch, HW_MOD_CONNECT); 725 deliver_status(ch, HW_MOD_CONNECT);
726 } 726 }
727 } else if (test_bit(FLG_FILLEMPTY, &ch->bch.Flags)) {
728 test_and_set_bit(FLG_TX_EMPTY, &ch->bch.Flags);
727 } 729 }
728 } 730 }
729} 731}
@@ -1487,14 +1489,10 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
1487 spin_lock_irqsave(ich->is->hwlock, flags); 1489 spin_lock_irqsave(ich->is->hwlock, flags);
1488 ret = bchannel_senddata(bch, skb); 1490 ret = bchannel_senddata(bch, skb);
1489 if (ret > 0) { /* direct TX */ 1491 if (ret > 0) { /* direct TX */
1490 id = hh->id; /* skb can be freed */
1491 ret = 0; 1492 ret = 0;
1492 isar_fill_fifo(ich); 1493 isar_fill_fifo(ich);
1493 spin_unlock_irqrestore(ich->is->hwlock, flags); 1494 }
1494 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) 1495 spin_unlock_irqrestore(ich->is->hwlock, flags);
1495 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1496 } else
1497 spin_unlock_irqrestore(ich->is->hwlock, flags);
1498 return ret; 1496 return ret;
1499 case PH_ACTIVATE_REQ: 1497 case PH_ACTIVATE_REQ:
1500 spin_lock_irqsave(ich->is->hwlock, flags); 1498 spin_lock_irqsave(ich->is->hwlock, flags);
@@ -1575,20 +1573,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
1575static int 1573static int
1576channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) 1574channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1577{ 1575{
1578 int ret = 0; 1576 return mISDN_ctrl_bchannel(bch, cq);
1579
1580 switch (cq->op) {
1581 case MISDN_CTRL_GETOP:
1582 cq->op = 0;
1583 break;
1584 /* Nothing implemented yet */
1585 case MISDN_CTRL_FILL_EMPTY:
1586 default:
1587 pr_info("%s: unknown Op %x\n", __func__, cq->op);
1588 ret = -EINVAL;
1589 break;
1590 }
1591 return ret;
1592} 1577}
1593 1578
1594static int 1579static int
@@ -1603,15 +1588,10 @@ isar_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
1603 switch (cmd) { 1588 switch (cmd) {
1604 case CLOSE_CHANNEL: 1589 case CLOSE_CHANNEL:
1605 test_and_clear_bit(FLG_OPEN, &bch->Flags); 1590 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1606 if (test_bit(FLG_ACTIVE, &bch->Flags)) { 1591 spin_lock_irqsave(ich->is->hwlock, flags);
1607 spin_lock_irqsave(ich->is->hwlock, flags); 1592 mISDN_freebchannel(bch);
1608 mISDN_freebchannel(bch); 1593 modeisar(ich, ISDN_P_NONE);
1609 modeisar(ich, ISDN_P_NONE); 1594 spin_unlock_irqrestore(ich->is->hwlock, flags);
1610 spin_unlock_irqrestore(ich->is->hwlock, flags);
1611 } else {
1612 skb_queue_purge(&bch->rqueue);
1613 bch->rcount = 0;
1614 }
1615 ch->protocol = ISDN_P_NONE; 1595 ch->protocol = ISDN_P_NONE;
1616 ch->peer = NULL; 1596 ch->peer = NULL;
1617 module_put(ich->is->owner); 1597 module_put(ich->is->owner);
@@ -1677,7 +1657,6 @@ isar_open(struct isar_hw *isar, struct channel_req *rq)
1677 bch = &isar->ch[rq->adr.channel - 1].bch; 1657 bch = &isar->ch[rq->adr.channel - 1].bch;
1678 if (test_and_set_bit(FLG_OPEN, &bch->Flags)) 1658 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1679 return -EBUSY; /* b-channel can be only open once */ 1659 return -EBUSY; /* b-channel can be only open once */
1680 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
1681 bch->ch.protocol = rq->protocol; 1660 bch->ch.protocol = rq->protocol;
1682 rq->ch = &bch->ch; 1661 rq->ch = &bch->ch;
1683 return 0; 1662 return 0;
@@ -1691,7 +1670,7 @@ mISDNisar_init(struct isar_hw *isar, void *hw)
1691 isar->hw = hw; 1670 isar->hw = hw;
1692 for (i = 0; i < 2; i++) { 1671 for (i = 0; i < 2; i++) {
1693 isar->ch[i].bch.nr = i + 1; 1672 isar->ch[i].bch.nr = i + 1;
1694 mISDN_initbchannel(&isar->ch[i].bch, MAX_DATA_MEM); 1673 mISDN_initbchannel(&isar->ch[i].bch, MAX_DATA_MEM, 32);
1695 isar->ch[i].bch.ch.nr = i + 1; 1674 isar->ch[i].bch.ch.nr = i + 1;
1696 isar->ch[i].bch.ch.send = &isar_l2l1; 1675 isar->ch[i].bch.ch.send = &isar_l2l1;
1697 isar->ch[i].bch.ch.ctrl = isar_bctrl; 1676 isar->ch[i].bch.ch.ctrl = isar_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index c726e09d0981..c3e3e7686273 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -386,24 +386,20 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
386 bc->bch.nr, idx); 386 bc->bch.nr, idx);
387 } 387 }
388 bc->lastrx = idx; 388 bc->lastrx = idx;
389 if (!bc->bch.rx_skb) { 389 if (test_bit(FLG_RX_OFF, &bc->bch.Flags)) {
390 bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen, GFP_ATOMIC); 390 bc->bch.dropcnt += cnt;
391 if (!bc->bch.rx_skb) { 391 return;
392 pr_info("%s: B%1d receive out of memory\n",
393 card->name, bc->bch.nr);
394 return;
395 }
396 } 392 }
397 393 stat = bchannel_get_rxbuf(&bc->bch, cnt);
398 if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) { 394 /* only transparent use the count here, HDLC overun is detected later */
399 if ((bc->bch.rx_skb->len + cnt) > bc->bch.maxlen) { 395 if (stat == ENOMEM) {
400 pr_debug("%s: B%1d overrun %d\n", card->name, 396 pr_warning("%s.B%d: No memory for %d bytes\n",
401 bc->bch.nr, bc->bch.rx_skb->len + cnt); 397 card->name, bc->bch.nr, cnt);
402 skb_trim(bc->bch.rx_skb, 0); 398 return;
403 return; 399 }
404 } 400 if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
405 p = skb_put(bc->bch.rx_skb, cnt); 401 p = skb_put(bc->bch.rx_skb, cnt);
406 } else 402 else
407 p = bc->hrbuf; 403 p = bc->hrbuf;
408 404
409 for (i = 0; i < cnt; i++) { 405 for (i = 0; i < cnt; i++) {
@@ -414,48 +410,45 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
414 idx = 0; 410 idx = 0;
415 p[i] = val & 0xff; 411 p[i] = val & 0xff;
416 } 412 }
413
414 if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
415 recv_Bchannel(&bc->bch, 0, false);
416 return;
417 }
418
417 pn = bc->hrbuf; 419 pn = bc->hrbuf;
418next_frame: 420 while (cnt > 0) {
419 if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
420 stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i, 421 stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
421 bc->bch.rx_skb->data, bc->bch.maxlen); 422 bc->bch.rx_skb->data, bc->bch.maxlen);
422 if (stat > 0) /* valid frame received */ 423 if (stat > 0) { /* valid frame received */
423 p = skb_put(bc->bch.rx_skb, stat); 424 p = skb_put(bc->bch.rx_skb, stat);
424 else if (stat == -HDLC_CRC_ERROR) 425 if (debug & DEBUG_HW_BFIFO) {
426 snprintf(card->log, LOG_SIZE,
427 "B%1d-recv %s %d ", bc->bch.nr,
428 card->name, stat);
429 print_hex_dump_bytes(card->log,
430 DUMP_PREFIX_OFFSET, p,
431 stat);
432 }
433 recv_Bchannel(&bc->bch, 0, false);
434 stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
435 if (stat < 0) {
436 pr_warning("%s.B%d: No memory for %d bytes\n",
437 card->name, bc->bch.nr, cnt);
438 return;
439 }
440 } else if (stat == -HDLC_CRC_ERROR) {
425 pr_info("%s: B%1d receive frame CRC error\n", 441 pr_info("%s: B%1d receive frame CRC error\n",
426 card->name, bc->bch.nr); 442 card->name, bc->bch.nr);
427 else if (stat == -HDLC_FRAMING_ERROR) 443 } else if (stat == -HDLC_FRAMING_ERROR) {
428 pr_info("%s: B%1d receive framing error\n", 444 pr_info("%s: B%1d receive framing error\n",
429 card->name, bc->bch.nr); 445 card->name, bc->bch.nr);
430 else if (stat == -HDLC_LENGTH_ERROR) 446 } else if (stat == -HDLC_LENGTH_ERROR) {
431 pr_info("%s: B%1d receive frame too long (> %d)\n", 447 pr_info("%s: B%1d receive frame too long (> %d)\n",
432 card->name, bc->bch.nr, bc->bch.maxlen); 448 card->name, bc->bch.nr, bc->bch.maxlen);
433 } else
434 stat = cnt;
435
436 if (stat > 0) {
437 if (debug & DEBUG_HW_BFIFO) {
438 snprintf(card->log, LOG_SIZE, "B%1d-recv %s %d ",
439 bc->bch.nr, card->name, stat);
440 print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET,
441 p, stat);
442 } 449 }
443 recv_Bchannel(&bc->bch, 0);
444 }
445 if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
446 pn += i; 450 pn += i;
447 cnt -= i; 451 cnt -= i;
448 if (!bc->bch.rx_skb) {
449 bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen,
450 GFP_ATOMIC);
451 if (!bc->bch.rx_skb) {
452 pr_info("%s: B%1d receive out of memory\n",
453 card->name, bc->bch.nr);
454 return;
455 }
456 }
457 if (cnt > 0)
458 goto next_frame;
459 } 452 }
460} 453}
461 454
@@ -544,22 +537,31 @@ static void
544fill_dma(struct tiger_ch *bc) 537fill_dma(struct tiger_ch *bc)
545{ 538{
546 struct tiger_hw *card = bc->bch.hw; 539 struct tiger_hw *card = bc->bch.hw;
547 int count, i; 540 int count, i, fillempty = 0;
548 u32 m, v; 541 u32 m, v, n = 0;
549 u8 *p; 542 u8 *p;
550 543
551 if (bc->free == 0) 544 if (bc->free == 0)
552 return; 545 return;
553 count = bc->bch.tx_skb->len - bc->bch.tx_idx; 546 if (!bc->bch.tx_skb) {
554 if (count <= 0) 547 if (!test_bit(FLG_TX_EMPTY, &bc->bch.Flags))
555 return; 548 return;
556 pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n", card->name, 549 fillempty = 1;
557 __func__, bc->bch.nr, count, bc->free, bc->bch.tx_idx, 550 count = card->send.size >> 1;
558 bc->bch.tx_skb->len, bc->txstate, bc->idx, card->send.idx); 551 p = bc->bch.fill;
552 } else {
553 count = bc->bch.tx_skb->len - bc->bch.tx_idx;
554 if (count <= 0)
555 return;
556 pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n",
557 card->name, __func__, bc->bch.nr, count, bc->free,
558 bc->bch.tx_idx, bc->bch.tx_skb->len, bc->txstate,
559 bc->idx, card->send.idx);
560 p = bc->bch.tx_skb->data + bc->bch.tx_idx;
561 }
559 if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN)) 562 if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
560 resync(bc, card); 563 resync(bc, card);
561 p = bc->bch.tx_skb->data + bc->bch.tx_idx; 564 if (test_bit(FLG_HDLC, &bc->bch.Flags) && !fillempty) {
562 if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
563 count = isdnhdlc_encode(&bc->hsend, p, count, &i, 565 count = isdnhdlc_encode(&bc->hsend, p, count, &i,
564 bc->hsbuf, bc->free); 566 bc->hsbuf, bc->free);
565 pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name, 567 pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
@@ -570,17 +572,33 @@ fill_dma(struct tiger_ch *bc)
570 } else { 572 } else {
571 if (count > bc->free) 573 if (count > bc->free)
572 count = bc->free; 574 count = bc->free;
573 bc->bch.tx_idx += count; 575 if (!fillempty)
576 bc->bch.tx_idx += count;
574 bc->free -= count; 577 bc->free -= count;
575 } 578 }
576 m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff; 579 m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
577 for (i = 0; i < count; i++) { 580 if (fillempty) {
578 if (bc->idx >= card->send.size) 581 n = p[0];
579 bc->idx = 0; 582 if (!(bc->bch.nr & 1))
580 v = card->send.start[bc->idx]; 583 n <<= 8;
581 v &= m; 584 for (i = 0; i < count; i++) {
582 v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8; 585 if (bc->idx >= card->send.size)
583 card->send.start[bc->idx++] = v; 586 bc->idx = 0;
587 v = card->send.start[bc->idx];
588 v &= m;
589 v |= n;
590 card->send.start[bc->idx++] = v;
591 }
592 } else {
593 for (i = 0; i < count; i++) {
594 if (bc->idx >= card->send.size)
595 bc->idx = 0;
596 v = card->send.start[bc->idx];
597 v &= m;
598 n = p[i];
599 v |= (bc->bch.nr & 1) ? n : n << 8;
600 card->send.start[bc->idx++] = v;
601 }
584 } 602 }
585 if (debug & DEBUG_HW_BFIFO) { 603 if (debug & DEBUG_HW_BFIFO) {
586 snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ", 604 snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
@@ -595,21 +613,26 @@ fill_dma(struct tiger_ch *bc)
595static int 613static int
596bc_next_frame(struct tiger_ch *bc) 614bc_next_frame(struct tiger_ch *bc)
597{ 615{
598 if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) 616 int ret = 1;
617
618 if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
599 fill_dma(bc); 619 fill_dma(bc);
600 else { 620 } else {
601 if (bc->bch.tx_skb) { 621 if (bc->bch.tx_skb)
602 /* send confirm, on trans, free on hdlc. */
603 if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
604 confirm_Bsend(&bc->bch);
605 dev_kfree_skb(bc->bch.tx_skb); 622 dev_kfree_skb(bc->bch.tx_skb);
606 } 623 if (get_next_bframe(&bc->bch)) {
607 if (get_next_bframe(&bc->bch))
608 fill_dma(bc); 624 fill_dma(bc);
609 else 625 test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags);
610 return 0; 626 } else if (test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) {
627 fill_dma(bc);
628 } else if (test_bit(FLG_FILLEMPTY, &bc->bch.Flags)) {
629 test_and_set_bit(FLG_TX_EMPTY, &bc->bch.Flags);
630 ret = 0;
631 } else {
632 ret = 0;
633 }
611 } 634 }
612 return 1; 635 return ret;
613} 636}
614 637
615static void 638static void
@@ -732,22 +755,17 @@ nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
732 struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch); 755 struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
733 struct tiger_hw *card = bch->hw; 756 struct tiger_hw *card = bch->hw;
734 struct mISDNhead *hh = mISDN_HEAD_P(skb); 757 struct mISDNhead *hh = mISDN_HEAD_P(skb);
735 u32 id; 758 unsigned long flags;
736 u_long flags;
737 759
738 switch (hh->prim) { 760 switch (hh->prim) {
739 case PH_DATA_REQ: 761 case PH_DATA_REQ:
740 spin_lock_irqsave(&card->lock, flags); 762 spin_lock_irqsave(&card->lock, flags);
741 ret = bchannel_senddata(bch, skb); 763 ret = bchannel_senddata(bch, skb);
742 if (ret > 0) { /* direct TX */ 764 if (ret > 0) { /* direct TX */
743 id = hh->id; /* skb can be freed */
744 fill_dma(bc); 765 fill_dma(bc);
745 ret = 0; 766 ret = 0;
746 spin_unlock_irqrestore(&card->lock, flags); 767 }
747 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) 768 spin_unlock_irqrestore(&card->lock, flags);
748 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
749 } else
750 spin_unlock_irqrestore(&card->lock, flags);
751 return ret; 769 return ret;
752 case PH_ACTIVATE_REQ: 770 case PH_ACTIVATE_REQ:
753 spin_lock_irqsave(&card->lock, flags); 771 spin_lock_irqsave(&card->lock, flags);
@@ -778,21 +796,7 @@ nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
778static int 796static int
779channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq) 797channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
780{ 798{
781 int ret = 0; 799 return mISDN_ctrl_bchannel(&bc->bch, cq);
782 struct tiger_hw *card = bc->bch.hw;
783
784 switch (cq->op) {
785 case MISDN_CTRL_GETOP:
786 cq->op = 0;
787 break;
788 /* Nothing implemented yet */
789 case MISDN_CTRL_FILL_EMPTY:
790 default:
791 pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
792 ret = -EINVAL;
793 break;
794 }
795 return ret;
796} 800}
797 801
798static int 802static int
@@ -808,14 +812,10 @@ nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
808 switch (cmd) { 812 switch (cmd) {
809 case CLOSE_CHANNEL: 813 case CLOSE_CHANNEL:
810 test_and_clear_bit(FLG_OPEN, &bch->Flags); 814 test_and_clear_bit(FLG_OPEN, &bch->Flags);
811 if (test_bit(FLG_ACTIVE, &bch->Flags)) { 815 spin_lock_irqsave(&card->lock, flags);
812 spin_lock_irqsave(&card->lock, flags); 816 mISDN_freebchannel(bch);
813 mISDN_freebchannel(bch); 817 mode_tiger(bc, ISDN_P_NONE);
814 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags); 818 spin_unlock_irqrestore(&card->lock, flags);
815 test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
816 mode_tiger(bc, ISDN_P_NONE);
817 spin_unlock_irqrestore(&card->lock, flags);
818 }
819 ch->protocol = ISDN_P_NONE; 819 ch->protocol = ISDN_P_NONE;
820 ch->peer = NULL; 820 ch->peer = NULL;
821 module_put(THIS_MODULE); 821 module_put(THIS_MODULE);
@@ -837,7 +837,7 @@ channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
837 837
838 switch (cq->op) { 838 switch (cq->op) {
839 case MISDN_CTRL_GETOP: 839 case MISDN_CTRL_GETOP:
840 cq->op = MISDN_CTRL_LOOP; 840 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
841 break; 841 break;
842 case MISDN_CTRL_LOOP: 842 case MISDN_CTRL_LOOP:
843 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ 843 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -847,6 +847,9 @@ channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
847 } 847 }
848 ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel); 848 ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
849 break; 849 break;
850 case MISDN_CTRL_L1_TIMER3:
851 ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
852 break;
850 default: 853 default:
851 pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op); 854 pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
852 ret = -EINVAL; 855 ret = -EINVAL;
@@ -1027,7 +1030,8 @@ setup_instance(struct tiger_hw *card)
1027 for (i = 0; i < 2; i++) { 1030 for (i = 0; i < 2; i++) {
1028 card->bc[i].bch.nr = i + 1; 1031 card->bc[i].bch.nr = i + 1;
1029 set_channelmap(i + 1, card->isac.dch.dev.channelmap); 1032 set_channelmap(i + 1, card->isac.dch.dev.channelmap);
1030 mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM); 1033 mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
1034 NJ_DMA_RXSIZE >> 1);
1031 card->bc[i].bch.hw = card; 1035 card->bc[i].bch.hw = card;
1032 card->bc[i].bch.ch.send = nj_l2l1B; 1036 card->bc[i].bch.ch.send = nj_l2l1B;
1033 card->bc[i].bch.ch.ctrl = nj_bctrl; 1037 card->bc[i].bch.ch.ctrl = nj_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index 04689935148b..93f344d74e54 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -224,7 +224,7 @@ channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq)
224 224
225 switch (cq->op) { 225 switch (cq->op) {
226 case MISDN_CTRL_GETOP: 226 case MISDN_CTRL_GETOP:
227 cq->op = MISDN_CTRL_LOOP; 227 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
228 break; 228 break;
229 case MISDN_CTRL_LOOP: 229 case MISDN_CTRL_LOOP:
230 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ 230 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -234,6 +234,9 @@ channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq)
234 } 234 }
235 ret = sf->isac.ctrl(&sf->isac, HW_TESTLOOP, cq->channel); 235 ret = sf->isac.ctrl(&sf->isac, HW_TESTLOOP, cq->channel);
236 break; 236 break;
237 case MISDN_CTRL_L1_TIMER3:
238 ret = sf->isac.ctrl(&sf->isac, HW_TIMER3_VALUE, cq->p1);
239 break;
237 default: 240 default:
238 pr_info("%s: unknown Op %x\n", sf->name, cq->op); 241 pr_info("%s: unknown Op %x\n", sf->name, cq->op);
239 ret = -EINVAL; 242 ret = -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 2183357f0799..26a86b846099 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -465,6 +465,7 @@ W6692_empty_Bfifo(struct w6692_ch *wch, int count)
465{ 465{
466 struct w6692_hw *card = wch->bch.hw; 466 struct w6692_hw *card = wch->bch.hw;
467 u8 *ptr; 467 u8 *ptr;
468 int maxlen;
468 469
469 pr_debug("%s: empty_Bfifo %d\n", card->name, count); 470 pr_debug("%s: empty_Bfifo %d\n", card->name, count);
470 if (unlikely(wch->bch.state == ISDN_P_NONE)) { 471 if (unlikely(wch->bch.state == ISDN_P_NONE)) {
@@ -474,20 +475,18 @@ W6692_empty_Bfifo(struct w6692_ch *wch, int count)
474 skb_trim(wch->bch.rx_skb, 0); 475 skb_trim(wch->bch.rx_skb, 0);
475 return; 476 return;
476 } 477 }
477 if (!wch->bch.rx_skb) { 478 if (test_bit(FLG_RX_OFF, &wch->bch.Flags)) {
478 wch->bch.rx_skb = mI_alloc_skb(wch->bch.maxlen, GFP_ATOMIC); 479 wch->bch.dropcnt += count;
479 if (unlikely(!wch->bch.rx_skb)) { 480 WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
480 pr_info("%s: B receive out of memory\n", card->name); 481 return;
481 WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK |
482 W_B_CMDR_RACT);
483 return;
484 }
485 } 482 }
486 if (wch->bch.rx_skb->len + count > wch->bch.maxlen) { 483 maxlen = bchannel_get_rxbuf(&wch->bch, count);
487 pr_debug("%s: empty_Bfifo incoming packet too large\n", 484 if (maxlen < 0) {
488 card->name);
489 WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT); 485 WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
490 skb_trim(wch->bch.rx_skb, 0); 486 if (wch->bch.rx_skb)
487 skb_trim(wch->bch.rx_skb, 0);
488 pr_warning("%s.B%d: No bufferspace for %d bytes\n",
489 card->name, wch->bch.nr, count);
491 return; 490 return;
492 } 491 }
493 ptr = skb_put(wch->bch.rx_skb, count); 492 ptr = skb_put(wch->bch.rx_skb, count);
@@ -504,16 +503,22 @@ static void
504W6692_fill_Bfifo(struct w6692_ch *wch) 503W6692_fill_Bfifo(struct w6692_ch *wch)
505{ 504{
506 struct w6692_hw *card = wch->bch.hw; 505 struct w6692_hw *card = wch->bch.hw;
507 int count; 506 int count, fillempty = 0;
508 u8 *ptr, cmd = W_B_CMDR_RACT | W_B_CMDR_XMS; 507 u8 *ptr, cmd = W_B_CMDR_RACT | W_B_CMDR_XMS;
509 508
510 pr_debug("%s: fill Bfifo\n", card->name); 509 pr_debug("%s: fill Bfifo\n", card->name);
511 if (!wch->bch.tx_skb) 510 if (!wch->bch.tx_skb) {
512 return; 511 if (!test_bit(FLG_TX_EMPTY, &wch->bch.Flags))
513 count = wch->bch.tx_skb->len - wch->bch.tx_idx; 512 return;
514 if (count <= 0) 513 ptr = wch->bch.fill;
515 return; 514 count = W_B_FIFO_THRESH;
516 ptr = wch->bch.tx_skb->data + wch->bch.tx_idx; 515 fillempty = 1;
516 } else {
517 count = wch->bch.tx_skb->len - wch->bch.tx_idx;
518 if (count <= 0)
519 return;
520 ptr = wch->bch.tx_skb->data + wch->bch.tx_idx;
521 }
517 if (count > W_B_FIFO_THRESH) 522 if (count > W_B_FIFO_THRESH)
518 count = W_B_FIFO_THRESH; 523 count = W_B_FIFO_THRESH;
519 else if (test_bit(FLG_HDLC, &wch->bch.Flags)) 524 else if (test_bit(FLG_HDLC, &wch->bch.Flags))
@@ -522,9 +527,16 @@ W6692_fill_Bfifo(struct w6692_ch *wch)
522 pr_debug("%s: fill Bfifo%d/%d\n", card->name, 527 pr_debug("%s: fill Bfifo%d/%d\n", card->name,
523 count, wch->bch.tx_idx); 528 count, wch->bch.tx_idx);
524 wch->bch.tx_idx += count; 529 wch->bch.tx_idx += count;
525 outsb(wch->addr + W_B_XFIFO, ptr, count); 530 if (fillempty) {
531 while (count > 0) {
532 outsb(wch->addr + W_B_XFIFO, ptr, MISDN_BCH_FILL_SIZE);
533 count -= MISDN_BCH_FILL_SIZE;
534 }
535 } else {
536 outsb(wch->addr + W_B_XFIFO, ptr, count);
537 }
526 WriteW6692B(wch, W_B_CMDR, cmd); 538 WriteW6692B(wch, W_B_CMDR, cmd);
527 if (debug & DEBUG_HW_DFIFO) { 539 if ((debug & DEBUG_HW_BFIFO) && !fillempty) {
528 snprintf(card->log, 63, "B%1d-send %s %d ", 540 snprintf(card->log, 63, "B%1d-send %s %d ",
529 wch->bch.nr, card->name, count); 541 wch->bch.nr, card->name, count);
530 print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, ptr, count); 542 print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, ptr, count);
@@ -638,17 +650,17 @@ w6692_mode(struct w6692_ch *wch, u32 pr)
638static void 650static void
639send_next(struct w6692_ch *wch) 651send_next(struct w6692_ch *wch)
640{ 652{
641 if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len) 653 if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len) {
642 W6692_fill_Bfifo(wch); 654 W6692_fill_Bfifo(wch);
643 else { 655 } else {
644 if (wch->bch.tx_skb) { 656 if (wch->bch.tx_skb)
645 /* send confirm, on trans, free on hdlc. */
646 if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
647 confirm_Bsend(&wch->bch);
648 dev_kfree_skb(wch->bch.tx_skb); 657 dev_kfree_skb(wch->bch.tx_skb);
649 } 658 if (get_next_bframe(&wch->bch)) {
650 if (get_next_bframe(&wch->bch)) 659 W6692_fill_Bfifo(wch);
660 test_and_clear_bit(FLG_TX_EMPTY, &wch->bch.Flags);
661 } else if (test_bit(FLG_TX_EMPTY, &wch->bch.Flags)) {
651 W6692_fill_Bfifo(wch); 662 W6692_fill_Bfifo(wch);
663 }
652 } 664 }
653} 665}
654 666
@@ -698,7 +710,7 @@ W6692B_interrupt(struct w6692_hw *card, int ch)
698 if (count == 0) 710 if (count == 0)
699 count = W_B_FIFO_THRESH; 711 count = W_B_FIFO_THRESH;
700 W6692_empty_Bfifo(wch, count); 712 W6692_empty_Bfifo(wch, count);
701 recv_Bchannel(&wch->bch, 0); 713 recv_Bchannel(&wch->bch, 0, false);
702 } 714 }
703 } 715 }
704 if (stat & W_B_EXI_RMR) { 716 if (stat & W_B_EXI_RMR) {
@@ -714,9 +726,8 @@ W6692B_interrupt(struct w6692_hw *card, int ch)
714 W_B_CMDR_RRST | W_B_CMDR_RACT); 726 W_B_CMDR_RRST | W_B_CMDR_RACT);
715 } else { 727 } else {
716 W6692_empty_Bfifo(wch, W_B_FIFO_THRESH); 728 W6692_empty_Bfifo(wch, W_B_FIFO_THRESH);
717 if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags) && 729 if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
718 wch->bch.rx_skb && (wch->bch.rx_skb->len > 0)) 730 recv_Bchannel(&wch->bch, 0, false);
719 recv_Bchannel(&wch->bch, 0);
720 } 731 }
721 } 732 }
722 if (stat & W_B_EXI_RDOV) { 733 if (stat & W_B_EXI_RDOV) {
@@ -738,8 +749,8 @@ W6692B_interrupt(struct w6692_hw *card, int ch)
738 wch->bch.nr, star); 749 wch->bch.nr, star);
739 } 750 }
740 if (star & W_B_STAR_XDOW) { 751 if (star & W_B_STAR_XDOW) {
741 pr_debug("%s: B%d XDOW proto=%x\n", card->name, 752 pr_warning("%s: B%d XDOW proto=%x\n", card->name,
742 wch->bch.nr, wch->bch.state); 753 wch->bch.nr, wch->bch.state);
743#ifdef ERROR_STATISTIC 754#ifdef ERROR_STATISTIC
744 wch->bch.err_xdu++; 755 wch->bch.err_xdu++;
745#endif 756#endif
@@ -752,20 +763,21 @@ W6692B_interrupt(struct w6692_hw *card, int ch)
752 } 763 }
753 } 764 }
754 send_next(wch); 765 send_next(wch);
755 if (stat & W_B_EXI_XDUN) 766 if (star & W_B_STAR_XDOW)
756 return; /* handle XDOW only once */ 767 return; /* handle XDOW only once */
757 } 768 }
758 if (stat & W_B_EXI_XDUN) { 769 if (stat & W_B_EXI_XDUN) {
759 pr_debug("%s: B%d XDUN proto=%x\n", card->name, 770 pr_warning("%s: B%d XDUN proto=%x\n", card->name,
760 wch->bch.nr, wch->bch.state); 771 wch->bch.nr, wch->bch.state);
761#ifdef ERROR_STATISTIC 772#ifdef ERROR_STATISTIC
762 wch->bch.err_xdu++; 773 wch->bch.err_xdu++;
763#endif 774#endif
764 WriteW6692B(wch, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT); 775 /* resend - no XRST needed */
765 /* resend */
766 if (wch->bch.tx_skb) { 776 if (wch->bch.tx_skb) {
767 if (!test_bit(FLG_TRANSPARENT, &wch->bch.Flags)) 777 if (!test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
768 wch->bch.tx_idx = 0; 778 wch->bch.tx_idx = 0;
779 } else if (test_bit(FLG_FILLEMPTY, &wch->bch.Flags)) {
780 test_and_set_bit(FLG_TX_EMPTY, &wch->bch.Flags);
769 } 781 }
770 send_next(wch); 782 send_next(wch);
771 } 783 }
@@ -944,22 +956,17 @@ w6692_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
944 struct w6692_hw *card = bch->hw; 956 struct w6692_hw *card = bch->hw;
945 int ret = -EINVAL; 957 int ret = -EINVAL;
946 struct mISDNhead *hh = mISDN_HEAD_P(skb); 958 struct mISDNhead *hh = mISDN_HEAD_P(skb);
947 u32 id; 959 unsigned long flags;
948 u_long flags;
949 960
950 switch (hh->prim) { 961 switch (hh->prim) {
951 case PH_DATA_REQ: 962 case PH_DATA_REQ:
952 spin_lock_irqsave(&card->lock, flags); 963 spin_lock_irqsave(&card->lock, flags);
953 ret = bchannel_senddata(bch, skb); 964 ret = bchannel_senddata(bch, skb);
954 if (ret > 0) { /* direct TX */ 965 if (ret > 0) { /* direct TX */
955 id = hh->id; /* skb can be freed */
956 ret = 0; 966 ret = 0;
957 W6692_fill_Bfifo(bc); 967 W6692_fill_Bfifo(bc);
958 spin_unlock_irqrestore(&card->lock, flags); 968 }
959 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) 969 spin_unlock_irqrestore(&card->lock, flags);
960 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
961 } else
962 spin_unlock_irqrestore(&card->lock, flags);
963 return ret; 970 return ret;
964 case PH_ACTIVATE_REQ: 971 case PH_ACTIVATE_REQ:
965 spin_lock_irqsave(&card->lock, flags); 972 spin_lock_irqsave(&card->lock, flags);
@@ -994,20 +1001,7 @@ w6692_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
994static int 1001static int
995channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) 1002channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
996{ 1003{
997 int ret = 0; 1004 return mISDN_ctrl_bchannel(bch, cq);
998
999 switch (cq->op) {
1000 case MISDN_CTRL_GETOP:
1001 cq->op = 0;
1002 break;
1003 /* Nothing implemented yet */
1004 case MISDN_CTRL_FILL_EMPTY:
1005 default:
1006 pr_info("%s: unknown Op %x\n", __func__, cq->op);
1007 ret = -EINVAL;
1008 break;
1009 }
1010 return ret;
1011} 1005}
1012 1006
1013static int 1007static int
@@ -1022,7 +1016,6 @@ open_bchannel(struct w6692_hw *card, struct channel_req *rq)
1022 bch = &card->bc[rq->adr.channel - 1].bch; 1016 bch = &card->bc[rq->adr.channel - 1].bch;
1023 if (test_and_set_bit(FLG_OPEN, &bch->Flags)) 1017 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1024 return -EBUSY; /* b-channel can be only open once */ 1018 return -EBUSY; /* b-channel can be only open once */
1025 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
1026 bch->ch.protocol = rq->protocol; 1019 bch->ch.protocol = rq->protocol;
1027 rq->ch = &bch->ch; 1020 rq->ch = &bch->ch;
1028 return 0; 1021 return 0;
@@ -1035,7 +1028,10 @@ channel_ctrl(struct w6692_hw *card, struct mISDN_ctrl_req *cq)
1035 1028
1036 switch (cq->op) { 1029 switch (cq->op) {
1037 case MISDN_CTRL_GETOP: 1030 case MISDN_CTRL_GETOP:
1038 cq->op = 0; 1031 cq->op = MISDN_CTRL_L1_TIMER3;
1032 break;
1033 case MISDN_CTRL_L1_TIMER3:
1034 ret = l1_event(card->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
1039 break; 1035 break;
1040 default: 1036 default:
1041 pr_info("%s: unknown CTRL OP %x\n", card->name, cq->op); 1037 pr_info("%s: unknown CTRL OP %x\n", card->name, cq->op);
@@ -1058,15 +1054,10 @@ w6692_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
1058 switch (cmd) { 1054 switch (cmd) {
1059 case CLOSE_CHANNEL: 1055 case CLOSE_CHANNEL:
1060 test_and_clear_bit(FLG_OPEN, &bch->Flags); 1056 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1061 if (test_bit(FLG_ACTIVE, &bch->Flags)) { 1057 spin_lock_irqsave(&card->lock, flags);
1062 spin_lock_irqsave(&card->lock, flags); 1058 mISDN_freebchannel(bch);
1063 mISDN_freebchannel(bch); 1059 w6692_mode(bc, ISDN_P_NONE);
1064 w6692_mode(bc, ISDN_P_NONE); 1060 spin_unlock_irqrestore(&card->lock, flags);
1065 spin_unlock_irqrestore(&card->lock, flags);
1066 } else {
1067 skb_queue_purge(&bch->rqueue);
1068 bch->rcount = 0;
1069 }
1070 ch->protocol = ISDN_P_NONE; 1061 ch->protocol = ISDN_P_NONE;
1071 ch->peer = NULL; 1062 ch->peer = NULL;
1072 module_put(THIS_MODULE); 1063 module_put(THIS_MODULE);
@@ -1320,7 +1311,8 @@ setup_instance(struct w6692_hw *card)
1320 card->dch.hw = card; 1311 card->dch.hw = card;
1321 card->dch.dev.nrbchan = 2; 1312 card->dch.dev.nrbchan = 2;
1322 for (i = 0; i < 2; i++) { 1313 for (i = 0; i < 2; i++) {
1323 mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM); 1314 mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
1315 W_B_FIFO_THRESH);
1324 card->bc[i].bch.hw = card; 1316 card->bc[i].bch.hw = card;
1325 card->bc[i].bch.nr = i + 1; 1317 card->bc[i].bch.nr = i + 1;
1326 card->bc[i].bch.ch.nr = i + 1; 1318 card->bc[i].bch.ch.nr = i + 1;
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index ba91333e3e41..88e4f0ee073c 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -156,17 +156,9 @@ static ssize_t
156hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off) 156hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
157{ 157{
158 int rc; 158 int rc;
159 unsigned char valbuf[128];
160 hysdn_card *card = file->private_data; 159 hysdn_card *card = file->private_data;
161 160
162 if (count > (sizeof(valbuf) - 1)) 161 rc = kstrtoul_from_user(buf, count, 0, &card->debug_flags);
163 count = sizeof(valbuf) - 1; /* limit length */
164 if (copy_from_user(valbuf, buf, count))
165 return (-EFAULT); /* copy failed */
166
167 valbuf[count] = 0; /* terminating 0 */
168
169 rc = kstrtoul(valbuf, 0, &card->debug_flags);
170 if (rc < 0) 162 if (rc < 0)
171 return rc; 163 return rc;
172 hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags); 164 hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
diff --git a/drivers/isdn/i4l/isdn_bsdcomp.c b/drivers/isdn/i4l/isdn_bsdcomp.c
index c59e8d2c0675..8837ac5a492d 100644
--- a/drivers/isdn/i4l/isdn_bsdcomp.c
+++ b/drivers/isdn/i4l/isdn_bsdcomp.c
@@ -612,7 +612,7 @@ static int bsd_compress(void *state, struct sk_buff *skb_in, struct sk_buff *skb
612 db->n_bits++; 612 db->n_bits++;
613 613
614 /* If output length is too large then this is an incompressible frame. */ 614 /* If output length is too large then this is an incompressible frame. */
615 if (!skb_out || (skb_out && skb_out->len >= skb_in->len)) { 615 if (!skb_out || skb_out->len >= skb_in->len) {
616 ++db->incomp_count; 616 ++db->incomp_count;
617 db->incomp_bytes += isize; 617 db->incomp_bytes += isize;
618 return 0; 618 return 0;
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index a24530f05db0..c401634c00ec 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -355,6 +355,22 @@ mISDN_unregister_Bprotocol(struct Bprotocol *bp)
355} 355}
356EXPORT_SYMBOL(mISDN_unregister_Bprotocol); 356EXPORT_SYMBOL(mISDN_unregister_Bprotocol);
357 357
358static const char *msg_no_channel = "<no channel>";
359static const char *msg_no_stack = "<no stack>";
360static const char *msg_no_stackdev = "<no stack device>";
361
362const char *mISDNDevName4ch(struct mISDNchannel *ch)
363{
364 if (!ch)
365 return msg_no_channel;
366 if (!ch->st)
367 return msg_no_stack;
368 if (!ch->st->dev)
369 return msg_no_stackdev;
370 return dev_name(&ch->st->dev->dev);
371};
372EXPORT_SYMBOL(mISDNDevName4ch);
373
358static int 374static int
359mISDNInit(void) 375mISDNInit(void)
360{ 376{
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
index afe4173ae007..fc1733a08845 100644
--- a/drivers/isdn/mISDN/dsp.h
+++ b/drivers/isdn/mISDN/dsp.h
@@ -76,7 +76,9 @@ extern u8 dsp_silence;
76#define MAX_SECONDS_JITTER_CHECK 5 76#define MAX_SECONDS_JITTER_CHECK 5
77 77
78extern struct timer_list dsp_spl_tl; 78extern struct timer_list dsp_spl_tl;
79extern u32 dsp_spl_jiffies; 79
80/* the datatype need to match jiffies datatype */
81extern unsigned long dsp_spl_jiffies;
80 82
81/* the structure of conferences: 83/* the structure of conferences:
82 * 84 *
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 334feab060a1..a4f05c54c32b 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -742,8 +742,8 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
742 member->dsp->pcm_slot_tx, 742 member->dsp->pcm_slot_tx,
743 member->dsp->pcm_bank_tx, 743 member->dsp->pcm_bank_tx,
744 member->dsp->pcm_bank_rx); 744 member->dsp->pcm_bank_rx);
745 conf->hardware = 0; 745 conf->hardware = 1;
746 conf->software = 1; 746 conf->software = tx_data;
747 return; 747 return;
748 } 748 }
749 /* find a new slot */ 749 /* find a new slot */
@@ -834,8 +834,8 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
834 nextm->dsp->name, 834 nextm->dsp->name,
835 member->dsp->pcm_slot_tx, 835 member->dsp->pcm_slot_tx,
836 member->dsp->pcm_slot_rx); 836 member->dsp->pcm_slot_rx);
837 conf->hardware = 0; 837 conf->hardware = 1;
838 conf->software = 1; 838 conf->software = tx_data;
839 return; 839 return;
840 } 840 }
841 /* find two new slot */ 841 /* find two new slot */
@@ -939,8 +939,11 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
939 /* for more than two members.. */ 939 /* for more than two members.. */
940 940
941 /* if all members already have the same conference */ 941 /* if all members already have the same conference */
942 if (all_conf) 942 if (all_conf) {
943 conf->hardware = 1;
944 conf->software = tx_data;
943 return; 945 return;
946 }
944 947
945 /* 948 /*
946 * if there is an existing conference, but not all members have joined 949 * if there is an existing conference, but not all members have joined
@@ -1013,6 +1016,8 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
1013 dsp_cmx_hw_message(member->dsp, 1016 dsp_cmx_hw_message(member->dsp,
1014 MISDN_CTRL_HFC_CONF_JOIN, current_conf, 0, 0, 0); 1017 MISDN_CTRL_HFC_CONF_JOIN, current_conf, 0, 0, 0);
1015 } 1018 }
1019 conf->hardware = 1;
1020 conf->software = tx_data;
1016 return; 1021 return;
1017 } 1022 }
1018 1023
@@ -1328,7 +1333,7 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
1328 } 1333 }
1329 if (dsp->conf && dsp->conf->software && dsp->conf->hardware) 1334 if (dsp->conf && dsp->conf->software && dsp->conf->hardware)
1330 tx_data_only = 1; 1335 tx_data_only = 1;
1331 if (dsp->conf->software && dsp->echo.hardware) 1336 if (dsp->echo.software && dsp->echo.hardware)
1332 tx_data_only = 1; 1337 tx_data_only = 1;
1333 } 1338 }
1334 1339
@@ -1619,7 +1624,7 @@ send_packet:
1619 1624
1620static u32 jittercount; /* counter for jitter check */ 1625static u32 jittercount; /* counter for jitter check */
1621struct timer_list dsp_spl_tl; 1626struct timer_list dsp_spl_tl;
1622u32 dsp_spl_jiffies; /* calculate the next time to fire */ 1627unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
1623static u16 dsp_count; /* last sample count */ 1628static u16 dsp_count; /* last sample count */
1624static int dsp_count_valid; /* if we have last sample count */ 1629static int dsp_count_valid; /* if we have last sample count */
1625 1630
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 2ac2d7a25a9f..28c99c623bcd 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -268,6 +268,7 @@ dsp_fill_empty(struct dsp *dsp)
268 } 268 }
269 cq.op = MISDN_CTRL_FILL_EMPTY; 269 cq.op = MISDN_CTRL_FILL_EMPTY;
270 cq.p1 = 1; 270 cq.p1 = 1;
271 cq.p2 = dsp_silence;
271 if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) { 272 if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
272 printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n", 273 printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
273 __func__); 274 __func__);
diff --git a/drivers/isdn/mISDN/dsp_dtmf.c b/drivers/isdn/mISDN/dsp_dtmf.c
index 887860bdc63b..642f30be5ce2 100644
--- a/drivers/isdn/mISDN/dsp_dtmf.c
+++ b/drivers/isdn/mISDN/dsp_dtmf.c
@@ -222,16 +222,25 @@ coefficients:
222 goto storedigit; 222 goto storedigit;
223 } 223 }
224 224
225 if (dsp_debug & DEBUG_DSP_DTMFCOEFF) 225 if (dsp_debug & DEBUG_DSP_DTMFCOEFF) {
226 s32 tresh_100 = tresh/100;
227
228 if (tresh_100 == 0) {
229 tresh_100 = 1;
230 printk(KERN_DEBUG
231 "tresh(%d) too small set tresh/100 to 1\n",
232 tresh);
233 }
226 printk(KERN_DEBUG "a %3d %3d %3d %3d %3d %3d %3d %3d" 234 printk(KERN_DEBUG "a %3d %3d %3d %3d %3d %3d %3d %3d"
227 " tr:%3d r %3d %3d %3d %3d %3d %3d %3d %3d\n", 235 " tr:%3d r %3d %3d %3d %3d %3d %3d %3d %3d\n",
228 result[0] / 10000, result[1] / 10000, result[2] / 10000, 236 result[0] / 10000, result[1] / 10000, result[2] / 10000,
229 result[3] / 10000, result[4] / 10000, result[5] / 10000, 237 result[3] / 10000, result[4] / 10000, result[5] / 10000,
230 result[6] / 10000, result[7] / 10000, tresh / 10000, 238 result[6] / 10000, result[7] / 10000, tresh / 10000,
231 result[0] / (tresh / 100), result[1] / (tresh / 100), 239 result[0] / (tresh_100), result[1] / (tresh_100),
232 result[2] / (tresh / 100), result[3] / (tresh / 100), 240 result[2] / (tresh_100), result[3] / (tresh_100),
233 result[4] / (tresh / 100), result[5] / (tresh / 100), 241 result[4] / (tresh_100), result[5] / (tresh_100),
234 result[6] / (tresh / 100), result[7] / (tresh / 100)); 242 result[6] / (tresh_100), result[7] / (tresh_100));
243 }
235 244
236 /* calc digit (lowgroup/highgroup) */ 245 /* calc digit (lowgroup/highgroup) */
237 lowgroup = -1; 246 lowgroup = -1;
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index c74c363554c4..ef34fd40867c 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -81,10 +81,16 @@ mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
81EXPORT_SYMBOL(mISDN_initdchannel); 81EXPORT_SYMBOL(mISDN_initdchannel);
82 82
83int 83int
84mISDN_initbchannel(struct bchannel *ch, int maxlen) 84mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
85 unsigned short minlen)
85{ 86{
86 ch->Flags = 0; 87 ch->Flags = 0;
88 ch->minlen = minlen;
89 ch->next_minlen = minlen;
90 ch->init_minlen = minlen;
87 ch->maxlen = maxlen; 91 ch->maxlen = maxlen;
92 ch->next_maxlen = maxlen;
93 ch->init_maxlen = maxlen;
88 ch->hw = NULL; 94 ch->hw = NULL;
89 ch->rx_skb = NULL; 95 ch->rx_skb = NULL;
90 ch->tx_skb = NULL; 96 ch->tx_skb = NULL;
@@ -134,6 +140,14 @@ mISDN_clear_bchannel(struct bchannel *ch)
134 test_and_clear_bit(FLG_TX_BUSY, &ch->Flags); 140 test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
135 test_and_clear_bit(FLG_TX_NEXT, &ch->Flags); 141 test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
136 test_and_clear_bit(FLG_ACTIVE, &ch->Flags); 142 test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
143 test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags);
144 test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags);
145 test_and_clear_bit(FLG_RX_OFF, &ch->Flags);
146 ch->dropcnt = 0;
147 ch->minlen = ch->init_minlen;
148 ch->next_minlen = ch->init_minlen;
149 ch->maxlen = ch->init_maxlen;
150 ch->next_maxlen = ch->init_maxlen;
137} 151}
138EXPORT_SYMBOL(mISDN_clear_bchannel); 152EXPORT_SYMBOL(mISDN_clear_bchannel);
139 153
@@ -148,6 +162,51 @@ mISDN_freebchannel(struct bchannel *ch)
148} 162}
149EXPORT_SYMBOL(mISDN_freebchannel); 163EXPORT_SYMBOL(mISDN_freebchannel);
150 164
165int
166mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
167{
168 int ret = 0;
169
170 switch (cq->op) {
171 case MISDN_CTRL_GETOP:
172 cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
173 MISDN_CTRL_RX_OFF;
174 break;
175 case MISDN_CTRL_FILL_EMPTY:
176 if (cq->p1) {
177 memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
178 test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
179 } else {
180 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
181 }
182 break;
183 case MISDN_CTRL_RX_OFF:
184 /* read back dropped byte count */
185 cq->p2 = bch->dropcnt;
186 if (cq->p1)
187 test_and_set_bit(FLG_RX_OFF, &bch->Flags);
188 else
189 test_and_clear_bit(FLG_RX_OFF, &bch->Flags);
190 bch->dropcnt = 0;
191 break;
192 case MISDN_CTRL_RX_BUFFER:
193 if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
194 bch->next_maxlen = cq->p2;
195 if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
196 bch->next_minlen = cq->p1;
197 /* we return the old values */
198 cq->p1 = bch->minlen;
199 cq->p2 = bch->maxlen;
200 break;
201 default:
202 pr_info("mISDN unhandled control %x operation\n", cq->op);
203 ret = -EINVAL;
204 break;
205 }
206 return ret;
207}
208EXPORT_SYMBOL(mISDN_ctrl_bchannel);
209
151static inline u_int 210static inline u_int
152get_sapi_tei(u_char *p) 211get_sapi_tei(u_char *p)
153{ 212{
@@ -197,24 +256,37 @@ recv_Echannel(struct dchannel *ech, struct dchannel *dch)
197EXPORT_SYMBOL(recv_Echannel); 256EXPORT_SYMBOL(recv_Echannel);
198 257
199void 258void
200recv_Bchannel(struct bchannel *bch, unsigned int id) 259recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
201{ 260{
202 struct mISDNhead *hh; 261 struct mISDNhead *hh;
203 262
204 hh = mISDN_HEAD_P(bch->rx_skb); 263 /* if allocation did fail upper functions still may call us */
205 hh->prim = PH_DATA_IND; 264 if (unlikely(!bch->rx_skb))
206 hh->id = id;
207 if (bch->rcount >= 64) {
208 printk(KERN_WARNING "B-channel %p receive queue overflow, "
209 "flushing!\n", bch);
210 skb_queue_purge(&bch->rqueue);
211 bch->rcount = 0;
212 return; 265 return;
266 if (unlikely(!bch->rx_skb->len)) {
267 /* we have no data to send - this may happen after recovery
268 * from overflow or too small allocation.
269 * We need to free the buffer here */
270 dev_kfree_skb(bch->rx_skb);
271 bch->rx_skb = NULL;
272 } else {
273 if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
274 (bch->rx_skb->len < bch->minlen) && !force)
275 return;
276 hh = mISDN_HEAD_P(bch->rx_skb);
277 hh->prim = PH_DATA_IND;
278 hh->id = id;
279 if (bch->rcount >= 64) {
280 printk(KERN_WARNING
281 "B%d receive queue overflow - flushing!\n",
282 bch->nr);
283 skb_queue_purge(&bch->rqueue);
284 }
285 bch->rcount++;
286 skb_queue_tail(&bch->rqueue, bch->rx_skb);
287 bch->rx_skb = NULL;
288 schedule_event(bch, FLG_RECVQUEUE);
213 } 289 }
214 bch->rcount++;
215 skb_queue_tail(&bch->rqueue, bch->rx_skb);
216 bch->rx_skb = NULL;
217 schedule_event(bch, FLG_RECVQUEUE);
218} 290}
219EXPORT_SYMBOL(recv_Bchannel); 291EXPORT_SYMBOL(recv_Bchannel);
220 292
@@ -272,7 +344,7 @@ get_next_dframe(struct dchannel *dch)
272} 344}
273EXPORT_SYMBOL(get_next_dframe); 345EXPORT_SYMBOL(get_next_dframe);
274 346
275void 347static void
276confirm_Bsend(struct bchannel *bch) 348confirm_Bsend(struct bchannel *bch)
277{ 349{
278 struct sk_buff *skb; 350 struct sk_buff *skb;
@@ -294,7 +366,6 @@ confirm_Bsend(struct bchannel *bch)
294 skb_queue_tail(&bch->rqueue, skb); 366 skb_queue_tail(&bch->rqueue, skb);
295 schedule_event(bch, FLG_RECVQUEUE); 367 schedule_event(bch, FLG_RECVQUEUE);
296} 368}
297EXPORT_SYMBOL(confirm_Bsend);
298 369
299int 370int
300get_next_bframe(struct bchannel *bch) 371get_next_bframe(struct bchannel *bch)
@@ -305,8 +376,8 @@ get_next_bframe(struct bchannel *bch)
305 if (bch->tx_skb) { 376 if (bch->tx_skb) {
306 bch->next_skb = NULL; 377 bch->next_skb = NULL;
307 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags); 378 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
308 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) 379 /* confirm imediately to allow next data */
309 confirm_Bsend(bch); /* not for transparent */ 380 confirm_Bsend(bch);
310 return 1; 381 return 1;
311 } else { 382 } else {
312 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags); 383 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
@@ -395,7 +466,62 @@ bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
395 /* write to fifo */ 466 /* write to fifo */
396 ch->tx_skb = skb; 467 ch->tx_skb = skb;
397 ch->tx_idx = 0; 468 ch->tx_idx = 0;
469 confirm_Bsend(ch);
398 return 1; 470 return 1;
399 } 471 }
400} 472}
401EXPORT_SYMBOL(bchannel_senddata); 473EXPORT_SYMBOL(bchannel_senddata);
474
475/* The function allocates a new receive skb on demand with a size for the
476 * requirements of the current protocol. It returns the tailroom of the
477 * receive skb or an error.
478 */
479int
480bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
481{
482 int len;
483
484 if (bch->rx_skb) {
485 len = skb_tailroom(bch->rx_skb);
486 if (len < reqlen) {
487 pr_warning("B%d no space for %d (only %d) bytes\n",
488 bch->nr, reqlen, len);
489 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
490 /* send what we have now and try a new buffer */
491 recv_Bchannel(bch, 0, true);
492 } else {
493 /* on HDLC we have to drop too big frames */
494 return -EMSGSIZE;
495 }
496 } else {
497 return len;
498 }
499 }
500 /* update current min/max length first */
501 if (unlikely(bch->maxlen != bch->next_maxlen))
502 bch->maxlen = bch->next_maxlen;
503 if (unlikely(bch->minlen != bch->next_minlen))
504 bch->minlen = bch->next_minlen;
505 if (unlikely(reqlen > bch->maxlen))
506 return -EMSGSIZE;
507 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
508 if (reqlen >= bch->minlen) {
509 len = reqlen;
510 } else {
511 len = 2 * bch->minlen;
512 if (len > bch->maxlen)
513 len = bch->maxlen;
514 }
515 } else {
516 /* with HDLC we do not know the length yet */
517 len = bch->maxlen;
518 }
519 bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
520 if (!bch->rx_skb) {
521 pr_warning("B%d receive no memory for %d bytes\n",
522 bch->nr, len);
523 len = -ENOMEM;
524 }
525 return len;
526}
527EXPORT_SYMBOL(bchannel_get_rxbuf);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 0f88acf1185f..db50f788855d 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -1420,7 +1420,7 @@ init_card(struct l1oip *hc, int pri, int bundle)
1420 bch->nr = i + ch; 1420 bch->nr = i + ch;
1421 bch->slot = i + ch; 1421 bch->slot = i + ch;
1422 bch->debug = debug; 1422 bch->debug = debug;
1423 mISDN_initbchannel(bch, MAX_DATA_MEM); 1423 mISDN_initbchannel(bch, MAX_DATA_MEM, 0);
1424 bch->hw = hc; 1424 bch->hw = hc;
1425 bch->ch.send = handle_bmsg; 1425 bch->ch.send = handle_bmsg;
1426 bch->ch.ctrl = l1oip_bctrl; 1426 bch->ch.ctrl = l1oip_bctrl;
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index 0fc49b375514..bebc57b72138 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -28,13 +28,15 @@ static u_int *debug;
28struct layer1 { 28struct layer1 {
29 u_long Flags; 29 u_long Flags;
30 struct FsmInst l1m; 30 struct FsmInst l1m;
31 struct FsmTimer timer; 31 struct FsmTimer timer3;
32 struct FsmTimer timerX;
32 int delay; 33 int delay;
34 int t3_value;
33 struct dchannel *dch; 35 struct dchannel *dch;
34 dchannel_l1callback *dcb; 36 dchannel_l1callback *dcb;
35}; 37};
36 38
37#define TIMER3_VALUE 7000 39#define TIMER3_DEFAULT_VALUE 7000
38 40
39static 41static
40struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL}; 42struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL};
@@ -134,7 +136,7 @@ l1_deact_req_s(struct FsmInst *fi, int event, void *arg)
134 struct layer1 *l1 = fi->userdata; 136 struct layer1 *l1 = fi->userdata;
135 137
136 mISDN_FsmChangeState(fi, ST_L1_F3); 138 mISDN_FsmChangeState(fi, ST_L1_F3);
137 mISDN_FsmRestartTimer(&l1->timer, 550, EV_TIMER_DEACT, NULL, 2); 139 mISDN_FsmRestartTimer(&l1->timerX, 550, EV_TIMER_DEACT, NULL, 2);
138 test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags); 140 test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags);
139} 141}
140 142
@@ -179,11 +181,11 @@ l1_info4_ind(struct FsmInst *fi, int event, void *arg)
179 mISDN_FsmChangeState(fi, ST_L1_F7); 181 mISDN_FsmChangeState(fi, ST_L1_F7);
180 l1->dcb(l1->dch, INFO3_P8); 182 l1->dcb(l1->dch, INFO3_P8);
181 if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags)) 183 if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags))
182 mISDN_FsmDelTimer(&l1->timer, 4); 184 mISDN_FsmDelTimer(&l1->timerX, 4);
183 if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) { 185 if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) {
184 if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags)) 186 if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags))
185 mISDN_FsmDelTimer(&l1->timer, 3); 187 mISDN_FsmDelTimer(&l1->timer3, 3);
186 mISDN_FsmRestartTimer(&l1->timer, 110, EV_TIMER_ACT, NULL, 2); 188 mISDN_FsmRestartTimer(&l1->timerX, 110, EV_TIMER_ACT, NULL, 2);
187 test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags); 189 test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags);
188 } 190 }
189} 191}
@@ -201,7 +203,7 @@ l1_timer3(struct FsmInst *fi, int event, void *arg)
201 } 203 }
202 if (l1->l1m.state != ST_L1_F6) { 204 if (l1->l1m.state != ST_L1_F6) {
203 mISDN_FsmChangeState(fi, ST_L1_F3); 205 mISDN_FsmChangeState(fi, ST_L1_F3);
204 l1->dcb(l1->dch, HW_POWERUP_REQ); 206 /* do not force anything here, we need send INFO 0 */
205 } 207 }
206} 208}
207 209
@@ -233,8 +235,9 @@ l1_activate_s(struct FsmInst *fi, int event, void *arg)
233{ 235{
234 struct layer1 *l1 = fi->userdata; 236 struct layer1 *l1 = fi->userdata;
235 237
236 mISDN_FsmRestartTimer(&l1->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2); 238 mISDN_FsmRestartTimer(&l1->timer3, l1->t3_value, EV_TIMER3, NULL, 2);
237 test_and_set_bit(FLG_L1_T3RUN, &l1->Flags); 239 test_and_set_bit(FLG_L1_T3RUN, &l1->Flags);
240 /* Tell HW to send INFO 1 */
238 l1->dcb(l1->dch, HW_RESET_REQ); 241 l1->dcb(l1->dch, HW_RESET_REQ);
239} 242}
240 243
@@ -302,7 +305,8 @@ static struct FsmNode L1SFnList[] =
302 305
303static void 306static void
304release_l1(struct layer1 *l1) { 307release_l1(struct layer1 *l1) {
305 mISDN_FsmDelTimer(&l1->timer, 0); 308 mISDN_FsmDelTimer(&l1->timerX, 0);
309 mISDN_FsmDelTimer(&l1->timer3, 0);
306 if (l1->dch) 310 if (l1->dch)
307 l1->dch->l1 = NULL; 311 l1->dch->l1 = NULL;
308 module_put(THIS_MODULE); 312 module_put(THIS_MODULE);
@@ -356,6 +360,16 @@ l1_event(struct layer1 *l1, u_int event)
356 release_l1(l1); 360 release_l1(l1);
357 break; 361 break;
358 default: 362 default:
363 if ((event & ~HW_TIMER3_VMASK) == HW_TIMER3_VALUE) {
364 int val = event & HW_TIMER3_VMASK;
365
366 if (val < 5)
367 val = 5;
368 if (val > 30)
369 val = 30;
370 l1->t3_value = val;
371 break;
372 }
359 if (*debug & DEBUG_L1) 373 if (*debug & DEBUG_L1)
360 printk(KERN_DEBUG "%s %x unhandled\n", 374 printk(KERN_DEBUG "%s %x unhandled\n",
361 __func__, event); 375 __func__, event);
@@ -377,13 +391,15 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
377 nl1->l1m.fsm = &l1fsm_s; 391 nl1->l1m.fsm = &l1fsm_s;
378 nl1->l1m.state = ST_L1_F3; 392 nl1->l1m.state = ST_L1_F3;
379 nl1->Flags = 0; 393 nl1->Flags = 0;
394 nl1->t3_value = TIMER3_DEFAULT_VALUE;
380 nl1->l1m.debug = *debug & DEBUG_L1_FSM; 395 nl1->l1m.debug = *debug & DEBUG_L1_FSM;
381 nl1->l1m.userdata = nl1; 396 nl1->l1m.userdata = nl1;
382 nl1->l1m.userint = 0; 397 nl1->l1m.userint = 0;
383 nl1->l1m.printdebug = l1m_debug; 398 nl1->l1m.printdebug = l1m_debug;
384 nl1->dch = dch; 399 nl1->dch = dch;
385 nl1->dcb = dcb; 400 nl1->dcb = dcb;
386 mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer); 401 mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer3);
402 mISDN_FsmInitTimer(&nl1->l1m, &nl1->timerX);
387 __module_get(THIS_MODULE); 403 __module_get(THIS_MODULE);
388 dch->l1 = nl1; 404 dch->l1 = nl1;
389 return 0; 405 return 0;
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 39d7375fa551..0dc8abca1407 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -58,6 +58,8 @@ enum {
58 EV_L1_DEACTIVATE, 58 EV_L1_DEACTIVATE,
59 EV_L2_T200, 59 EV_L2_T200,
60 EV_L2_T203, 60 EV_L2_T203,
61 EV_L2_T200I,
62 EV_L2_T203I,
61 EV_L2_SET_OWN_BUSY, 63 EV_L2_SET_OWN_BUSY,
62 EV_L2_CLEAR_OWN_BUSY, 64 EV_L2_CLEAR_OWN_BUSY,
63 EV_L2_FRAME_ERROR, 65 EV_L2_FRAME_ERROR,
@@ -86,6 +88,8 @@ static char *strL2Event[] =
86 "EV_L1_DEACTIVATE", 88 "EV_L1_DEACTIVATE",
87 "EV_L2_T200", 89 "EV_L2_T200",
88 "EV_L2_T203", 90 "EV_L2_T203",
91 "EV_L2_T200I",
92 "EV_L2_T203I",
89 "EV_L2_SET_OWN_BUSY", 93 "EV_L2_SET_OWN_BUSY",
90 "EV_L2_CLEAR_OWN_BUSY", 94 "EV_L2_CLEAR_OWN_BUSY",
91 "EV_L2_FRAME_ERROR", 95 "EV_L2_FRAME_ERROR",
@@ -106,8 +110,8 @@ l2m_debug(struct FsmInst *fi, char *fmt, ...)
106 vaf.fmt = fmt; 110 vaf.fmt = fmt;
107 vaf.va = &va; 111 vaf.va = &va;
108 112
109 printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n", 113 printk(KERN_DEBUG "%s l2 (sapi %d tei %d): %pV\n",
110 l2->sapi, l2->tei, &vaf); 114 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, &vaf);
111 115
112 va_end(va); 116 va_end(va);
113} 117}
@@ -150,7 +154,8 @@ l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
150 mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr; 154 mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
151 err = l2->up->send(l2->up, skb); 155 err = l2->up->send(l2->up, skb);
152 if (err) { 156 if (err) {
153 printk(KERN_WARNING "%s: err=%d\n", __func__, err); 157 printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
158 mISDNDevName4ch(&l2->ch), err);
154 dev_kfree_skb(skb); 159 dev_kfree_skb(skb);
155 } 160 }
156} 161}
@@ -174,7 +179,8 @@ l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
174 memcpy(skb_put(skb, len), arg, len); 179 memcpy(skb_put(skb, len), arg, len);
175 err = l2->up->send(l2->up, skb); 180 err = l2->up->send(l2->up, skb);
176 if (err) { 181 if (err) {
177 printk(KERN_WARNING "%s: err=%d\n", __func__, err); 182 printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
183 mISDNDevName4ch(&l2->ch), err);
178 dev_kfree_skb(skb); 184 dev_kfree_skb(skb);
179 } 185 }
180} 186}
@@ -185,7 +191,8 @@ l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
185 191
186 ret = l2->ch.recv(l2->ch.peer, skb); 192 ret = l2->ch.recv(l2->ch.peer, skb);
187 if (ret && (*debug & DEBUG_L2_RECV)) 193 if (ret && (*debug & DEBUG_L2_RECV))
188 printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret); 194 printk(KERN_DEBUG "l2down_skb: dev %s ret(%d)\n",
195 mISDNDevName4ch(&l2->ch), ret);
189 return ret; 196 return ret;
190} 197}
191 198
@@ -276,12 +283,37 @@ ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
276 return ret; 283 return ret;
277} 284}
278 285
286static void
287l2_timeout(struct FsmInst *fi, int event, void *arg)
288{
289 struct layer2 *l2 = fi->userdata;
290 struct sk_buff *skb;
291 struct mISDNhead *hh;
292
293 skb = mI_alloc_skb(0, GFP_ATOMIC);
294 if (!skb) {
295 printk(KERN_WARNING "%s: L2(%d,%d) nr:%x timer %s no skb\n",
296 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
297 l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
298 return;
299 }
300 hh = mISDN_HEAD_P(skb);
301 hh->prim = event == EV_L2_T200 ? DL_TIMER200_IND : DL_TIMER203_IND;
302 hh->id = l2->ch.nr;
303 if (*debug & DEBUG_TIMER)
304 printk(KERN_DEBUG "%s: L2(%d,%d) nr:%x timer %s expired\n",
305 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
306 l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
307 if (l2->ch.st)
308 l2->ch.st->own.recv(&l2->ch.st->own, skb);
309}
310
279static int 311static int
280l2mgr(struct layer2 *l2, u_int prim, void *arg) { 312l2mgr(struct layer2 *l2, u_int prim, void *arg) {
281 long c = (long)arg; 313 long c = (long)arg;
282 314
283 printk(KERN_WARNING 315 printk(KERN_WARNING "l2mgr: dev %s addr:%x prim %x %c\n",
284 "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c); 316 mISDNDevName4ch(&l2->ch), l2->id, prim, (char)c);
285 if (test_bit(FLG_LAPD, &l2->flag) && 317 if (test_bit(FLG_LAPD, &l2->flag) &&
286 !test_bit(FLG_FIXED_TEI, &l2->flag)) { 318 !test_bit(FLG_FIXED_TEI, &l2->flag)) {
287 switch (c) { 319 switch (c) {
@@ -603,8 +635,8 @@ send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
603 else { 635 else {
604 skb = mI_alloc_skb(i, GFP_ATOMIC); 636 skb = mI_alloc_skb(i, GFP_ATOMIC);
605 if (!skb) { 637 if (!skb) {
606 printk(KERN_WARNING "%s: can't alloc skbuff\n", 638 printk(KERN_WARNING "%s: can't alloc skbuff in %s\n",
607 __func__); 639 mISDNDevName4ch(&l2->ch), __func__);
608 return; 640 return;
609 } 641 }
610 } 642 }
@@ -1089,8 +1121,8 @@ enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
1089 tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0); 1121 tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
1090 skb = mI_alloc_skb(i, GFP_ATOMIC); 1122 skb = mI_alloc_skb(i, GFP_ATOMIC);
1091 if (!skb) { 1123 if (!skb) {
1092 printk(KERN_WARNING 1124 printk(KERN_WARNING "%s: isdnl2 can't alloc sbbuff in %s\n",
1093 "isdnl2 can't alloc sbbuff for enquiry_cr\n"); 1125 mISDNDevName4ch(&l2->ch), __func__);
1094 return; 1126 return;
1095 } 1127 }
1096 memcpy(skb_put(skb, i), tmp, i); 1128 memcpy(skb_put(skb, i), tmp, i);
@@ -1150,7 +1182,7 @@ invoke_retransmission(struct layer2 *l2, unsigned int nr)
1150 else 1182 else
1151 printk(KERN_WARNING 1183 printk(KERN_WARNING
1152 "%s: windowar[%d] is NULL\n", 1184 "%s: windowar[%d] is NULL\n",
1153 __func__, p1); 1185 mISDNDevName4ch(&l2->ch), p1);
1154 l2->windowar[p1] = NULL; 1186 l2->windowar[p1] = NULL;
1155 } 1187 }
1156 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL); 1188 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
@@ -1461,8 +1493,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1461 p1 = (l2->vs - l2->va) % 8; 1493 p1 = (l2->vs - l2->va) % 8;
1462 p1 = (p1 + l2->sow) % l2->window; 1494 p1 = (p1 + l2->sow) % l2->window;
1463 if (l2->windowar[p1]) { 1495 if (l2->windowar[p1]) {
1464 printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n", 1496 printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
1465 p1); 1497 mISDNDevName4ch(&l2->ch), p1);
1466 dev_kfree_skb(l2->windowar[p1]); 1498 dev_kfree_skb(l2->windowar[p1]);
1467 } 1499 }
1468 l2->windowar[p1] = skb; 1500 l2->windowar[p1] = skb;
@@ -1482,12 +1514,14 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1482 memcpy(skb_push(nskb, i), header, i); 1514 memcpy(skb_push(nskb, i), header, i);
1483 else { 1515 else {
1484 printk(KERN_WARNING 1516 printk(KERN_WARNING
1485 "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); 1517 "%s: L2 pull_iqueue skb header(%d/%d) too short\n",
1518 mISDNDevName4ch(&l2->ch), i, p1);
1486 oskb = nskb; 1519 oskb = nskb;
1487 nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC); 1520 nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
1488 if (!nskb) { 1521 if (!nskb) {
1489 dev_kfree_skb(oskb); 1522 dev_kfree_skb(oskb);
1490 printk(KERN_WARNING "%s: no skb mem\n", __func__); 1523 printk(KERN_WARNING "%s: no skb mem in %s\n",
1524 mISDNDevName4ch(&l2->ch), __func__);
1491 return; 1525 return;
1492 } 1526 }
1493 memcpy(skb_put(nskb, i), header, i); 1527 memcpy(skb_put(nskb, i), header, i);
@@ -1814,11 +1848,16 @@ static struct FsmNode L2FnList[] =
1814 {ST_L2_8, EV_L2_SUPER, l2_st8_got_super}, 1848 {ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
1815 {ST_L2_7, EV_L2_I, l2_got_iframe}, 1849 {ST_L2_7, EV_L2_I, l2_got_iframe},
1816 {ST_L2_8, EV_L2_I, l2_got_iframe}, 1850 {ST_L2_8, EV_L2_I, l2_got_iframe},
1817 {ST_L2_5, EV_L2_T200, l2_st5_tout_200}, 1851 {ST_L2_5, EV_L2_T200, l2_timeout},
1818 {ST_L2_6, EV_L2_T200, l2_st6_tout_200}, 1852 {ST_L2_6, EV_L2_T200, l2_timeout},
1819 {ST_L2_7, EV_L2_T200, l2_st7_tout_200}, 1853 {ST_L2_7, EV_L2_T200, l2_timeout},
1820 {ST_L2_8, EV_L2_T200, l2_st8_tout_200}, 1854 {ST_L2_8, EV_L2_T200, l2_timeout},
1821 {ST_L2_7, EV_L2_T203, l2_st7_tout_203}, 1855 {ST_L2_7, EV_L2_T203, l2_timeout},
1856 {ST_L2_5, EV_L2_T200I, l2_st5_tout_200},
1857 {ST_L2_6, EV_L2_T200I, l2_st6_tout_200},
1858 {ST_L2_7, EV_L2_T200I, l2_st7_tout_200},
1859 {ST_L2_8, EV_L2_T200I, l2_st8_tout_200},
1860 {ST_L2_7, EV_L2_T203I, l2_st7_tout_203},
1822 {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue}, 1861 {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
1823 {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, 1862 {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1824 {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, 1863 {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
@@ -1858,7 +1897,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1858 ptei = *datap++; 1897 ptei = *datap++;
1859 if ((psapi & 1) || !(ptei & 1)) { 1898 if ((psapi & 1) || !(ptei & 1)) {
1860 printk(KERN_WARNING 1899 printk(KERN_WARNING
1861 "l2 D-channel frame wrong EA0/EA1\n"); 1900 "%s l2 D-channel frame wrong EA0/EA1\n",
1901 mISDNDevName4ch(&l2->ch));
1862 return ret; 1902 return ret;
1863 } 1903 }
1864 psapi >>= 2; 1904 psapi >>= 2;
@@ -1867,7 +1907,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1867 /* not our business */ 1907 /* not our business */
1868 if (*debug & DEBUG_L2) 1908 if (*debug & DEBUG_L2)
1869 printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n", 1909 printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
1870 __func__, psapi, l2->sapi); 1910 mISDNDevName4ch(&l2->ch), psapi,
1911 l2->sapi);
1871 dev_kfree_skb(skb); 1912 dev_kfree_skb(skb);
1872 return 0; 1913 return 0;
1873 } 1914 }
@@ -1875,7 +1916,7 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1875 /* not our business */ 1916 /* not our business */
1876 if (*debug & DEBUG_L2) 1917 if (*debug & DEBUG_L2)
1877 printk(KERN_DEBUG "%s: tei %d/%d mismatch\n", 1918 printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
1878 __func__, ptei, l2->tei); 1919 mISDNDevName4ch(&l2->ch), ptei, l2->tei);
1879 dev_kfree_skb(skb); 1920 dev_kfree_skb(skb);
1880 return 0; 1921 return 0;
1881 } 1922 }
@@ -1916,7 +1957,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1916 } else 1957 } else
1917 c = 'L'; 1958 c = 'L';
1918 if (c) { 1959 if (c) {
1919 printk(KERN_WARNING "l2 D-channel frame error %c\n", c); 1960 printk(KERN_WARNING "%s:l2 D-channel frame error %c\n",
1961 mISDNDevName4ch(&l2->ch), c);
1920 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c); 1962 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
1921 } 1963 }
1922 return ret; 1964 return ret;
@@ -1930,8 +1972,17 @@ l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
1930 int ret = -EINVAL; 1972 int ret = -EINVAL;
1931 1973
1932 if (*debug & DEBUG_L2_RECV) 1974 if (*debug & DEBUG_L2_RECV)
1933 printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n", 1975 printk(KERN_DEBUG "%s: %s prim(%x) id(%x) sapi(%d) tei(%d)\n",
1934 __func__, hh->prim, hh->id, l2->sapi, l2->tei); 1976 __func__, mISDNDevName4ch(&l2->ch), hh->prim, hh->id,
1977 l2->sapi, l2->tei);
1978 if (hh->prim == DL_INTERN_MSG) {
1979 struct mISDNhead *chh = hh + 1; /* saved copy */
1980
1981 *hh = *chh;
1982 if (*debug & DEBUG_L2_RECV)
1983 printk(KERN_DEBUG "%s: prim(%x) id(%x) internal msg\n",
1984 mISDNDevName4ch(&l2->ch), hh->prim, hh->id);
1985 }
1935 switch (hh->prim) { 1986 switch (hh->prim) {
1936 case PH_DATA_IND: 1987 case PH_DATA_IND:
1937 ret = ph_data_indication(l2, hh, skb); 1988 ret = ph_data_indication(l2, hh, skb);
@@ -1987,6 +2038,12 @@ l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
1987 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ, 2038 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
1988 skb); 2039 skb);
1989 break; 2040 break;
2041 case DL_TIMER200_IND:
2042 mISDN_FsmEvent(&l2->l2m, EV_L2_T200I, NULL);
2043 break;
2044 case DL_TIMER203_IND:
2045 mISDN_FsmEvent(&l2->l2m, EV_L2_T203I, NULL);
2046 break;
1990 default: 2047 default:
1991 if (*debug & DEBUG_L2) 2048 if (*debug & DEBUG_L2)
1992 l2m_debug(&l2->l2m, "l2 unknown pr %04x", 2049 l2m_debug(&l2->l2m, "l2 unknown pr %04x",
@@ -2005,7 +2062,8 @@ tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
2005 int ret = -EINVAL; 2062 int ret = -EINVAL;
2006 2063
2007 if (*debug & DEBUG_L2_TEI) 2064 if (*debug & DEBUG_L2_TEI)
2008 printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd); 2065 printk(KERN_DEBUG "%s: cmd(%x) in %s\n",
2066 mISDNDevName4ch(&l2->ch), cmd, __func__);
2009 switch (cmd) { 2067 switch (cmd) {
2010 case (MDL_ASSIGN_REQ): 2068 case (MDL_ASSIGN_REQ):
2011 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg); 2069 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
@@ -2018,7 +2076,8 @@ tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
2018 break; 2076 break;
2019 case (MDL_ERROR_RSP): 2077 case (MDL_ERROR_RSP):
2020 /* ETS 300-125 5.3.2.1 Test: TC13010 */ 2078 /* ETS 300-125 5.3.2.1 Test: TC13010 */
2021 printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n"); 2079 printk(KERN_NOTICE "%s: MDL_ERROR|REQ (tei_l2)\n",
2080 mISDNDevName4ch(&l2->ch));
2022 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL); 2081 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2023 break; 2082 break;
2024 } 2083 }
@@ -2050,7 +2109,8 @@ l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
2050 u_int info; 2109 u_int info;
2051 2110
2052 if (*debug & DEBUG_L2_CTRL) 2111 if (*debug & DEBUG_L2_CTRL)
2053 printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd); 2112 printk(KERN_DEBUG "%s: %s cmd(%x)\n",
2113 mISDNDevName4ch(ch), __func__, cmd);
2054 2114
2055 switch (cmd) { 2115 switch (cmd) {
2056 case OPEN_CHANNEL: 2116 case OPEN_CHANNEL:
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index ba2bc0c776e2..be88728f1106 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -790,18 +790,23 @@ tei_ph_data_ind(struct teimgr *tm, u_int mt, u_char *dp, int len)
790static struct layer2 * 790static struct layer2 *
791create_new_tei(struct manager *mgr, int tei, int sapi) 791create_new_tei(struct manager *mgr, int tei, int sapi)
792{ 792{
793 u_long opt = 0; 793 unsigned long opt = 0;
794 u_long flags; 794 unsigned long flags;
795 int id; 795 int id;
796 struct layer2 *l2; 796 struct layer2 *l2;
797 struct channel_req rq;
797 798
798 if (!mgr->up) 799 if (!mgr->up)
799 return NULL; 800 return NULL;
800 if ((tei >= 0) && (tei < 64)) 801 if ((tei >= 0) && (tei < 64))
801 test_and_set_bit(OPTION_L2_FIXEDTEI, &opt); 802 test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
802 if (mgr->ch.st->dev->Dprotocols 803 if (mgr->ch.st->dev->Dprotocols & ((1 << ISDN_P_TE_E1) |
803 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1))) 804 (1 << ISDN_P_NT_E1))) {
804 test_and_set_bit(OPTION_L2_PMX, &opt); 805 test_and_set_bit(OPTION_L2_PMX, &opt);
806 rq.protocol = ISDN_P_NT_E1;
807 } else {
808 rq.protocol = ISDN_P_NT_S0;
809 }
805 l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, opt, tei, sapi); 810 l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, opt, tei, sapi);
806 if (!l2) { 811 if (!l2) {
807 printk(KERN_WARNING "%s:no memory for layer2\n", __func__); 812 printk(KERN_WARNING "%s:no memory for layer2\n", __func__);
@@ -836,6 +841,14 @@ create_new_tei(struct manager *mgr, int tei, int sapi)
836 l2->ch.recv = mgr->ch.recv; 841 l2->ch.recv = mgr->ch.recv;
837 l2->ch.peer = mgr->ch.peer; 842 l2->ch.peer = mgr->ch.peer;
838 l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL); 843 l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL);
844 /* We need open here L1 for the manager as well (refcounting) */
845 rq.adr.dev = mgr->ch.st->dev->id;
846 id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL, &rq);
847 if (id < 0) {
848 printk(KERN_WARNING "%s: cannot open L1\n", __func__);
849 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
850 l2 = NULL;
851 }
839 } 852 }
840 return l2; 853 return l2;
841} 854}
@@ -978,10 +991,11 @@ TEIrelease(struct layer2 *l2)
978static int 991static int
979create_teimgr(struct manager *mgr, struct channel_req *crq) 992create_teimgr(struct manager *mgr, struct channel_req *crq)
980{ 993{
981 struct layer2 *l2; 994 struct layer2 *l2;
982 u_long opt = 0; 995 unsigned long opt = 0;
983 u_long flags; 996 unsigned long flags;
984 int id; 997 int id;
998 struct channel_req l1rq;
985 999
986 if (*debug & DEBUG_L2_TEI) 1000 if (*debug & DEBUG_L2_TEI)
987 printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", 1001 printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
@@ -1016,6 +1030,7 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
1016 if (crq->protocol == ISDN_P_LAPD_TE) 1030 if (crq->protocol == ISDN_P_LAPD_TE)
1017 test_and_set_bit(MGR_OPT_USER, &mgr->options); 1031 test_and_set_bit(MGR_OPT_USER, &mgr->options);
1018 } 1032 }
1033 l1rq.adr = crq->adr;
1019 if (mgr->ch.st->dev->Dprotocols 1034 if (mgr->ch.st->dev->Dprotocols
1020 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1))) 1035 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
1021 test_and_set_bit(OPTION_L2_PMX, &opt); 1036 test_and_set_bit(OPTION_L2_PMX, &opt);
@@ -1023,6 +1038,8 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
1023 mgr->up = crq->ch; 1038 mgr->up = crq->ch;
1024 id = DL_INFO_L2_CONNECT; 1039 id = DL_INFO_L2_CONNECT;
1025 teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id); 1040 teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id);
1041 if (test_bit(MGR_PH_ACTIVE, &mgr->options))
1042 teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL);
1026 crq->ch = NULL; 1043 crq->ch = NULL;
1027 if (!list_empty(&mgr->layer2)) { 1044 if (!list_empty(&mgr->layer2)) {
1028 read_lock_irqsave(&mgr->lock, flags); 1045 read_lock_irqsave(&mgr->lock, flags);
@@ -1053,24 +1070,34 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
1053 l2->tm->tei_m.fsm = &teifsmu; 1070 l2->tm->tei_m.fsm = &teifsmu;
1054 l2->tm->tei_m.state = ST_TEI_NOP; 1071 l2->tm->tei_m.state = ST_TEI_NOP;
1055 l2->tm->tval = 1000; /* T201 1 sec */ 1072 l2->tm->tval = 1000; /* T201 1 sec */
1073 if (test_bit(OPTION_L2_PMX, &opt))
1074 l1rq.protocol = ISDN_P_TE_E1;
1075 else
1076 l1rq.protocol = ISDN_P_TE_S0;
1056 } else { 1077 } else {
1057 l2->tm->tei_m.fsm = &teifsmn; 1078 l2->tm->tei_m.fsm = &teifsmn;
1058 l2->tm->tei_m.state = ST_TEI_NOP; 1079 l2->tm->tei_m.state = ST_TEI_NOP;
1059 l2->tm->tval = 2000; /* T202 2 sec */ 1080 l2->tm->tval = 2000; /* T202 2 sec */
1081 if (test_bit(OPTION_L2_PMX, &opt))
1082 l1rq.protocol = ISDN_P_NT_E1;
1083 else
1084 l1rq.protocol = ISDN_P_NT_S0;
1060 } 1085 }
1061 mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer); 1086 mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer);
1062 write_lock_irqsave(&mgr->lock, flags); 1087 write_lock_irqsave(&mgr->lock, flags);
1063 id = get_free_id(mgr); 1088 id = get_free_id(mgr);
1064 list_add_tail(&l2->list, &mgr->layer2); 1089 list_add_tail(&l2->list, &mgr->layer2);
1065 write_unlock_irqrestore(&mgr->lock, flags); 1090 write_unlock_irqrestore(&mgr->lock, flags);
1066 if (id < 0) { 1091 if (id >= 0) {
1067 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
1068 } else {
1069 l2->ch.nr = id; 1092 l2->ch.nr = id;
1070 l2->up->nr = id; 1093 l2->up->nr = id;
1071 crq->ch = &l2->ch; 1094 crq->ch = &l2->ch;
1072 id = 0; 1095 /* We need open here L1 for the manager as well (refcounting) */
1096 id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL,
1097 &l1rq);
1073 } 1098 }
1099 if (id < 0)
1100 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
1074 return id; 1101 return id;
1075} 1102}
1076 1103
@@ -1096,12 +1123,16 @@ mgr_send(struct mISDNchannel *ch, struct sk_buff *skb)
1096 break; 1123 break;
1097 case PH_ACTIVATE_IND: 1124 case PH_ACTIVATE_IND:
1098 test_and_set_bit(MGR_PH_ACTIVE, &mgr->options); 1125 test_and_set_bit(MGR_PH_ACTIVE, &mgr->options);
1126 if (mgr->up)
1127 teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL);
1099 mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL); 1128 mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL);
1100 do_send(mgr); 1129 do_send(mgr);
1101 ret = 0; 1130 ret = 0;
1102 break; 1131 break;
1103 case PH_DEACTIVATE_IND: 1132 case PH_DEACTIVATE_IND:
1104 test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options); 1133 test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options);
1134 if (mgr->up)
1135 teiup_create(mgr, PH_DEACTIVATE_IND, 0, NULL);
1105 mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL); 1136 mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL);
1106 ret = 0; 1137 ret = 0;
1107 break; 1138 break;
@@ -1263,7 +1294,7 @@ static int
1263mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb) 1294mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
1264{ 1295{
1265 struct manager *mgr = container_of(ch, struct manager, bcast); 1296 struct manager *mgr = container_of(ch, struct manager, bcast);
1266 struct mISDNhead *hh = mISDN_HEAD_P(skb); 1297 struct mISDNhead *hhc, *hh = mISDN_HEAD_P(skb);
1267 struct sk_buff *cskb = NULL; 1298 struct sk_buff *cskb = NULL;
1268 struct layer2 *l2; 1299 struct layer2 *l2;
1269 u_long flags; 1300 u_long flags;
@@ -1278,10 +1309,17 @@ mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
1278 skb = NULL; 1309 skb = NULL;
1279 } else { 1310 } else {
1280 if (!cskb) 1311 if (!cskb)
1281 cskb = skb_copy(skb, GFP_KERNEL); 1312 cskb = skb_copy(skb, GFP_ATOMIC);
1282 } 1313 }
1283 if (cskb) { 1314 if (cskb) {
1284 ret = l2->ch.send(&l2->ch, cskb); 1315 hhc = mISDN_HEAD_P(cskb);
1316 /* save original header behind normal header */
1317 hhc++;
1318 *hhc = *hh;
1319 hhc--;
1320 hhc->prim = DL_INTERN_MSG;
1321 hhc->id = l2->ch.nr;
1322 ret = ch->st->own.recv(&ch->st->own, cskb);
1285 if (ret) { 1323 if (ret) {
1286 if (*debug & DEBUG_SEND_ERR) 1324 if (*debug & DEBUG_SEND_ERR)
1287 printk(KERN_DEBUG 1325 printk(KERN_DEBUG
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index d8433f2d53bc..73973fdbd8be 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -112,7 +112,7 @@ err_free_addr:
112 return err; 112 return err;
113} 113}
114 114
115static void __devexit gpio_ext_free(struct netxbig_gpio_ext *gpio_ext) 115static void gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
116{ 116{
117 int i; 117 int i;
118 118
@@ -294,7 +294,7 @@ static ssize_t netxbig_led_sata_show(struct device *dev,
294 294
295static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store); 295static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store);
296 296
297static void __devexit delete_netxbig_led(struct netxbig_led_data *led_dat) 297static void delete_netxbig_led(struct netxbig_led_data *led_dat)
298{ 298{
299 if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) 299 if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
300 device_remove_file(led_dat->cdev.dev, &dev_attr_sata); 300 device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 2f0a14421a73..01cf89ec6944 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -255,7 +255,7 @@ err_free_cmd:
255 return ret; 255 return ret;
256} 256}
257 257
258static void __devexit delete_ns2_led(struct ns2_led_data *led_dat) 258static void delete_ns2_led(struct ns2_led_data *led_dat)
259{ 259{
260 device_remove_file(led_dat->cdev.dev, &dev_attr_sata); 260 device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
261 led_classdev_unregister(&led_dat->cdev); 261 led_classdev_unregister(&led_dat->cdev);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 97e73e555d11..17e2b472e16d 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1727,8 +1727,7 @@ int bitmap_create(struct mddev *mddev)
1727 bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize) 1727 bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize)
1728 - BITMAP_BLOCK_SHIFT); 1728 - BITMAP_BLOCK_SHIFT);
1729 1729
1730 /* now that chunksize and chunkshift are set, we can use these macros */ 1730 chunks = (blocks + (1 << bitmap->chunkshift) - 1) >>
1731 chunks = (blocks + bitmap->chunkshift - 1) >>
1732 bitmap->chunkshift; 1731 bitmap->chunkshift;
1733 pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO; 1732 pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
1734 1733
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 55ca5aec84e4..b44b0aba2d47 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -101,9 +101,6 @@ typedef __u16 bitmap_counter_t;
101 101
102#define BITMAP_BLOCK_SHIFT 9 102#define BITMAP_BLOCK_SHIFT 9
103 103
104/* how many blocks per chunk? (this is variable) */
105#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->mddev->bitmap_info.chunksize >> BITMAP_BLOCK_SHIFT)
106
107#endif 104#endif
108 105
109/* 106/*
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 1f23e048f077..08d9a207259a 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
134{ 134{
135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); 135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
136 136
137 if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) 137 if (!capable(CAP_SYS_ADMIN))
138 return; 138 return;
139 139
140 spin_lock(&receiving_list_lock); 140 spin_lock(&receiving_list_lock);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 922a3385eead..754f38f8a692 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -718,8 +718,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
718 return 0; 718 return 0;
719 719
720 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); 720 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
721 request_module("scsi_dh_%s", m->hw_handler_name); 721 if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
722 if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { 722 "scsi_dh_%s", m->hw_handler_name)) {
723 ti->error = "unknown hardware handler type"; 723 ti->error = "unknown hardware handler type";
724 ret = -EINVAL; 724 ret = -EINVAL;
725 goto fail; 725 goto fail;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b0ba52459ed7..68965e663248 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -859,7 +859,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
859 int ret; 859 int ret;
860 unsigned redundancy = 0; 860 unsigned redundancy = 0;
861 struct raid_dev *dev; 861 struct raid_dev *dev;
862 struct md_rdev *rdev, *freshest; 862 struct md_rdev *rdev, *tmp, *freshest;
863 struct mddev *mddev = &rs->md; 863 struct mddev *mddev = &rs->md;
864 864
865 switch (rs->raid_type->level) { 865 switch (rs->raid_type->level) {
@@ -877,7 +877,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
877 } 877 }
878 878
879 freshest = NULL; 879 freshest = NULL;
880 rdev_for_each(rdev, mddev) { 880 rdev_for_each_safe(rdev, tmp, mddev) {
881 if (!rdev->meta_bdev) 881 if (!rdev->meta_bdev)
882 continue; 882 continue;
883 883
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 213ae32a0fc4..eb3d138ff55a 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -279,8 +279,10 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
279 279
280 hlist_del(&cell->list); 280 hlist_del(&cell->list);
281 281
282 bio_list_add(inmates, cell->holder); 282 if (inmates) {
283 bio_list_merge(inmates, &cell->bios); 283 bio_list_add(inmates, cell->holder);
284 bio_list_merge(inmates, &cell->bios);
285 }
284 286
285 mempool_free(cell, prison->cell_pool); 287 mempool_free(cell, prison->cell_pool);
286} 288}
@@ -303,9 +305,10 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
303 */ 305 */
304static void __cell_release_singleton(struct cell *cell, struct bio *bio) 306static void __cell_release_singleton(struct cell *cell, struct bio *bio)
305{ 307{
306 hlist_del(&cell->list);
307 BUG_ON(cell->holder != bio); 308 BUG_ON(cell->holder != bio);
308 BUG_ON(!bio_list_empty(&cell->bios)); 309 BUG_ON(!bio_list_empty(&cell->bios));
310
311 __cell_release(cell, NULL);
309} 312}
310 313
311static void cell_release_singleton(struct cell *cell, struct bio *bio) 314static void cell_release_singleton(struct cell *cell, struct bio *bio)
@@ -1177,6 +1180,7 @@ static void no_space(struct cell *cell)
1177static void process_discard(struct thin_c *tc, struct bio *bio) 1180static void process_discard(struct thin_c *tc, struct bio *bio)
1178{ 1181{
1179 int r; 1182 int r;
1183 unsigned long flags;
1180 struct pool *pool = tc->pool; 1184 struct pool *pool = tc->pool;
1181 struct cell *cell, *cell2; 1185 struct cell *cell, *cell2;
1182 struct cell_key key, key2; 1186 struct cell_key key, key2;
@@ -1218,7 +1222,9 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1218 m->bio = bio; 1222 m->bio = bio;
1219 1223
1220 if (!ds_add_work(&pool->all_io_ds, &m->list)) { 1224 if (!ds_add_work(&pool->all_io_ds, &m->list)) {
1225 spin_lock_irqsave(&pool->lock, flags);
1221 list_add(&m->list, &pool->prepared_discards); 1226 list_add(&m->list, &pool->prepared_discards);
1227 spin_unlock_irqrestore(&pool->lock, flags);
1222 wake_worker(pool); 1228 wake_worker(pool);
1223 } 1229 }
1224 } else { 1230 } else {
@@ -1626,6 +1632,21 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
1626 pool->low_water_blocks = pt->low_water_blocks; 1632 pool->low_water_blocks = pt->low_water_blocks;
1627 pool->pf = pt->pf; 1633 pool->pf = pt->pf;
1628 1634
1635 /*
1636 * If discard_passdown was enabled verify that the data device
1637 * supports discards. Disable discard_passdown if not; otherwise
1638 * -EOPNOTSUPP will be returned.
1639 */
1640 if (pt->pf.discard_passdown) {
1641 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1642 if (!q || !blk_queue_discard(q)) {
1643 char buf[BDEVNAME_SIZE];
1644 DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
1645 bdevname(pt->data_dev->bdev, buf));
1646 pool->pf.discard_passdown = 0;
1647 }
1648 }
1649
1629 return 0; 1650 return 0;
1630} 1651}
1631 1652
@@ -1982,19 +2003,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1982 goto out_flags_changed; 2003 goto out_flags_changed;
1983 } 2004 }
1984 2005
1985 /*
1986 * If discard_passdown was enabled verify that the data device
1987 * supports discards. Disable discard_passdown if not; otherwise
1988 * -EOPNOTSUPP will be returned.
1989 */
1990 if (pf.discard_passdown) {
1991 struct request_queue *q = bdev_get_queue(data_dev->bdev);
1992 if (!q || !blk_queue_discard(q)) {
1993 DMWARN("Discard unsupported by data device: Disabling discard passdown.");
1994 pf.discard_passdown = 0;
1995 }
1996 }
1997
1998 pt->pool = pool; 2006 pt->pool = pool;
1999 pt->ti = ti; 2007 pt->ti = ti;
2000 pt->metadata_dev = metadata_dev; 2008 pt->metadata_dev = metadata_dev;
@@ -2379,7 +2387,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
2379 (unsigned long long)pt->low_water_blocks); 2387 (unsigned long long)pt->low_water_blocks);
2380 2388
2381 count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled + 2389 count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
2382 !pool->pf.discard_passdown; 2390 !pt->pf.discard_passdown;
2383 DMEMIT("%u ", count); 2391 DMEMIT("%u ", count);
2384 2392
2385 if (!pool->pf.zero_new_blocks) 2393 if (!pool->pf.zero_new_blocks)
@@ -2388,7 +2396,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
2388 if (!pool->pf.discard_enabled) 2396 if (!pool->pf.discard_enabled)
2389 DMEMIT("ignore_discard "); 2397 DMEMIT("ignore_discard ");
2390 2398
2391 if (!pool->pf.discard_passdown) 2399 if (!pt->pf.discard_passdown)
2392 DMEMIT("no_discard_passdown "); 2400 DMEMIT("no_discard_passdown ");
2393 2401
2394 break; 2402 break;
@@ -2626,8 +2634,10 @@ static int thin_endio(struct dm_target *ti,
2626 if (h->all_io_entry) { 2634 if (h->all_io_entry) {
2627 INIT_LIST_HEAD(&work); 2635 INIT_LIST_HEAD(&work);
2628 ds_dec(h->all_io_entry, &work); 2636 ds_dec(h->all_io_entry, &work);
2637 spin_lock_irqsave(&pool->lock, flags);
2629 list_for_each_entry_safe(m, tmp, &work, list) 2638 list_for_each_entry_safe(m, tmp, &work, list)
2630 list_add(&m->list, &pool->prepared_discards); 2639 list_add(&m->list, &pool->prepared_discards);
2640 spin_unlock_irqrestore(&pool->lock, flags);
2631 } 2641 }
2632 2642
2633 mempool_free(h, pool->endio_hook_pool); 2643 mempool_free(h, pool->endio_hook_pool);
@@ -2759,6 +2769,6 @@ static void dm_thin_exit(void)
2759module_init(dm_thin_init); 2769module_init(dm_thin_init);
2760module_exit(dm_thin_exit); 2770module_exit(dm_thin_exit);
2761 2771
2762MODULE_DESCRIPTION(DM_NAME "device-mapper thin provisioning target"); 2772MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
2763MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 2773MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2764MODULE_LICENSE("GPL"); 2774MODULE_LICENSE("GPL");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b572e1e386ce..01233d855eb2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -391,6 +391,8 @@ void mddev_suspend(struct mddev *mddev)
391 synchronize_rcu(); 391 synchronize_rcu();
392 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 392 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
393 mddev->pers->quiesce(mddev, 1); 393 mddev->pers->quiesce(mddev, 1);
394
395 del_timer_sync(&mddev->safemode_timer);
394} 396}
395EXPORT_SYMBOL_GPL(mddev_suspend); 397EXPORT_SYMBOL_GPL(mddev_suspend);
396 398
@@ -7560,14 +7562,14 @@ void md_check_recovery(struct mddev *mddev)
7560 * any transients in the value of "sync_action". 7562 * any transients in the value of "sync_action".
7561 */ 7563 */
7562 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7564 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7563 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7564 /* Clear some bits that don't mean anything, but 7565 /* Clear some bits that don't mean anything, but
7565 * might be left set 7566 * might be left set
7566 */ 7567 */
7567 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 7568 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
7568 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 7569 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7569 7570
7570 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 7571 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7572 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
7571 goto unlock; 7573 goto unlock;
7572 /* no recovery is running. 7574 /* no recovery is running.
7573 * remove any failed drives, then 7575 * remove any failed drives, then
@@ -8140,7 +8142,8 @@ static int md_notify_reboot(struct notifier_block *this,
8140 8142
8141 for_each_mddev(mddev, tmp) { 8143 for_each_mddev(mddev, tmp) {
8142 if (mddev_trylock(mddev)) { 8144 if (mddev_trylock(mddev)) {
8143 __md_stop_writes(mddev); 8145 if (mddev->pers)
8146 __md_stop_writes(mddev);
8144 mddev->safemode = 2; 8147 mddev->safemode = 2;
8145 mddev_unlock(mddev); 8148 mddev_unlock(mddev);
8146 } 8149 }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c8dbb84d5357..3f91c2e1dfe7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3164,12 +3164,40 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3164 return size << conf->chunk_shift; 3164 return size << conf->chunk_shift;
3165} 3165}
3166 3166
3167static void calc_sectors(struct r10conf *conf, sector_t size)
3168{
3169 /* Calculate the number of sectors-per-device that will
3170 * actually be used, and set conf->dev_sectors and
3171 * conf->stride
3172 */
3173
3174 size = size >> conf->chunk_shift;
3175 sector_div(size, conf->far_copies);
3176 size = size * conf->raid_disks;
3177 sector_div(size, conf->near_copies);
3178 /* 'size' is now the number of chunks in the array */
3179 /* calculate "used chunks per device" */
3180 size = size * conf->copies;
3181
3182 /* We need to round up when dividing by raid_disks to
3183 * get the stride size.
3184 */
3185 size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
3186
3187 conf->dev_sectors = size << conf->chunk_shift;
3188
3189 if (conf->far_offset)
3190 conf->stride = 1 << conf->chunk_shift;
3191 else {
3192 sector_div(size, conf->far_copies);
3193 conf->stride = size << conf->chunk_shift;
3194 }
3195}
3167 3196
3168static struct r10conf *setup_conf(struct mddev *mddev) 3197static struct r10conf *setup_conf(struct mddev *mddev)
3169{ 3198{
3170 struct r10conf *conf = NULL; 3199 struct r10conf *conf = NULL;
3171 int nc, fc, fo; 3200 int nc, fc, fo;
3172 sector_t stride, size;
3173 int err = -EINVAL; 3201 int err = -EINVAL;
3174 3202
3175 if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) || 3203 if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
@@ -3219,28 +3247,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
3219 if (!conf->r10bio_pool) 3247 if (!conf->r10bio_pool)
3220 goto out; 3248 goto out;
3221 3249
3222 size = mddev->dev_sectors >> conf->chunk_shift; 3250 calc_sectors(conf, mddev->dev_sectors);
3223 sector_div(size, fc);
3224 size = size * conf->raid_disks;
3225 sector_div(size, nc);
3226 /* 'size' is now the number of chunks in the array */
3227 /* calculate "used chunks per device" in 'stride' */
3228 stride = size * conf->copies;
3229
3230 /* We need to round up when dividing by raid_disks to
3231 * get the stride size.
3232 */
3233 stride += conf->raid_disks - 1;
3234 sector_div(stride, conf->raid_disks);
3235
3236 conf->dev_sectors = stride << conf->chunk_shift;
3237
3238 if (fo)
3239 stride = 1;
3240 else
3241 sector_div(stride, fc);
3242 conf->stride = stride << conf->chunk_shift;
3243
3244 3251
3245 spin_lock_init(&conf->device_lock); 3252 spin_lock_init(&conf->device_lock);
3246 INIT_LIST_HEAD(&conf->retry_list); 3253 INIT_LIST_HEAD(&conf->retry_list);
@@ -3468,7 +3475,8 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
3468 mddev->recovery_cp = oldsize; 3475 mddev->recovery_cp = oldsize;
3469 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3476 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3470 } 3477 }
3471 mddev->dev_sectors = sectors; 3478 calc_sectors(conf, sectors);
3479 mddev->dev_sectors = conf->dev_sectors;
3472 mddev->resync_max_sectors = size; 3480 mddev->resync_max_sectors = size;
3473 return 0; 3481 return 0;
3474} 3482}
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 0f64d7182657..cb888d835a89 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -1921,6 +1921,10 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
1921 } else { 1921 } else {
1922 /* default values */ 1922 /* default values */
1923 switch (c->delivery_system) { 1923 switch (c->delivery_system) {
1924 case SYS_DVBS:
1925 case SYS_DVBS2:
1926 case SYS_ISDBS:
1927 case SYS_TURBO:
1924 case SYS_DVBC_ANNEX_A: 1928 case SYS_DVBC_ANNEX_A:
1925 case SYS_DVBC_ANNEX_C: 1929 case SYS_DVBC_ANNEX_C:
1926 fepriv->min_delay = HZ / 20; 1930 fepriv->min_delay = HZ / 20;
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 860c112e0fd2..bef5296173c9 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1018,22 +1018,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
1018 1018
1019 spin_lock_init(&dev->hw_lock); 1019 spin_lock_init(&dev->hw_lock);
1020 1020
1021 /* claim the resources */
1022 error = -EBUSY;
1023 dev->hw_io = pnp_port_start(pnp_dev, 0);
1024 if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
1025 dev->hw_io = -1;
1026 dev->irq = -1;
1027 goto error;
1028 }
1029
1030 dev->irq = pnp_irq(pnp_dev, 0);
1031 if (request_irq(dev->irq, ene_isr,
1032 IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
1033 dev->irq = -1;
1034 goto error;
1035 }
1036
1037 pnp_set_drvdata(pnp_dev, dev); 1021 pnp_set_drvdata(pnp_dev, dev);
1038 dev->pnp_dev = pnp_dev; 1022 dev->pnp_dev = pnp_dev;
1039 1023
@@ -1086,6 +1070,22 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
1086 device_set_wakeup_capable(&pnp_dev->dev, true); 1070 device_set_wakeup_capable(&pnp_dev->dev, true);
1087 device_set_wakeup_enable(&pnp_dev->dev, true); 1071 device_set_wakeup_enable(&pnp_dev->dev, true);
1088 1072
1073 /* claim the resources */
1074 error = -EBUSY;
1075 dev->hw_io = pnp_port_start(pnp_dev, 0);
1076 if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
1077 dev->hw_io = -1;
1078 dev->irq = -1;
1079 goto error;
1080 }
1081
1082 dev->irq = pnp_irq(pnp_dev, 0);
1083 if (request_irq(dev->irq, ene_isr,
1084 IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
1085 dev->irq = -1;
1086 goto error;
1087 }
1088
1089 error = rc_register_device(rdev); 1089 error = rc_register_device(rdev);
1090 if (error < 0) 1090 if (error < 0)
1091 goto error; 1091 goto error;
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 392d4be91f8f..4a3a238bcfbc 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -197,7 +197,7 @@ static int fintek_hw_detect(struct fintek_dev *fintek)
197 /* 197 /*
198 * Newer reviews of this chipset uses port 8 instead of 5 198 * Newer reviews of this chipset uses port 8 instead of 5
199 */ 199 */
200 if ((chip != 0x0408) || (chip != 0x0804)) 200 if ((chip != 0x0408) && (chip != 0x0804))
201 fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV2; 201 fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV2;
202 else 202 else
203 fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV1; 203 fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV1;
@@ -514,16 +514,6 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
514 514
515 spin_lock_init(&fintek->fintek_lock); 515 spin_lock_init(&fintek->fintek_lock);
516 516
517 ret = -EBUSY;
518 /* now claim resources */
519 if (!request_region(fintek->cir_addr,
520 fintek->cir_port_len, FINTEK_DRIVER_NAME))
521 goto failure;
522
523 if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
524 FINTEK_DRIVER_NAME, (void *)fintek))
525 goto failure;
526
527 pnp_set_drvdata(pdev, fintek); 517 pnp_set_drvdata(pdev, fintek);
528 fintek->pdev = pdev; 518 fintek->pdev = pdev;
529 519
@@ -558,6 +548,16 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
558 /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */ 548 /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
559 rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD); 549 rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
560 550
551 ret = -EBUSY;
552 /* now claim resources */
553 if (!request_region(fintek->cir_addr,
554 fintek->cir_port_len, FINTEK_DRIVER_NAME))
555 goto failure;
556
557 if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
558 FINTEK_DRIVER_NAME, (void *)fintek))
559 goto failure;
560
561 ret = rc_register_device(rdev); 561 ret = rc_register_device(rdev);
562 if (ret) 562 if (ret)
563 goto failure; 563 goto failure;
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 682009d76cdf..0e49c99abf68 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1515,16 +1515,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
1515 /* initialize raw event */ 1515 /* initialize raw event */
1516 init_ir_raw_event(&itdev->rawir); 1516 init_ir_raw_event(&itdev->rawir);
1517 1517
1518 ret = -EBUSY;
1519 /* now claim resources */
1520 if (!request_region(itdev->cir_addr,
1521 dev_desc->io_region_size, ITE_DRIVER_NAME))
1522 goto failure;
1523
1524 if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
1525 ITE_DRIVER_NAME, (void *)itdev))
1526 goto failure;
1527
1528 /* set driver data into the pnp device */ 1518 /* set driver data into the pnp device */
1529 pnp_set_drvdata(pdev, itdev); 1519 pnp_set_drvdata(pdev, itdev);
1530 itdev->pdev = pdev; 1520 itdev->pdev = pdev;
@@ -1600,6 +1590,16 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
1600 rdev->driver_name = ITE_DRIVER_NAME; 1590 rdev->driver_name = ITE_DRIVER_NAME;
1601 rdev->map_name = RC_MAP_RC6_MCE; 1591 rdev->map_name = RC_MAP_RC6_MCE;
1602 1592
1593 ret = -EBUSY;
1594 /* now claim resources */
1595 if (!request_region(itdev->cir_addr,
1596 dev_desc->io_region_size, ITE_DRIVER_NAME))
1597 goto failure;
1598
1599 if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
1600 ITE_DRIVER_NAME, (void *)itdev))
1601 goto failure;
1602
1603 ret = rc_register_device(rdev); 1603 ret = rc_register_device(rdev);
1604 if (ret) 1604 if (ret)
1605 goto failure; 1605 goto failure;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 144f3f55d765..8b2c071ac0ab 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -1021,24 +1021,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
1021 spin_lock_init(&nvt->nvt_lock); 1021 spin_lock_init(&nvt->nvt_lock);
1022 spin_lock_init(&nvt->tx.lock); 1022 spin_lock_init(&nvt->tx.lock);
1023 1023
1024 ret = -EBUSY;
1025 /* now claim resources */
1026 if (!request_region(nvt->cir_addr,
1027 CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
1028 goto failure;
1029
1030 if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
1031 NVT_DRIVER_NAME, (void *)nvt))
1032 goto failure;
1033
1034 if (!request_region(nvt->cir_wake_addr,
1035 CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
1036 goto failure;
1037
1038 if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
1039 NVT_DRIVER_NAME, (void *)nvt))
1040 goto failure;
1041
1042 pnp_set_drvdata(pdev, nvt); 1024 pnp_set_drvdata(pdev, nvt);
1043 nvt->pdev = pdev; 1025 nvt->pdev = pdev;
1044 1026
@@ -1085,6 +1067,24 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
1085 rdev->tx_resolution = XYZ; 1067 rdev->tx_resolution = XYZ;
1086#endif 1068#endif
1087 1069
1070 ret = -EBUSY;
1071 /* now claim resources */
1072 if (!request_region(nvt->cir_addr,
1073 CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
1074 goto failure;
1075
1076 if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
1077 NVT_DRIVER_NAME, (void *)nvt))
1078 goto failure;
1079
1080 if (!request_region(nvt->cir_wake_addr,
1081 CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
1082 goto failure;
1083
1084 if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
1085 NVT_DRIVER_NAME, (void *)nvt))
1086 goto failure;
1087
1088 ret = rc_register_device(rdev); 1088 ret = rc_register_device(rdev);
1089 if (ret) 1089 if (ret)
1090 goto failure; 1090 goto failure;
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index af526586fa26..342c2c8c1ddf 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -991,39 +991,10 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
991 "(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n", 991 "(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
992 data->wbase, data->ebase, data->sbase, data->irq); 992 data->wbase, data->ebase, data->sbase, data->irq);
993 993
994 if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
995 dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
996 data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
997 err = -EBUSY;
998 goto exit_free_data;
999 }
1000
1001 if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
1002 dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
1003 data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
1004 err = -EBUSY;
1005 goto exit_release_wbase;
1006 }
1007
1008 if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
1009 dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
1010 data->sbase, data->sbase + SP_IOMEM_LEN - 1);
1011 err = -EBUSY;
1012 goto exit_release_ebase;
1013 }
1014
1015 err = request_irq(data->irq, wbcir_irq_handler,
1016 IRQF_DISABLED, DRVNAME, device);
1017 if (err) {
1018 dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
1019 err = -EBUSY;
1020 goto exit_release_sbase;
1021 }
1022
1023 led_trigger_register_simple("cir-tx", &data->txtrigger); 994 led_trigger_register_simple("cir-tx", &data->txtrigger);
1024 if (!data->txtrigger) { 995 if (!data->txtrigger) {
1025 err = -ENOMEM; 996 err = -ENOMEM;
1026 goto exit_free_irq; 997 goto exit_free_data;
1027 } 998 }
1028 999
1029 led_trigger_register_simple("cir-rx", &data->rxtrigger); 1000 led_trigger_register_simple("cir-rx", &data->rxtrigger);
@@ -1062,9 +1033,38 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
1062 data->dev->priv = data; 1033 data->dev->priv = data;
1063 data->dev->dev.parent = &device->dev; 1034 data->dev->dev.parent = &device->dev;
1064 1035
1036 if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
1037 dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
1038 data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
1039 err = -EBUSY;
1040 goto exit_free_rc;
1041 }
1042
1043 if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
1044 dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
1045 data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
1046 err = -EBUSY;
1047 goto exit_release_wbase;
1048 }
1049
1050 if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
1051 dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
1052 data->sbase, data->sbase + SP_IOMEM_LEN - 1);
1053 err = -EBUSY;
1054 goto exit_release_ebase;
1055 }
1056
1057 err = request_irq(data->irq, wbcir_irq_handler,
1058 IRQF_DISABLED, DRVNAME, device);
1059 if (err) {
1060 dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
1061 err = -EBUSY;
1062 goto exit_release_sbase;
1063 }
1064
1065 err = rc_register_device(data->dev); 1065 err = rc_register_device(data->dev);
1066 if (err) 1066 if (err)
1067 goto exit_free_rc; 1067 goto exit_free_irq;
1068 1068
1069 device_init_wakeup(&device->dev, 1); 1069 device_init_wakeup(&device->dev, 1);
1070 1070
@@ -1072,14 +1072,6 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
1072 1072
1073 return 0; 1073 return 0;
1074 1074
1075exit_free_rc:
1076 rc_free_device(data->dev);
1077exit_unregister_led:
1078 led_classdev_unregister(&data->led);
1079exit_unregister_rxtrigger:
1080 led_trigger_unregister_simple(data->rxtrigger);
1081exit_unregister_txtrigger:
1082 led_trigger_unregister_simple(data->txtrigger);
1083exit_free_irq: 1075exit_free_irq:
1084 free_irq(data->irq, device); 1076 free_irq(data->irq, device);
1085exit_release_sbase: 1077exit_release_sbase:
@@ -1088,6 +1080,14 @@ exit_release_ebase:
1088 release_region(data->ebase, EHFUNC_IOMEM_LEN); 1080 release_region(data->ebase, EHFUNC_IOMEM_LEN);
1089exit_release_wbase: 1081exit_release_wbase:
1090 release_region(data->wbase, WAKEUP_IOMEM_LEN); 1082 release_region(data->wbase, WAKEUP_IOMEM_LEN);
1083exit_free_rc:
1084 rc_free_device(data->dev);
1085exit_unregister_led:
1086 led_classdev_unregister(&data->led);
1087exit_unregister_rxtrigger:
1088 led_trigger_unregister_simple(data->rxtrigger);
1089exit_unregister_txtrigger:
1090 led_trigger_unregister_simple(data->txtrigger);
1091exit_free_data: 1091exit_free_data:
1092 kfree(data); 1092 kfree(data);
1093 pnp_set_drvdata(device, NULL); 1093 pnp_set_drvdata(device, NULL);
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index db8e5084df06..863c755dd2b7 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -2923,6 +2923,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
2923 * not the JPEG end of frame ('ff d9'). 2923 * not the JPEG end of frame ('ff d9').
2924 */ 2924 */
2925 2925
2926 /* count the packets and their size */
2927 sd->npkt++;
2928 sd->pktsz += len;
2929
2926/*fixme: assumption about the following code: 2930/*fixme: assumption about the following code:
2927 * - there can be only one marker in a packet 2931 * - there can be only one marker in a packet
2928 */ 2932 */
@@ -2945,10 +2949,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
2945 data += i; 2949 data += i;
2946 } 2950 }
2947 2951
2948 /* count the packets and their size */
2949 sd->npkt++;
2950 sd->pktsz += len;
2951
2952 /* search backwards if there is a marker in the packet */ 2952 /* search backwards if there is a marker in the packet */
2953 for (i = len - 1; --i >= 0; ) { 2953 for (i = len - 1; --i >= 0; ) {
2954 if (data[i] != 0xff) { 2954 if (data[i] != 0xff) {
diff --git a/drivers/media/video/marvell-ccic/mmp-driver.c b/drivers/media/video/marvell-ccic/mmp-driver.c
index d23552323f45..c4c17fe76c0d 100644
--- a/drivers/media/video/marvell-ccic/mmp-driver.c
+++ b/drivers/media/video/marvell-ccic/mmp-driver.c
@@ -181,7 +181,6 @@ static int mmpcam_probe(struct platform_device *pdev)
181 INIT_LIST_HEAD(&cam->devlist); 181 INIT_LIST_HEAD(&cam->devlist);
182 182
183 mcam = &cam->mcam; 183 mcam = &cam->mcam;
184 mcam->platform = MHP_Armada610;
185 mcam->plat_power_up = mmpcam_power_up; 184 mcam->plat_power_up = mmpcam_power_up;
186 mcam->plat_power_down = mmpcam_power_down; 185 mcam->plat_power_down = mmpcam_power_down;
187 mcam->dev = &pdev->dev; 186 mcam->dev = &pdev->dev;
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index b06efd208328..7e9b2c612b03 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -246,28 +246,37 @@ int fimc_capture_resume(struct fimc_dev *fimc)
246 246
247} 247}
248 248
249static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane) 249static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
250{
251 if (!fr || plane >= fr->fmt->memplanes)
252 return 0;
253 return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8;
254}
255
256static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
257 unsigned int *num_buffers, unsigned int *num_planes, 250 unsigned int *num_buffers, unsigned int *num_planes,
258 unsigned int sizes[], void *allocators[]) 251 unsigned int sizes[], void *allocators[])
259{ 252{
253 const struct v4l2_pix_format_mplane *pixm = NULL;
260 struct fimc_ctx *ctx = vq->drv_priv; 254 struct fimc_ctx *ctx = vq->drv_priv;
261 struct fimc_fmt *fmt = ctx->d_frame.fmt; 255 struct fimc_frame *frame = &ctx->d_frame;
256 struct fimc_fmt *fmt = frame->fmt;
257 unsigned long wh;
262 int i; 258 int i;
263 259
264 if (!fmt) 260 if (pfmt) {
261 pixm = &pfmt->fmt.pix_mp;
262 fmt = fimc_find_format(&pixm->pixelformat, NULL,
263 FMT_FLAGS_CAM | FMT_FLAGS_M2M, -1);
264 wh = pixm->width * pixm->height;
265 } else {
266 wh = frame->f_width * frame->f_height;
267 }
268
269 if (fmt == NULL)
265 return -EINVAL; 270 return -EINVAL;
266 271
267 *num_planes = fmt->memplanes; 272 *num_planes = fmt->memplanes;
268 273
269 for (i = 0; i < fmt->memplanes; i++) { 274 for (i = 0; i < fmt->memplanes; i++) {
270 sizes[i] = get_plane_size(&ctx->d_frame, i); 275 unsigned int size = (wh * fmt->depth[i]) / 8;
276 if (pixm)
277 sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
278 else
279 sizes[i] = size;
271 allocators[i] = ctx->fimc_dev->alloc_ctx; 280 allocators[i] = ctx->fimc_dev->alloc_ctx;
272 } 281 }
273 282
@@ -1383,7 +1392,7 @@ static int fimc_subdev_set_crop(struct v4l2_subdev *sd,
1383 fimc_capture_try_crop(ctx, r, crop->pad); 1392 fimc_capture_try_crop(ctx, r, crop->pad);
1384 1393
1385 if (crop->which == V4L2_SUBDEV_FORMAT_TRY) { 1394 if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
1386 mutex_lock(&fimc->lock); 1395 mutex_unlock(&fimc->lock);
1387 *v4l2_subdev_get_try_crop(fh, crop->pad) = *r; 1396 *v4l2_subdev_get_try_crop(fh, crop->pad) = *r;
1388 return 0; 1397 return 0;
1389 } 1398 }
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index e184e650022a..e09ba7b0076e 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -1048,14 +1048,14 @@ static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
1048 * @mask: the color flags to match 1048 * @mask: the color flags to match
1049 * @index: offset in the fimc_formats array, ignored if negative 1049 * @index: offset in the fimc_formats array, ignored if negative
1050 */ 1050 */
1051struct fimc_fmt *fimc_find_format(u32 *pixelformat, u32 *mbus_code, 1051struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
1052 unsigned int mask, int index) 1052 unsigned int mask, int index)
1053{ 1053{
1054 struct fimc_fmt *fmt, *def_fmt = NULL; 1054 struct fimc_fmt *fmt, *def_fmt = NULL;
1055 unsigned int i; 1055 unsigned int i;
1056 int id = 0; 1056 int id = 0;
1057 1057
1058 if (index >= ARRAY_SIZE(fimc_formats)) 1058 if (index >= (int)ARRAY_SIZE(fimc_formats))
1059 return NULL; 1059 return NULL;
1060 1060
1061 for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) { 1061 for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index a18291e648e2..84fd83550bd7 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -718,7 +718,7 @@ void fimc_alpha_ctrl_update(struct fimc_ctx *ctx);
718int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f); 718int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f);
719void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height, 719void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
720 struct v4l2_pix_format_mplane *pix); 720 struct v4l2_pix_format_mplane *pix);
721struct fimc_fmt *fimc_find_format(u32 *pixelformat, u32 *mbus_code, 721struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
722 unsigned int mask, int index); 722 unsigned int mask, int index);
723 723
724int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh, 724int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index eb25756a07af..aedb970d13f6 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -530,7 +530,10 @@ static int soc_camera_open(struct file *file)
530 if (icl->reset) 530 if (icl->reset)
531 icl->reset(icd->pdev); 531 icl->reset(icd->pdev);
532 532
533 /* Don't mess with the host during probe */
534 mutex_lock(&ici->host_lock);
533 ret = ici->ops->add(icd); 535 ret = ici->ops->add(icd);
536 mutex_unlock(&ici->host_lock);
534 if (ret < 0) { 537 if (ret < 0) {
535 dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret); 538 dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
536 goto eiciadd; 539 goto eiciadd;
@@ -956,7 +959,7 @@ static void scan_add_host(struct soc_camera_host *ici)
956{ 959{
957 struct soc_camera_device *icd; 960 struct soc_camera_device *icd;
958 961
959 mutex_lock(&list_lock); 962 mutex_lock(&ici->host_lock);
960 963
961 list_for_each_entry(icd, &devices, list) { 964 list_for_each_entry(icd, &devices, list) {
962 if (icd->iface == ici->nr) { 965 if (icd->iface == ici->nr) {
@@ -967,7 +970,7 @@ static void scan_add_host(struct soc_camera_host *ici)
967 } 970 }
968 } 971 }
969 972
970 mutex_unlock(&list_lock); 973 mutex_unlock(&ici->host_lock);
971} 974}
972 975
973#ifdef CONFIG_I2C_BOARDINFO 976#ifdef CONFIG_I2C_BOARDINFO
@@ -1313,6 +1316,7 @@ int soc_camera_host_register(struct soc_camera_host *ici)
1313 list_add_tail(&ici->list, &hosts); 1316 list_add_tail(&ici->list, &hosts);
1314 mutex_unlock(&list_lock); 1317 mutex_unlock(&list_lock);
1315 1318
1319 mutex_init(&ici->host_lock);
1316 scan_add_host(ici); 1320 scan_add_host(ici);
1317 1321
1318 return 0; 1322 return 0;
diff --git a/drivers/media/video/videobuf2-dma-contig.c b/drivers/media/video/videobuf2-dma-contig.c
index f17ad98fcc5f..4b7132660a93 100644
--- a/drivers/media/video/videobuf2-dma-contig.c
+++ b/drivers/media/video/videobuf2-dma-contig.c
@@ -15,6 +15,7 @@
15#include <linux/dma-mapping.h> 15#include <linux/dma-mapping.h>
16 16
17#include <media/videobuf2-core.h> 17#include <media/videobuf2-core.h>
18#include <media/videobuf2-dma-contig.h>
18#include <media/videobuf2-memops.h> 19#include <media/videobuf2-memops.h>
19 20
20struct vb2_dc_conf { 21struct vb2_dc_conf {
@@ -85,7 +86,7 @@ static void *vb2_dma_contig_vaddr(void *buf_priv)
85{ 86{
86 struct vb2_dc_buf *buf = buf_priv; 87 struct vb2_dc_buf *buf = buf_priv;
87 if (!buf) 88 if (!buf)
88 return 0; 89 return NULL;
89 90
90 return buf->vaddr; 91 return buf->vaddr;
91} 92}
diff --git a/drivers/media/video/videobuf2-memops.c b/drivers/media/video/videobuf2-memops.c
index c41cb60245d6..504cd4cbe29e 100644
--- a/drivers/media/video/videobuf2-memops.c
+++ b/drivers/media/video/videobuf2-memops.c
@@ -55,6 +55,7 @@ struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
55 55
56 return vma_copy; 56 return vma_copy;
57} 57}
58EXPORT_SYMBOL_GPL(vb2_get_vma);
58 59
59/** 60/**
60 * vb2_put_userptr() - release a userspace virtual memory area 61 * vb2_put_userptr() - release a userspace virtual memory area
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index c171afa93239..69e9d5463564 100644
--- a/drivers/message/fusion/mptlan.h
+++ b/drivers/message/fusion/mptlan.h
@@ -69,7 +69,6 @@
69#include <linux/spinlock.h> 69#include <linux/spinlock.h>
70#include <linux/workqueue.h> 70#include <linux/workqueue.h>
71#include <linux/delay.h> 71#include <linux/delay.h>
72// #include <linux/trdevice.h>
73 72
74#include <asm/uaccess.h> 73#include <asm/uaccess.h>
75#include <asm/io.h> 74#include <asm/io.h>
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index c8aae6640e64..7e96bb229724 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,6 +25,7 @@
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <plat/cpu.h>
28#include <plat/usb.h> 29#include <plat/usb.h>
29#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
30 31
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 032b84791a16..b6f38421d541 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -94,6 +94,17 @@ static struct variant_data variant_u300 = {
94 .signal_direction = true, 94 .signal_direction = true,
95}; 95};
96 96
97static struct variant_data variant_nomadik = {
98 .fifosize = 16 * 4,
99 .fifohalfsize = 8 * 4,
100 .clkreg = MCI_CLK_ENABLE,
101 .datalength_bits = 24,
102 .sdio = true,
103 .st_clkdiv = true,
104 .pwrreg_powerup = MCI_PWR_ON,
105 .signal_direction = true,
106};
107
97static struct variant_data variant_ux500 = { 108static struct variant_data variant_ux500 = {
98 .fifosize = 30 * 4, 109 .fifosize = 30 * 4,
99 .fifohalfsize = 8 * 4, 110 .fifohalfsize = 8 * 4,
@@ -1397,7 +1408,7 @@ static int __devinit mmci_probe(struct amba_device *dev,
1397 if (ret) 1408 if (ret)
1398 goto unmap; 1409 goto unmap;
1399 1410
1400 if (dev->irq[1] == NO_IRQ || !dev->irq[1]) 1411 if (!dev->irq[1])
1401 host->singleirq = true; 1412 host->singleirq = true;
1402 else { 1413 else {
1403 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, 1414 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
@@ -1569,6 +1580,11 @@ static struct amba_id mmci_ids[] = {
1569 .data = &variant_u300, 1580 .data = &variant_u300,
1570 }, 1581 },
1571 { 1582 {
1583 .id = 0x10180180,
1584 .mask = 0xf0ffffff,
1585 .data = &variant_nomadik,
1586 },
1587 {
1572 .id = 0x00280180, 1588 .id = 0x00280180,
1573 .mask = 0x00ffffff, 1589 .mask = 0x00ffffff,
1574 .data = &variant_u300, 1590 .data = &variant_u300,
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index b0f2ef988188..e3f5af96ab87 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -363,6 +363,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
363 goto out; 363 goto out;
364 364
365 dmaengine_submit(desc); 365 dmaengine_submit(desc);
366 dma_async_issue_pending(host->dmach);
366 return; 367 return;
367 368
368out: 369out:
@@ -403,6 +404,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
403 goto out; 404 goto out;
404 405
405 dmaengine_submit(desc); 406 dmaengine_submit(desc);
407 dma_async_issue_pending(host->dmach);
406 return; 408 return;
407 409
408out: 410out:
@@ -531,6 +533,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
531 goto out; 533 goto out;
532 534
533 dmaengine_submit(desc); 535 dmaengine_submit(desc);
536 dma_async_issue_pending(host->dmach);
534 return; 537 return;
535out: 538out:
536 dev_warn(mmc_dev(host->mmc), 539 dev_warn(mmc_dev(host->mmc),
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 58fc65f5c817..f2f482bec573 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -376,7 +376,7 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
376 * Make a fake call to mtd_read_fact_prot_reg() to check if OTP 376 * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
377 * operations are supported. 377 * operations are supported.
378 */ 378 */
379 if (mtd_read_fact_prot_reg(mtd, -1, -1, &retlen, NULL) == -EOPNOTSUPP) 379 if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP)
380 return -EOPNOTSUPP; 380 return -EOPNOTSUPP;
381 381
382 switch (mode) { 382 switch (mode) {
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 73416951f4c1..861ca8f7e47d 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -212,18 +212,17 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
212 /* Link the private data with the MTD structure */ 212 /* Link the private data with the MTD structure */
213 ams_delta_mtd->priv = this; 213 ams_delta_mtd->priv = this;
214 214
215 if (!request_mem_region(res->start, resource_size(res), 215 /*
216 dev_name(&pdev->dev))) { 216 * Don't try to request the memory region from here,
217 dev_err(&pdev->dev, "request_mem_region failed\n"); 217 * it should have been already requested from the
218 err = -EBUSY; 218 * gpio-omap driver and requesting it again would fail.
219 goto out_free; 219 */
220 }
221 220
222 io_base = ioremap(res->start, resource_size(res)); 221 io_base = ioremap(res->start, resource_size(res));
223 if (io_base == NULL) { 222 if (io_base == NULL) {
224 dev_err(&pdev->dev, "ioremap failed\n"); 223 dev_err(&pdev->dev, "ioremap failed\n");
225 err = -EIO; 224 err = -EIO;
226 goto out_release_io; 225 goto out_free;
227 } 226 }
228 227
229 this->priv = io_base; 228 this->priv = io_base;
@@ -271,8 +270,6 @@ out_gpio:
271 platform_set_drvdata(pdev, NULL); 270 platform_set_drvdata(pdev, NULL);
272 gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB); 271 gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
273 iounmap(io_base); 272 iounmap(io_base);
274out_release_io:
275 release_mem_region(res->start, resource_size(res));
276out_free: 273out_free:
277 kfree(ams_delta_mtd); 274 kfree(ams_delta_mtd);
278 out: 275 out:
@@ -285,7 +282,6 @@ out_free:
285static int __devexit ams_delta_cleanup(struct platform_device *pdev) 282static int __devexit ams_delta_cleanup(struct platform_device *pdev)
286{ 283{
287 void __iomem *io_base = platform_get_drvdata(pdev); 284 void __iomem *io_base = platform_get_drvdata(pdev);
288 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
289 285
290 /* Release resources, unregister device */ 286 /* Release resources, unregister device */
291 nand_release(ams_delta_mtd); 287 nand_release(ams_delta_mtd);
@@ -293,7 +289,6 @@ static int __devexit ams_delta_cleanup(struct platform_device *pdev)
293 gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio)); 289 gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
294 gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB); 290 gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
295 iounmap(io_base); 291 iounmap(io_base);
296 release_mem_region(res->start, resource_size(res));
297 292
298 /* Free the MTD device structure */ 293 /* Free the MTD device structure */
299 kfree(ams_delta_mtd); 294 kfree(ams_delta_mtd);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 75b1dde16358..9ec51cec2e14 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -266,6 +266,7 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
266 desc->callback = dma_irq_callback; 266 desc->callback = dma_irq_callback;
267 desc->callback_param = this; 267 desc->callback_param = this;
268 dmaengine_submit(desc); 268 dmaengine_submit(desc);
269 dma_async_issue_pending(get_dma_chan(this));
269 270
270 /* Wait for the interrupt from the DMA block. */ 271 /* Wait for the interrupt from the DMA block. */
271 err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); 272 err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b98285446a5a..0c2bd806950e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -66,10 +66,7 @@ config DUMMY
66 <http://www.tldp.org/docs.html#guide>. 66 <http://www.tldp.org/docs.html#guide>.
67 67
68 To compile this driver as a module, choose M here: the module 68 To compile this driver as a module, choose M here: the module
69 will be called dummy. If you want to use more than one dummy 69 will be called dummy.
70 device at a time, you need to compile this driver as a module.
71 Instead of 'dummy', the devices will then be called 'dummy0',
72 'dummy1' etc.
73 70
74config EQUALIZER 71config EQUALIZER
75 tristate "EQL (serial line load balancing) support" 72 tristate "EQL (serial line load balancing) support"
@@ -285,8 +282,6 @@ source "drivers/net/slip/Kconfig"
285 282
286source "drivers/s390/net/Kconfig" 283source "drivers/s390/net/Kconfig"
287 284
288source "drivers/net/tokenring/Kconfig"
289
290source "drivers/net/usb/Kconfig" 285source "drivers/net/usb/Kconfig"
291 286
292source "drivers/net/wireless/Kconfig" 287source "drivers/net/wireless/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a6b8ce11a22f..3d375ca128a6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_SLIP) += slip/
50obj-$(CONFIG_SLHC) += slip/ 50obj-$(CONFIG_SLHC) += slip/
51obj-$(CONFIG_NET_SB1000) += sb1000.o 51obj-$(CONFIG_NET_SB1000) += sb1000.o
52obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o 52obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
53obj-$(CONFIG_TR) += tokenring/
54obj-$(CONFIG_WAN) += wan/ 53obj-$(CONFIG_WAN) += wan/
55obj-$(CONFIG_WLAN) += wireless/ 54obj-$(CONFIG_WLAN) += wireless/
56obj-$(CONFIG_WIMAX) += wimax/ 55obj-$(CONFIG_WIMAX) += wimax/
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 88bbd8ffa7fe..e3f0faca98d0 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -29,7 +29,6 @@
29 */ 29 */
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/trdevice.h>
33#include <linux/errno.h> 32#include <linux/errno.h>
34#include <linux/init.h> 33#include <linux/init.h>
35#include <linux/netlink.h> 34#include <linux/netlink.h>
@@ -134,22 +133,9 @@ static struct devprobe2 eisa_probes[] __initdata = {
134 {NULL, 0}, 133 {NULL, 0},
135}; 134};
136 135
137static struct devprobe2 mca_probes[] __initdata = {
138#ifdef CONFIG_NE2_MCA
139 {ne2_probe, 0},
140#endif
141#ifdef CONFIG_ELMC /* 3c523 */
142 {elmc_probe, 0},
143#endif
144#ifdef CONFIG_ELMC_II /* 3c527 */
145 {mc32_probe, 0},
146#endif
147 {NULL, 0},
148};
149
150/* 136/*
151 * ISA probes that touch addresses < 0x400 (including those that also 137 * ISA probes that touch addresses < 0x400 (including those that also
152 * look for EISA/PCI/MCA cards in addition to ISA cards). 138 * look for EISA/PCI cards in addition to ISA cards).
153 */ 139 */
154static struct devprobe2 isa_probes[] __initdata = { 140static struct devprobe2 isa_probes[] __initdata = {
155#if defined(CONFIG_HP100) && defined(CONFIG_ISA) /* ISA, EISA */ 141#if defined(CONFIG_HP100) && defined(CONFIG_ISA) /* ISA, EISA */
@@ -279,51 +265,10 @@ static void __init ethif_probe2(int unit)
279 265
280 (void)( probe_list2(unit, m68k_probes, base_addr == 0) && 266 (void)( probe_list2(unit, m68k_probes, base_addr == 0) &&
281 probe_list2(unit, eisa_probes, base_addr == 0) && 267 probe_list2(unit, eisa_probes, base_addr == 0) &&
282 probe_list2(unit, mca_probes, base_addr == 0) &&
283 probe_list2(unit, isa_probes, base_addr == 0) && 268 probe_list2(unit, isa_probes, base_addr == 0) &&
284 probe_list2(unit, parport_probes, base_addr == 0)); 269 probe_list2(unit, parport_probes, base_addr == 0));
285} 270}
286 271
287#ifdef CONFIG_TR
288/* Token-ring device probe */
289extern int ibmtr_probe_card(struct net_device *);
290extern struct net_device *smctr_probe(int unit);
291
292static struct devprobe2 tr_probes2[] __initdata = {
293#ifdef CONFIG_SMCTR
294 {smctr_probe, 0},
295#endif
296 {NULL, 0},
297};
298
299static __init int trif_probe(int unit)
300{
301 int err = -ENODEV;
302#ifdef CONFIG_IBMTR
303 struct net_device *dev = alloc_trdev(0);
304 if (!dev)
305 return -ENOMEM;
306
307 sprintf(dev->name, "tr%d", unit);
308 netdev_boot_setup_check(dev);
309 err = ibmtr_probe_card(dev);
310 if (err)
311 free_netdev(dev);
312#endif
313 return err;
314}
315
316static void __init trif_probe2(int unit)
317{
318 unsigned long base_addr = netdev_boot_base("tr", unit);
319
320 if (base_addr == 1)
321 return;
322 probe_list2(unit, tr_probes2, base_addr == 0);
323}
324#endif
325
326
327/* Statically configured drivers -- order matters here. */ 272/* Statically configured drivers -- order matters here. */
328static int __init net_olddevs_init(void) 273static int __init net_olddevs_init(void)
329{ 274{
@@ -333,11 +278,6 @@ static int __init net_olddevs_init(void)
333 for (num = 0; num < 8; ++num) 278 for (num = 0; num < 8; ++num)
334 sbni_probe(num); 279 sbni_probe(num);
335#endif 280#endif
336#ifdef CONFIG_TR
337 for (num = 0; num < 8; ++num)
338 if (!trif_probe(num))
339 trif_probe2(num);
340#endif
341 for (num = 0; num < 8; ++num) 281 for (num = 0; num < 8; ++num)
342 ethif_probe2(num); 282 ethif_probe2(num);
343 283
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 25197b698dd6..b8b4c7ba884f 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -89,16 +89,16 @@ static int __init arcrimi_probe(struct net_device *dev)
89 BUGLVL(D_NORMAL) printk(VERSION); 89 BUGLVL(D_NORMAL) printk(VERSION);
90 BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n"); 90 BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n");
91 91
92 BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n", 92 BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n",
93 dev->dev_addr[0], dev->mem_start, dev->irq); 93 dev->dev_addr[0], dev->mem_start, dev->irq);
94 94
95 if (dev->mem_start <= 0 || dev->irq <= 0) { 95 if (dev->mem_start <= 0 || dev->irq <= 0) {
96 BUGMSG(D_NORMAL, "No autoprobe for RIM I; you " 96 BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you "
97 "must specify the shmem and irq!\n"); 97 "must specify the shmem and irq!\n");
98 return -ENODEV; 98 return -ENODEV;
99 } 99 }
100 if (dev->dev_addr[0] == 0) { 100 if (dev->dev_addr[0] == 0) {
101 BUGMSG(D_NORMAL, "You need to specify your card's station " 101 BUGLVL(D_NORMAL) printk("You need to specify your card's station "
102 "ID!\n"); 102 "ID!\n");
103 return -ENODEV; 103 return -ENODEV;
104 } 104 }
@@ -109,7 +109,7 @@ static int __init arcrimi_probe(struct net_device *dev)
109 * will be taken. 109 * will be taken.
110 */ 110 */
111 if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { 111 if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) {
112 BUGMSG(D_NORMAL, "Card memory already allocated\n"); 112 BUGLVL(D_NORMAL) printk("Card memory already allocated\n");
113 return -ENODEV; 113 return -ENODEV;
114 } 114 }
115 return arcrimi_found(dev); 115 return arcrimi_found(dev);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 793b00138275..3463b469e657 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2173,9 +2173,10 @@ re_arm:
2173 * received frames (loopback). Since only the payload is given to this 2173 * received frames (loopback). Since only the payload is given to this
2174 * function, it check for loopback. 2174 * function, it check for loopback.
2175 */ 2175 */
2176static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length) 2176static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
2177{ 2177{
2178 struct port *port; 2178 struct port *port;
2179 int ret = RX_HANDLER_ANOTHER;
2179 2180
2180 if (length >= sizeof(struct lacpdu)) { 2181 if (length >= sizeof(struct lacpdu)) {
2181 2182
@@ -2184,11 +2185,12 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2184 if (!port->slave) { 2185 if (!port->slave) {
2185 pr_warning("%s: Warning: port of slave %s is uninitialized\n", 2186 pr_warning("%s: Warning: port of slave %s is uninitialized\n",
2186 slave->dev->name, slave->dev->master->name); 2187 slave->dev->name, slave->dev->master->name);
2187 return; 2188 return ret;
2188 } 2189 }
2189 2190
2190 switch (lacpdu->subtype) { 2191 switch (lacpdu->subtype) {
2191 case AD_TYPE_LACPDU: 2192 case AD_TYPE_LACPDU:
2193 ret = RX_HANDLER_CONSUMED;
2192 pr_debug("Received LACPDU on port %d\n", 2194 pr_debug("Received LACPDU on port %d\n",
2193 port->actor_port_number); 2195 port->actor_port_number);
2194 /* Protect against concurrent state machines */ 2196 /* Protect against concurrent state machines */
@@ -2198,6 +2200,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2198 break; 2200 break;
2199 2201
2200 case AD_TYPE_MARKER: 2202 case AD_TYPE_MARKER:
2203 ret = RX_HANDLER_CONSUMED;
2201 // No need to convert fields to Little Endian since we don't use the marker's fields. 2204 // No need to convert fields to Little Endian since we don't use the marker's fields.
2202 2205
2203 switch (((struct bond_marker *)lacpdu)->tlv_type) { 2206 switch (((struct bond_marker *)lacpdu)->tlv_type) {
@@ -2219,6 +2222,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2219 } 2222 }
2220 } 2223 }
2221 } 2224 }
2225 return ret;
2222} 2226}
2223 2227
2224/** 2228/**
@@ -2456,18 +2460,20 @@ out:
2456 return NETDEV_TX_OK; 2460 return NETDEV_TX_OK;
2457} 2461}
2458 2462
2459void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 2463int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
2460 struct slave *slave) 2464 struct slave *slave)
2461{ 2465{
2466 int ret = RX_HANDLER_ANOTHER;
2462 if (skb->protocol != PKT_TYPE_LACPDU) 2467 if (skb->protocol != PKT_TYPE_LACPDU)
2463 return; 2468 return ret;
2464 2469
2465 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2470 if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
2466 return; 2471 return ret;
2467 2472
2468 read_lock(&bond->lock); 2473 read_lock(&bond->lock);
2469 bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); 2474 ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
2470 read_unlock(&bond->lock); 2475 read_unlock(&bond->lock);
2476 return ret;
2471} 2477}
2472 2478
2473/* 2479/*
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 235b2cc58b28..5ee7e3c45db7 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -274,7 +274,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
274void bond_3ad_handle_link_change(struct slave *slave, char link); 274void bond_3ad_handle_link_change(struct slave *slave, char link);
275int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); 275int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
276int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); 276int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
277void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 277int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
278 struct slave *slave); 278 struct slave *slave);
279int bond_3ad_set_carrier(struct bonding *bond); 279int bond_3ad_set_carrier(struct bonding *bond);
280void bond_3ad_update_lacp_rate(struct bonding *bond); 280void bond_3ad_update_lacp_rate(struct bonding *bond);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9abfde479316..0f59c1564e53 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -332,7 +332,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
332 if ((client_info->assigned) && 332 if ((client_info->assigned) &&
333 (client_info->ip_src == arp->ip_dst) && 333 (client_info->ip_src == arp->ip_dst) &&
334 (client_info->ip_dst == arp->ip_src) && 334 (client_info->ip_dst == arp->ip_src) &&
335 (compare_ether_addr_64bits(client_info->mac_dst, arp->mac_src))) { 335 (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
336 /* update the clients MAC address */ 336 /* update the clients MAC address */
337 memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN); 337 memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
338 client_info->ntt = 1; 338 client_info->ntt = 1;
@@ -342,26 +342,26 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
342 _unlock_rx_hashtbl_bh(bond); 342 _unlock_rx_hashtbl_bh(bond);
343} 343}
344 344
345static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond, 345static int rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
346 struct slave *slave) 346 struct slave *slave)
347{ 347{
348 struct arp_pkt *arp; 348 struct arp_pkt *arp;
349 349
350 if (skb->protocol != cpu_to_be16(ETH_P_ARP)) 350 if (skb->protocol != cpu_to_be16(ETH_P_ARP))
351 return; 351 goto out;
352 352
353 arp = (struct arp_pkt *) skb->data; 353 arp = (struct arp_pkt *) skb->data;
354 if (!arp) { 354 if (!arp) {
355 pr_debug("Packet has no ARP data\n"); 355 pr_debug("Packet has no ARP data\n");
356 return; 356 goto out;
357 } 357 }
358 358
359 if (!pskb_may_pull(skb, arp_hdr_len(bond->dev))) 359 if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
360 return; 360 goto out;
361 361
362 if (skb->len < sizeof(struct arp_pkt)) { 362 if (skb->len < sizeof(struct arp_pkt)) {
363 pr_debug("Packet is too small to be an ARP\n"); 363 pr_debug("Packet is too small to be an ARP\n");
364 return; 364 goto out;
365 } 365 }
366 366
367 if (arp->op_code == htons(ARPOP_REPLY)) { 367 if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -369,6 +369,8 @@ static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
369 rlb_update_entry_from_arp(bond, arp); 369 rlb_update_entry_from_arp(bond, arp);
370 pr_debug("Server received an ARP Reply from client\n"); 370 pr_debug("Server received an ARP Reply from client\n");
371 } 371 }
372out:
373 return RX_HANDLER_ANOTHER;
372} 374}
373 375
374/* Caller must hold bond lock for read */ 376/* Caller must hold bond lock for read */
@@ -448,8 +450,8 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
448 450
449 if (assigned_slave) { 451 if (assigned_slave) {
450 rx_hash_table[index].slave = assigned_slave; 452 rx_hash_table[index].slave = assigned_slave;
451 if (compare_ether_addr_64bits(rx_hash_table[index].mac_dst, 453 if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst,
452 mac_bcast)) { 454 mac_bcast)) {
453 bond_info->rx_hashtbl[index].ntt = 1; 455 bond_info->rx_hashtbl[index].ntt = 1;
454 bond_info->rx_ntt = 1; 456 bond_info->rx_ntt = 1;
455 /* A slave has been removed from the 457 /* A slave has been removed from the
@@ -561,7 +563,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
561 client_info = &(bond_info->rx_hashtbl[hash_index]); 563 client_info = &(bond_info->rx_hashtbl[hash_index]);
562 564
563 if ((client_info->slave == slave) && 565 if ((client_info->slave == slave) &&
564 compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) { 566 !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
565 client_info->ntt = 1; 567 client_info->ntt = 1;
566 ntt = 1; 568 ntt = 1;
567 } 569 }
@@ -600,9 +602,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
600 * unicast mac address. 602 * unicast mac address.
601 */ 603 */
602 if ((client_info->ip_src == src_ip) && 604 if ((client_info->ip_src == src_ip) &&
603 compare_ether_addr_64bits(client_info->slave->dev->dev_addr, 605 !ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
604 bond->dev->dev_addr) && 606 bond->dev->dev_addr) &&
605 compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) { 607 !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
606 client_info->ntt = 1; 608 client_info->ntt = 1;
607 bond_info->rx_ntt = 1; 609 bond_info->rx_ntt = 1;
608 } 610 }
@@ -629,7 +631,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
629 if ((client_info->ip_src == arp->ip_src) && 631 if ((client_info->ip_src == arp->ip_src) &&
630 (client_info->ip_dst == arp->ip_dst)) { 632 (client_info->ip_dst == arp->ip_dst)) {
631 /* the entry is already assigned to this client */ 633 /* the entry is already assigned to this client */
632 if (compare_ether_addr_64bits(arp->mac_dst, mac_bcast)) { 634 if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
633 /* update mac address from arp */ 635 /* update mac address from arp */
634 memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN); 636 memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
635 } 637 }
@@ -664,7 +666,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
664 memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN); 666 memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
665 client_info->slave = assigned_slave; 667 client_info->slave = assigned_slave;
666 668
667 if (compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) { 669 if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
668 client_info->ntt = 1; 670 client_info->ntt = 1;
669 bond->alb_info.rx_ntt = 1; 671 bond->alb_info.rx_ntt = 1;
670 } else { 672 } else {
@@ -1009,18 +1011,18 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
1009 int perm_curr_diff; 1011 int perm_curr_diff;
1010 int perm_bond_diff; 1012 int perm_bond_diff;
1011 1013
1012 perm_curr_diff = compare_ether_addr_64bits(slave->perm_hwaddr, 1014 perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
1013 slave->dev->dev_addr); 1015 slave->dev->dev_addr);
1014 perm_bond_diff = compare_ether_addr_64bits(slave->perm_hwaddr, 1016 perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
1015 bond->dev->dev_addr); 1017 bond->dev->dev_addr);
1016 1018
1017 if (perm_curr_diff && perm_bond_diff) { 1019 if (perm_curr_diff && perm_bond_diff) {
1018 struct slave *tmp_slave; 1020 struct slave *tmp_slave;
1019 int i, found = 0; 1021 int i, found = 0;
1020 1022
1021 bond_for_each_slave(bond, tmp_slave, i) { 1023 bond_for_each_slave(bond, tmp_slave, i) {
1022 if (!compare_ether_addr_64bits(slave->perm_hwaddr, 1024 if (ether_addr_equal_64bits(slave->perm_hwaddr,
1023 tmp_slave->dev->dev_addr)) { 1025 tmp_slave->dev->dev_addr)) {
1024 found = 1; 1026 found = 1;
1025 break; 1027 break;
1026 } 1028 }
@@ -1074,10 +1076,10 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1074 * check uniqueness of slave's mac address against the other 1076 * check uniqueness of slave's mac address against the other
1075 * slaves in the bond. 1077 * slaves in the bond.
1076 */ 1078 */
1077 if (compare_ether_addr_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) { 1079 if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
1078 bond_for_each_slave(bond, tmp_slave1, i) { 1080 bond_for_each_slave(bond, tmp_slave1, i) {
1079 if (!compare_ether_addr_64bits(tmp_slave1->dev->dev_addr, 1081 if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
1080 slave->dev->dev_addr)) { 1082 slave->dev->dev_addr)) {
1081 found = 1; 1083 found = 1;
1082 break; 1084 break;
1083 } 1085 }
@@ -1099,8 +1101,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1099 bond_for_each_slave(bond, tmp_slave1, i) { 1101 bond_for_each_slave(bond, tmp_slave1, i) {
1100 found = 0; 1102 found = 0;
1101 bond_for_each_slave(bond, tmp_slave2, j) { 1103 bond_for_each_slave(bond, tmp_slave2, j) {
1102 if (!compare_ether_addr_64bits(tmp_slave1->perm_hwaddr, 1104 if (ether_addr_equal_64bits(tmp_slave1->perm_hwaddr,
1103 tmp_slave2->dev->dev_addr)) { 1105 tmp_slave2->dev->dev_addr)) {
1104 found = 1; 1106 found = 1;
1105 break; 1107 break;
1106 } 1108 }
@@ -1115,8 +1117,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1115 } 1117 }
1116 1118
1117 if (!has_bond_addr) { 1119 if (!has_bond_addr) {
1118 if (!compare_ether_addr_64bits(tmp_slave1->dev->dev_addr, 1120 if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
1119 bond->dev->dev_addr)) { 1121 bond->dev->dev_addr)) {
1120 1122
1121 has_bond_addr = tmp_slave1; 1123 has_bond_addr = tmp_slave1;
1122 } 1124 }
@@ -1257,7 +1259,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1257 case ETH_P_IP: { 1259 case ETH_P_IP: {
1258 const struct iphdr *iph = ip_hdr(skb); 1260 const struct iphdr *iph = ip_hdr(skb);
1259 1261
1260 if (!compare_ether_addr_64bits(eth_data->h_dest, mac_bcast) || 1262 if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
1261 (iph->daddr == ip_bcast) || 1263 (iph->daddr == ip_bcast) ||
1262 (iph->protocol == IPPROTO_IGMP)) { 1264 (iph->protocol == IPPROTO_IGMP)) {
1263 do_tx_balance = 0; 1265 do_tx_balance = 0;
@@ -1271,7 +1273,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1271 /* IPv6 doesn't really use broadcast mac address, but leave 1273 /* IPv6 doesn't really use broadcast mac address, but leave
1272 * that here just in case. 1274 * that here just in case.
1273 */ 1275 */
1274 if (!compare_ether_addr_64bits(eth_data->h_dest, mac_bcast)) { 1276 if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
1275 do_tx_balance = 0; 1277 do_tx_balance = 0;
1276 break; 1278 break;
1277 } 1279 }
@@ -1279,7 +1281,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1279 /* IPv6 uses all-nodes multicast as an equivalent to 1281 /* IPv6 uses all-nodes multicast as an equivalent to
1280 * broadcasts in IPv4. 1282 * broadcasts in IPv4.
1281 */ 1283 */
1282 if (!compare_ether_addr_64bits(eth_data->h_dest, mac_v6_allmcast)) { 1284 if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
1283 do_tx_balance = 0; 1285 do_tx_balance = 0;
1284 break; 1286 break;
1285 } 1287 }
@@ -1603,8 +1605,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1603 struct slave *tmp_slave; 1605 struct slave *tmp_slave;
1604 /* find slave that is holding the bond's mac address */ 1606 /* find slave that is holding the bond's mac address */
1605 bond_for_each_slave(bond, tmp_slave, i) { 1607 bond_for_each_slave(bond, tmp_slave, i) {
1606 if (!compare_ether_addr_64bits(tmp_slave->dev->dev_addr, 1608 if (ether_addr_equal_64bits(tmp_slave->dev->dev_addr,
1607 bond->dev->dev_addr)) { 1609 bond->dev->dev_addr)) {
1608 swap_slave = tmp_slave; 1610 swap_slave = tmp_slave;
1609 break; 1611 break;
1610 } 1612 }
@@ -1681,8 +1683,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1681 swap_slave = NULL; 1683 swap_slave = NULL;
1682 1684
1683 bond_for_each_slave(bond, slave, i) { 1685 bond_for_each_slave(bond, slave, i) {
1684 if (!compare_ether_addr_64bits(slave->dev->dev_addr, 1686 if (ether_addr_equal_64bits(slave->dev->dev_addr,
1685 bond_dev->dev_addr)) { 1687 bond_dev->dev_addr)) {
1686 swap_slave = slave; 1688 swap_slave = slave;
1687 break; 1689 break;
1688 } 1690 }
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 62d2409bb293..2ee8cf9e8a3b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -549,9 +549,9 @@ down:
549 * Get link speed and duplex from the slave's base driver 549 * Get link speed and duplex from the slave's base driver
550 * using ethtool. If for some reason the call fails or the 550 * using ethtool. If for some reason the call fails or the
551 * values are invalid, set speed and duplex to -1, 551 * values are invalid, set speed and duplex to -1,
552 * and return error. 552 * and return.
553 */ 553 */
554static int bond_update_speed_duplex(struct slave *slave) 554static void bond_update_speed_duplex(struct slave *slave)
555{ 555{
556 struct net_device *slave_dev = slave->dev; 556 struct net_device *slave_dev = slave->dev;
557 struct ethtool_cmd ecmd; 557 struct ethtool_cmd ecmd;
@@ -563,24 +563,24 @@ static int bond_update_speed_duplex(struct slave *slave)
563 563
564 res = __ethtool_get_settings(slave_dev, &ecmd); 564 res = __ethtool_get_settings(slave_dev, &ecmd);
565 if (res < 0) 565 if (res < 0)
566 return -1; 566 return;
567 567
568 slave_speed = ethtool_cmd_speed(&ecmd); 568 slave_speed = ethtool_cmd_speed(&ecmd);
569 if (slave_speed == 0 || slave_speed == ((__u32) -1)) 569 if (slave_speed == 0 || slave_speed == ((__u32) -1))
570 return -1; 570 return;
571 571
572 switch (ecmd.duplex) { 572 switch (ecmd.duplex) {
573 case DUPLEX_FULL: 573 case DUPLEX_FULL:
574 case DUPLEX_HALF: 574 case DUPLEX_HALF:
575 break; 575 break;
576 default: 576 default:
577 return -1; 577 return;
578 } 578 }
579 579
580 slave->speed = slave_speed; 580 slave->speed = slave_speed;
581 slave->duplex = ecmd.duplex; 581 slave->duplex = ecmd.duplex;
582 582
583 return 0; 583 return;
584} 584}
585 585
586/* 586/*
@@ -1444,8 +1444,9 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1444 struct sk_buff *skb = *pskb; 1444 struct sk_buff *skb = *pskb;
1445 struct slave *slave; 1445 struct slave *slave;
1446 struct bonding *bond; 1446 struct bonding *bond;
1447 void (*recv_probe)(struct sk_buff *, struct bonding *, 1447 int (*recv_probe)(struct sk_buff *, struct bonding *,
1448 struct slave *); 1448 struct slave *);
1449 int ret = RX_HANDLER_ANOTHER;
1449 1450
1450 skb = skb_share_check(skb, GFP_ATOMIC); 1451 skb = skb_share_check(skb, GFP_ATOMIC);
1451 if (unlikely(!skb)) 1452 if (unlikely(!skb))
@@ -1464,8 +1465,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1464 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1465 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1465 1466
1466 if (likely(nskb)) { 1467 if (likely(nskb)) {
1467 recv_probe(nskb, bond, slave); 1468 ret = recv_probe(nskb, bond, slave);
1468 dev_kfree_skb(nskb); 1469 dev_kfree_skb(nskb);
1470 if (ret == RX_HANDLER_CONSUMED) {
1471 consume_skb(skb);
1472 return ret;
1473 }
1469 } 1474 }
1470 } 1475 }
1471 1476
@@ -1487,7 +1492,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1487 memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN); 1492 memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
1488 } 1493 }
1489 1494
1490 return RX_HANDLER_ANOTHER; 1495 return ret;
1491} 1496}
1492 1497
1493/* enslave device <slave> to bond device <master> */ 1498/* enslave device <slave> to bond device <master> */
@@ -1726,7 +1731,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1726 1731
1727 read_lock(&bond->lock); 1732 read_lock(&bond->lock);
1728 1733
1729 new_slave->last_arp_rx = jiffies; 1734 new_slave->last_arp_rx = jiffies -
1735 (msecs_to_jiffies(bond->params.arp_interval) + 1);
1730 1736
1731 if (bond->params.miimon && !bond->params.use_carrier) { 1737 if (bond->params.miimon && !bond->params.use_carrier) {
1732 link_reporting = bond_check_dev_link(bond, slave_dev, 1); 1738 link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1751,22 +1757,30 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1751 } 1757 }
1752 1758
1753 /* check for initial state */ 1759 /* check for initial state */
1754 if (!bond->params.miimon || 1760 if (bond->params.miimon) {
1755 (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) { 1761 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
1756 if (bond->params.updelay) { 1762 if (bond->params.updelay) {
1757 pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n"); 1763 new_slave->link = BOND_LINK_BACK;
1758 new_slave->link = BOND_LINK_BACK; 1764 new_slave->delay = bond->params.updelay;
1759 new_slave->delay = bond->params.updelay; 1765 } else {
1766 new_slave->link = BOND_LINK_UP;
1767 }
1760 } else { 1768 } else {
1761 pr_debug("Initial state of slave_dev is BOND_LINK_UP\n"); 1769 new_slave->link = BOND_LINK_DOWN;
1762 new_slave->link = BOND_LINK_UP;
1763 } 1770 }
1764 new_slave->jiffies = jiffies; 1771 } else if (bond->params.arp_interval) {
1772 new_slave->link = (netif_carrier_ok(slave_dev) ?
1773 BOND_LINK_UP : BOND_LINK_DOWN);
1765 } else { 1774 } else {
1766 pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n"); 1775 new_slave->link = BOND_LINK_UP;
1767 new_slave->link = BOND_LINK_DOWN;
1768 } 1776 }
1769 1777
1778 if (new_slave->link != BOND_LINK_DOWN)
1779 new_slave->jiffies = jiffies;
1780 pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
1781 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
1782 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1783
1770 bond_update_speed_duplex(new_slave); 1784 bond_update_speed_duplex(new_slave);
1771 1785
1772 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { 1786 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
@@ -1952,7 +1966,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1952 write_lock_bh(&bond->lock); 1966 write_lock_bh(&bond->lock);
1953 1967
1954 if (!bond->params.fail_over_mac) { 1968 if (!bond->params.fail_over_mac) {
1955 if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && 1969 if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
1956 bond->slave_cnt > 1) 1970 bond->slave_cnt > 1)
1957 pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n", 1971 pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
1958 bond_dev->name, slave_dev->name, 1972 bond_dev->name, slave_dev->name,
@@ -2723,7 +2737,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2723 } 2737 }
2724} 2738}
2725 2739
2726static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, 2740static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
2727 struct slave *slave) 2741 struct slave *slave)
2728{ 2742{
2729 struct arphdr *arp; 2743 struct arphdr *arp;
@@ -2731,7 +2745,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
2731 __be32 sip, tip; 2745 __be32 sip, tip;
2732 2746
2733 if (skb->protocol != __cpu_to_be16(ETH_P_ARP)) 2747 if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
2734 return; 2748 return RX_HANDLER_ANOTHER;
2735 2749
2736 read_lock(&bond->lock); 2750 read_lock(&bond->lock);
2737 2751
@@ -2776,6 +2790,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
2776 2790
2777out_unlock: 2791out_unlock:
2778 read_unlock(&bond->lock); 2792 read_unlock(&bond->lock);
2793 return RX_HANDLER_ANOTHER;
2779} 2794}
2780 2795
2781/* 2796/*
@@ -4820,12 +4835,9 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
4820 return 0; 4835 return 0;
4821} 4836}
4822 4837
4823static int bond_get_tx_queues(struct net *net, struct nlattr *tb[], 4838static int bond_get_tx_queues(struct net *net, struct nlattr *tb[])
4824 unsigned int *num_queues,
4825 unsigned int *real_num_queues)
4826{ 4839{
4827 *num_queues = tx_queues; 4840 return tx_queues;
4828 return 0;
4829} 4841}
4830 4842
4831static struct rtnl_link_ops bond_link_ops __read_mostly = { 4843static struct rtnl_link_ops bond_link_ops __read_mostly = {
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 9f2bae6616d3..4581aa5ccaba 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -218,7 +218,7 @@ struct bonding {
218 struct slave *primary_slave; 218 struct slave *primary_slave;
219 bool force_primary; 219 bool force_primary;
220 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ 220 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
221 void (*recv_probe)(struct sk_buff *, struct bonding *, 221 int (*recv_probe)(struct sk_buff *, struct bonding *,
222 struct slave *); 222 struct slave *);
223 rwlock_t lock; 223 rwlock_t lock;
224 rwlock_t curr_slave_lock; 224 rwlock_t curr_slave_lock;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 9a66e2a910ae..1520814c77c7 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -6,6 +6,8 @@
6 * License terms: GNU General Public License (GPL) version 2. 6 * License terms: GNU General Public License (GPL) version 2.
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME fmt
10
9#include <linux/init.h> 11#include <linux/init.h>
10#include <linux/module.h> 12#include <linux/module.h>
11#include <linux/device.h> 13#include <linux/device.h>
@@ -19,6 +21,7 @@
19#include <linux/if_arp.h> 21#include <linux/if_arp.h>
20#include <linux/timer.h> 22#include <linux/timer.h>
21#include <linux/rtnetlink.h> 23#include <linux/rtnetlink.h>
24#include <linux/pkt_sched.h>
22#include <net/caif/caif_layer.h> 25#include <net/caif/caif_layer.h>
23#include <net/caif/caif_hsi.h> 26#include <net/caif/caif_hsi.h>
24 27
@@ -34,6 +37,10 @@ static int inactivity_timeout = 1000;
34module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR); 37module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
35MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms."); 38MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
36 39
40static int aggregation_timeout = 1;
41module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
42MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
43
37/* 44/*
38 * HSI padding options. 45 * HSI padding options.
39 * Warning: must be a base of 2 (& operation used) and can not be zero ! 46 * Warning: must be a base of 2 (& operation used) and can not be zero !
@@ -86,24 +93,84 @@ static void cfhsi_inactivity_tout(unsigned long arg)
86 queue_work(cfhsi->wq, &cfhsi->wake_down_work); 93 queue_work(cfhsi->wq, &cfhsi->wake_down_work);
87} 94}
88 95
96static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
97 const struct sk_buff *skb,
98 int direction)
99{
100 struct caif_payload_info *info;
101 int hpad, tpad, len;
102
103 info = (struct caif_payload_info *)&skb->cb;
104 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
105 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
106 len = skb->len + hpad + tpad;
107
108 if (direction > 0)
109 cfhsi->aggregation_len += len;
110 else if (direction < 0)
111 cfhsi->aggregation_len -= len;
112}
113
114static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
115{
116 int i;
117
118 if (cfhsi->aggregation_timeout < 0)
119 return true;
120
121 for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
122 if (cfhsi->qhead[i].qlen)
123 return true;
124 }
125
126 /* TODO: Use aggregation_len instead */
127 if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
128 return true;
129
130 return false;
131}
132
133static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
134{
135 struct sk_buff *skb;
136 int i;
137
138 for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
139 skb = skb_dequeue(&cfhsi->qhead[i]);
140 if (skb)
141 break;
142 }
143
144 return skb;
145}
146
147static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
148{
149 int i, len = 0;
150 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
151 len += skb_queue_len(&cfhsi->qhead[i]);
152 return len;
153}
154
89static void cfhsi_abort_tx(struct cfhsi *cfhsi) 155static void cfhsi_abort_tx(struct cfhsi *cfhsi)
90{ 156{
91 struct sk_buff *skb; 157 struct sk_buff *skb;
92 158
93 for (;;) { 159 for (;;) {
94 spin_lock_bh(&cfhsi->lock); 160 spin_lock_bh(&cfhsi->lock);
95 skb = skb_dequeue(&cfhsi->qhead); 161 skb = cfhsi_dequeue(cfhsi);
96 if (!skb) 162 if (!skb)
97 break; 163 break;
98 164
99 cfhsi->ndev->stats.tx_errors++; 165 cfhsi->ndev->stats.tx_errors++;
100 cfhsi->ndev->stats.tx_dropped++; 166 cfhsi->ndev->stats.tx_dropped++;
167 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
101 spin_unlock_bh(&cfhsi->lock); 168 spin_unlock_bh(&cfhsi->lock);
102 kfree_skb(skb); 169 kfree_skb(skb);
103 } 170 }
104 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 171 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
105 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 172 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
106 mod_timer(&cfhsi->timer, 173 mod_timer(&cfhsi->inactivity_timer,
107 jiffies + cfhsi->inactivity_timeout); 174 jiffies + cfhsi->inactivity_timeout);
108 spin_unlock_bh(&cfhsi->lock); 175 spin_unlock_bh(&cfhsi->lock);
109} 176}
@@ -169,7 +236,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
169 struct sk_buff *skb; 236 struct sk_buff *skb;
170 u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; 237 u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
171 238
172 skb = skb_dequeue(&cfhsi->qhead); 239 skb = cfhsi_dequeue(cfhsi);
173 if (!skb) 240 if (!skb)
174 return 0; 241 return 0;
175 242
@@ -196,11 +263,16 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
196 pemb += hpad; 263 pemb += hpad;
197 264
198 /* Update network statistics. */ 265 /* Update network statistics. */
266 spin_lock_bh(&cfhsi->lock);
199 cfhsi->ndev->stats.tx_packets++; 267 cfhsi->ndev->stats.tx_packets++;
200 cfhsi->ndev->stats.tx_bytes += skb->len; 268 cfhsi->ndev->stats.tx_bytes += skb->len;
269 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
270 spin_unlock_bh(&cfhsi->lock);
201 271
202 /* Copy in embedded CAIF frame. */ 272 /* Copy in embedded CAIF frame. */
203 skb_copy_bits(skb, 0, pemb, skb->len); 273 skb_copy_bits(skb, 0, pemb, skb->len);
274
275 /* Consume the SKB */
204 consume_skb(skb); 276 consume_skb(skb);
205 skb = NULL; 277 skb = NULL;
206 } 278 }
@@ -214,7 +286,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
214 int tpad = 0; 286 int tpad = 0;
215 287
216 if (!skb) 288 if (!skb)
217 skb = skb_dequeue(&cfhsi->qhead); 289 skb = cfhsi_dequeue(cfhsi);
218 290
219 if (!skb) 291 if (!skb)
220 break; 292 break;
@@ -233,8 +305,11 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
233 pfrm += hpad; 305 pfrm += hpad;
234 306
235 /* Update network statistics. */ 307 /* Update network statistics. */
308 spin_lock_bh(&cfhsi->lock);
236 cfhsi->ndev->stats.tx_packets++; 309 cfhsi->ndev->stats.tx_packets++;
237 cfhsi->ndev->stats.tx_bytes += skb->len; 310 cfhsi->ndev->stats.tx_bytes += skb->len;
311 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
312 spin_unlock_bh(&cfhsi->lock);
238 313
239 /* Copy in CAIF frame. */ 314 /* Copy in CAIF frame. */
240 skb_copy_bits(skb, 0, pfrm, skb->len); 315 skb_copy_bits(skb, 0, pfrm, skb->len);
@@ -244,6 +319,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
244 319
245 /* Update frame pointer. */ 320 /* Update frame pointer. */
246 pfrm += skb->len + tpad; 321 pfrm += skb->len + tpad;
322
323 /* Consume the SKB */
247 consume_skb(skb); 324 consume_skb(skb);
248 skb = NULL; 325 skb = NULL;
249 326
@@ -258,8 +335,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
258 } 335 }
259 336
260 /* Check if we can piggy-back another descriptor. */ 337 /* Check if we can piggy-back another descriptor. */
261 skb = skb_peek(&cfhsi->qhead); 338 if (cfhsi_can_send_aggregate(cfhsi))
262 if (skb)
263 desc->header |= CFHSI_PIGGY_DESC; 339 desc->header |= CFHSI_PIGGY_DESC;
264 else 340 else
265 desc->header &= ~CFHSI_PIGGY_DESC; 341 desc->header &= ~CFHSI_PIGGY_DESC;
@@ -267,61 +343,71 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
267 return CFHSI_DESC_SZ + pld_len; 343 return CFHSI_DESC_SZ + pld_len;
268} 344}
269 345
270static void cfhsi_tx_done(struct cfhsi *cfhsi) 346static void cfhsi_start_tx(struct cfhsi *cfhsi)
271{ 347{
272 struct cfhsi_desc *desc = NULL; 348 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
273 int len = 0; 349 int len, res;
274 int res;
275 350
276 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 351 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
277 352
278 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 353 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
279 return; 354 return;
280 355
281 desc = (struct cfhsi_desc *)cfhsi->tx_buf;
282
283 do { 356 do {
284 /*
285 * Send flow on if flow off has been previously signalled
286 * and number of packets is below low water mark.
287 */
288 spin_lock_bh(&cfhsi->lock);
289 if (cfhsi->flow_off_sent &&
290 cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
291 cfhsi->cfdev.flowctrl) {
292
293 cfhsi->flow_off_sent = 0;
294 cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
295 }
296 spin_unlock_bh(&cfhsi->lock);
297
298 /* Create HSI frame. */ 357 /* Create HSI frame. */
299 do { 358 len = cfhsi_tx_frm(desc, cfhsi);
300 len = cfhsi_tx_frm(desc, cfhsi); 359 if (!len) {
301 if (!len) { 360 spin_lock_bh(&cfhsi->lock);
302 spin_lock_bh(&cfhsi->lock); 361 if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
303 if (unlikely(skb_peek(&cfhsi->qhead))) {
304 spin_unlock_bh(&cfhsi->lock);
305 continue;
306 }
307 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
308 /* Start inactivity timer. */
309 mod_timer(&cfhsi->timer,
310 jiffies + cfhsi->inactivity_timeout);
311 spin_unlock_bh(&cfhsi->lock); 362 spin_unlock_bh(&cfhsi->lock);
312 goto done; 363 res = -EAGAIN;
364 continue;
313 } 365 }
314 } while (!len); 366 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
367 /* Start inactivity timer. */
368 mod_timer(&cfhsi->inactivity_timer,
369 jiffies + cfhsi->inactivity_timeout);
370 spin_unlock_bh(&cfhsi->lock);
371 break;
372 }
315 373
316 /* Set up new transfer. */ 374 /* Set up new transfer. */
317 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 375 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
318 if (WARN_ON(res < 0)) { 376 if (WARN_ON(res < 0))
319 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 377 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
320 __func__, res); 378 __func__, res);
321 }
322 } while (res < 0); 379 } while (res < 0);
380}
381
382static void cfhsi_tx_done(struct cfhsi *cfhsi)
383{
384 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
385
386 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
387 return;
388
389 /*
390 * Send flow on if flow off has been previously signalled
391 * and number of packets is below low water mark.
392 */
393 spin_lock_bh(&cfhsi->lock);
394 if (cfhsi->flow_off_sent &&
395 cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
396 cfhsi->cfdev.flowctrl) {
397
398 cfhsi->flow_off_sent = 0;
399 cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
400 }
401
402 if (cfhsi_can_send_aggregate(cfhsi)) {
403 spin_unlock_bh(&cfhsi->lock);
404 cfhsi_start_tx(cfhsi);
405 } else {
406 mod_timer(&cfhsi->aggregation_timer,
407 jiffies + cfhsi->aggregation_timeout);
408 spin_unlock_bh(&cfhsi->lock);
409 }
323 410
324done:
325 return; 411 return;
326} 412}
327 413
@@ -560,7 +646,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
560 646
561 /* Update inactivity timer if pending. */ 647 /* Update inactivity timer if pending. */
562 spin_lock_bh(&cfhsi->lock); 648 spin_lock_bh(&cfhsi->lock);
563 mod_timer_pending(&cfhsi->timer, 649 mod_timer_pending(&cfhsi->inactivity_timer,
564 jiffies + cfhsi->inactivity_timeout); 650 jiffies + cfhsi->inactivity_timeout);
565 spin_unlock_bh(&cfhsi->lock); 651 spin_unlock_bh(&cfhsi->lock);
566 652
@@ -744,14 +830,14 @@ static void cfhsi_wake_up(struct work_struct *work)
744 size_t fifo_occupancy = 0; 830 size_t fifo_occupancy = 0;
745 831
746 /* Wakeup timeout */ 832 /* Wakeup timeout */
747 dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", 833 dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
748 __func__); 834 __func__);
749 835
750 /* Check FIFO to check if modem has sent something. */ 836 /* Check FIFO to check if modem has sent something. */
751 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, 837 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
752 &fifo_occupancy)); 838 &fifo_occupancy));
753 839
754 dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", 840 dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
755 __func__, (unsigned) fifo_occupancy); 841 __func__, (unsigned) fifo_occupancy);
756 842
757 /* Check if we misssed the interrupt. */ 843 /* Check if we misssed the interrupt. */
@@ -793,12 +879,12 @@ wake_ack:
793 879
794 spin_lock_bh(&cfhsi->lock); 880 spin_lock_bh(&cfhsi->lock);
795 881
796 /* Resume transmit if queue is not empty. */ 882 /* Resume transmit if queues are not empty. */
797 if (!skb_peek(&cfhsi->qhead)) { 883 if (!cfhsi_tx_queue_len(cfhsi)) {
798 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n", 884 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
799 __func__); 885 __func__);
800 /* Start inactivity timer. */ 886 /* Start inactivity timer. */
801 mod_timer(&cfhsi->timer, 887 mod_timer(&cfhsi->inactivity_timer,
802 jiffies + cfhsi->inactivity_timeout); 888 jiffies + cfhsi->inactivity_timeout);
803 spin_unlock_bh(&cfhsi->lock); 889 spin_unlock_bh(&cfhsi->lock);
804 return; 890 return;
@@ -934,20 +1020,53 @@ static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
934 wake_up_interruptible(&cfhsi->wake_down_wait); 1020 wake_up_interruptible(&cfhsi->wake_down_wait);
935} 1021}
936 1022
1023static void cfhsi_aggregation_tout(unsigned long arg)
1024{
1025 struct cfhsi *cfhsi = (struct cfhsi *)arg;
1026
1027 dev_dbg(&cfhsi->ndev->dev, "%s.\n",
1028 __func__);
1029
1030 cfhsi_start_tx(cfhsi);
1031}
1032
937static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) 1033static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
938{ 1034{
939 struct cfhsi *cfhsi = NULL; 1035 struct cfhsi *cfhsi = NULL;
940 int start_xfer = 0; 1036 int start_xfer = 0;
941 int timer_active; 1037 int timer_active;
1038 int prio;
942 1039
943 if (!dev) 1040 if (!dev)
944 return -EINVAL; 1041 return -EINVAL;
945 1042
946 cfhsi = netdev_priv(dev); 1043 cfhsi = netdev_priv(dev);
947 1044
1045 switch (skb->priority) {
1046 case TC_PRIO_BESTEFFORT:
1047 case TC_PRIO_FILLER:
1048 case TC_PRIO_BULK:
1049 prio = CFHSI_PRIO_BEBK;
1050 break;
1051 case TC_PRIO_INTERACTIVE_BULK:
1052 prio = CFHSI_PRIO_VI;
1053 break;
1054 case TC_PRIO_INTERACTIVE:
1055 prio = CFHSI_PRIO_VO;
1056 break;
1057 case TC_PRIO_CONTROL:
1058 default:
1059 prio = CFHSI_PRIO_CTL;
1060 break;
1061 }
1062
948 spin_lock_bh(&cfhsi->lock); 1063 spin_lock_bh(&cfhsi->lock);
949 1064
950 skb_queue_tail(&cfhsi->qhead, skb); 1065 /* Update aggregation statistics */
1066 cfhsi_update_aggregation_stats(cfhsi, skb, 1);
1067
1068 /* Queue the SKB */
1069 skb_queue_tail(&cfhsi->qhead[prio], skb);
951 1070
952 /* Sanity check; xmit should not be called after unregister_netdev */ 1071 /* Sanity check; xmit should not be called after unregister_netdev */
953 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) { 1072 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
@@ -958,7 +1077,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
958 1077
959 /* Send flow off if number of packets is above high water mark. */ 1078 /* Send flow off if number of packets is above high water mark. */
960 if (!cfhsi->flow_off_sent && 1079 if (!cfhsi->flow_off_sent &&
961 cfhsi->qhead.qlen > cfhsi->q_high_mark && 1080 cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
962 cfhsi->cfdev.flowctrl) { 1081 cfhsi->cfdev.flowctrl) {
963 cfhsi->flow_off_sent = 1; 1082 cfhsi->flow_off_sent = 1;
964 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); 1083 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -970,12 +1089,18 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
970 } 1089 }
971 1090
972 if (!start_xfer) { 1091 if (!start_xfer) {
1092 /* Send aggregate if it is possible */
1093 bool aggregate_ready =
1094 cfhsi_can_send_aggregate(cfhsi) &&
1095 del_timer(&cfhsi->aggregation_timer) > 0;
973 spin_unlock_bh(&cfhsi->lock); 1096 spin_unlock_bh(&cfhsi->lock);
1097 if (aggregate_ready)
1098 cfhsi_start_tx(cfhsi);
974 return 0; 1099 return 0;
975 } 1100 }
976 1101
977 /* Delete inactivity timer if started. */ 1102 /* Delete inactivity timer if started. */
978 timer_active = del_timer_sync(&cfhsi->timer); 1103 timer_active = del_timer_sync(&cfhsi->inactivity_timer);
979 1104
980 spin_unlock_bh(&cfhsi->lock); 1105 spin_unlock_bh(&cfhsi->lock);
981 1106
@@ -1004,28 +1129,11 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1004 return 0; 1129 return 0;
1005} 1130}
1006 1131
1007static int cfhsi_open(struct net_device *dev) 1132static const struct net_device_ops cfhsi_ops;
1008{
1009 netif_wake_queue(dev);
1010
1011 return 0;
1012}
1013
1014static int cfhsi_close(struct net_device *dev)
1015{
1016 netif_stop_queue(dev);
1017
1018 return 0;
1019}
1020
1021static const struct net_device_ops cfhsi_ops = {
1022 .ndo_open = cfhsi_open,
1023 .ndo_stop = cfhsi_close,
1024 .ndo_start_xmit = cfhsi_xmit
1025};
1026 1133
1027static void cfhsi_setup(struct net_device *dev) 1134static void cfhsi_setup(struct net_device *dev)
1028{ 1135{
1136 int i;
1029 struct cfhsi *cfhsi = netdev_priv(dev); 1137 struct cfhsi *cfhsi = netdev_priv(dev);
1030 dev->features = 0; 1138 dev->features = 0;
1031 dev->netdev_ops = &cfhsi_ops; 1139 dev->netdev_ops = &cfhsi_ops;
@@ -1034,7 +1142,8 @@ static void cfhsi_setup(struct net_device *dev)
1034 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; 1142 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1035 dev->tx_queue_len = 0; 1143 dev->tx_queue_len = 0;
1036 dev->destructor = free_netdev; 1144 dev->destructor = free_netdev;
1037 skb_queue_head_init(&cfhsi->qhead); 1145 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1146 skb_queue_head_init(&cfhsi->qhead[i]);
1038 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; 1147 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
1039 cfhsi->cfdev.use_frag = false; 1148 cfhsi->cfdev.use_frag = false;
1040 cfhsi->cfdev.use_stx = false; 1149 cfhsi->cfdev.use_stx = false;
@@ -1046,7 +1155,7 @@ int cfhsi_probe(struct platform_device *pdev)
1046{ 1155{
1047 struct cfhsi *cfhsi = NULL; 1156 struct cfhsi *cfhsi = NULL;
1048 struct net_device *ndev; 1157 struct net_device *ndev;
1049 struct cfhsi_dev *dev; 1158
1050 int res; 1159 int res;
1051 1160
1052 ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup); 1161 ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
@@ -1057,6 +1166,34 @@ int cfhsi_probe(struct platform_device *pdev)
1057 cfhsi->ndev = ndev; 1166 cfhsi->ndev = ndev;
1058 cfhsi->pdev = pdev; 1167 cfhsi->pdev = pdev;
1059 1168
1169 /* Assign the HSI device. */
1170 cfhsi->dev = pdev->dev.platform_data;
1171
1172 /* Assign the driver to this HSI device. */
1173 cfhsi->dev->drv = &cfhsi->drv;
1174
1175 /* Register network device. */
1176 res = register_netdev(ndev);
1177 if (res) {
1178 dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1179 __func__, res);
1180 free_netdev(ndev);
1181 }
1182 /* Add CAIF HSI device to list. */
1183 spin_lock(&cfhsi_list_lock);
1184 list_add_tail(&cfhsi->list, &cfhsi_list);
1185 spin_unlock(&cfhsi_list_lock);
1186
1187 return res;
1188}
1189
1190static int cfhsi_open(struct net_device *ndev)
1191{
1192 struct cfhsi *cfhsi = netdev_priv(ndev);
1193 int res;
1194
1195 clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1196
1060 /* Initialize state vaiables. */ 1197 /* Initialize state vaiables. */
1061 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 1198 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
1062 cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; 1199 cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
@@ -1066,12 +1203,6 @@ int cfhsi_probe(struct platform_device *pdev)
1066 cfhsi->q_low_mark = LOW_WATER_MARK; 1203 cfhsi->q_low_mark = LOW_WATER_MARK;
1067 cfhsi->q_high_mark = HIGH_WATER_MARK; 1204 cfhsi->q_high_mark = HIGH_WATER_MARK;
1068 1205
1069 /* Assign the HSI device. */
1070 dev = (struct cfhsi_dev *)pdev->dev.platform_data;
1071 cfhsi->dev = dev;
1072
1073 /* Assign the driver to this HSI device. */
1074 dev->drv = &cfhsi->drv;
1075 1206
1076 /* 1207 /*
1077 * Allocate a TX buffer with the size of a HSI packet descriptors 1208 * Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1111,6 +1242,9 @@ int cfhsi_probe(struct platform_device *pdev)
1111 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA; 1242 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1112 } 1243 }
1113 1244
1245 /* Initialize aggregation timeout */
1246 cfhsi->aggregation_timeout = aggregation_timeout;
1247
1114 /* Initialize recieve vaiables. */ 1248 /* Initialize recieve vaiables. */
1115 cfhsi->rx_ptr = cfhsi->rx_buf; 1249 cfhsi->rx_ptr = cfhsi->rx_buf;
1116 cfhsi->rx_len = CFHSI_DESC_SZ; 1250 cfhsi->rx_len = CFHSI_DESC_SZ;
@@ -1136,9 +1270,9 @@ int cfhsi_probe(struct platform_device *pdev)
1136 clear_bit(CFHSI_AWAKE, &cfhsi->bits); 1270 clear_bit(CFHSI_AWAKE, &cfhsi->bits);
1137 1271
1138 /* Create work thread. */ 1272 /* Create work thread. */
1139 cfhsi->wq = create_singlethread_workqueue(pdev->name); 1273 cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
1140 if (!cfhsi->wq) { 1274 if (!cfhsi->wq) {
1141 dev_err(&ndev->dev, "%s: Failed to create work queue.\n", 1275 dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n",
1142 __func__); 1276 __func__);
1143 res = -ENODEV; 1277 res = -ENODEV;
1144 goto err_create_wq; 1278 goto err_create_wq;
@@ -1150,18 +1284,17 @@ int cfhsi_probe(struct platform_device *pdev)
1150 init_waitqueue_head(&cfhsi->flush_fifo_wait); 1284 init_waitqueue_head(&cfhsi->flush_fifo_wait);
1151 1285
1152 /* Setup the inactivity timer. */ 1286 /* Setup the inactivity timer. */
1153 init_timer(&cfhsi->timer); 1287 init_timer(&cfhsi->inactivity_timer);
1154 cfhsi->timer.data = (unsigned long)cfhsi; 1288 cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
1155 cfhsi->timer.function = cfhsi_inactivity_tout; 1289 cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
1156 /* Setup the slowpath RX timer. */ 1290 /* Setup the slowpath RX timer. */
1157 init_timer(&cfhsi->rx_slowpath_timer); 1291 init_timer(&cfhsi->rx_slowpath_timer);
1158 cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi; 1292 cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
1159 cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath; 1293 cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
1160 1294 /* Setup the aggregation timer. */
1161 /* Add CAIF HSI device to list. */ 1295 init_timer(&cfhsi->aggregation_timer);
1162 spin_lock(&cfhsi_list_lock); 1296 cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
1163 list_add_tail(&cfhsi->list, &cfhsi_list); 1297 cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
1164 spin_unlock(&cfhsi_list_lock);
1165 1298
1166 /* Activate HSI interface. */ 1299 /* Activate HSI interface. */
1167 res = cfhsi->dev->cfhsi_up(cfhsi->dev); 1300 res = cfhsi->dev->cfhsi_up(cfhsi->dev);
@@ -1175,21 +1308,10 @@ int cfhsi_probe(struct platform_device *pdev)
1175 /* Flush FIFO */ 1308 /* Flush FIFO */
1176 res = cfhsi_flush_fifo(cfhsi); 1309 res = cfhsi_flush_fifo(cfhsi);
1177 if (res) { 1310 if (res) {
1178 dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n", 1311 dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n",
1179 __func__, res); 1312 __func__, res);
1180 goto err_net_reg; 1313 goto err_net_reg;
1181 } 1314 }
1182
1183 /* Register network device. */
1184 res = register_netdev(ndev);
1185 if (res) {
1186 dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1187 __func__, res);
1188 goto err_net_reg;
1189 }
1190
1191 netif_stop_queue(ndev);
1192
1193 return res; 1315 return res;
1194 1316
1195 err_net_reg: 1317 err_net_reg:
@@ -1203,17 +1325,13 @@ int cfhsi_probe(struct platform_device *pdev)
1203 err_alloc_rx: 1325 err_alloc_rx:
1204 kfree(cfhsi->tx_buf); 1326 kfree(cfhsi->tx_buf);
1205 err_alloc_tx: 1327 err_alloc_tx:
1206 free_netdev(ndev);
1207
1208 return res; 1328 return res;
1209} 1329}
1210 1330
1211static void cfhsi_shutdown(struct cfhsi *cfhsi) 1331static int cfhsi_close(struct net_device *ndev)
1212{ 1332{
1213 u8 *tx_buf, *rx_buf; 1333 struct cfhsi *cfhsi = netdev_priv(ndev);
1214 1334 u8 *tx_buf, *rx_buf, *flip_buf;
1215 /* Stop TXing */
1216 netif_tx_stop_all_queues(cfhsi->ndev);
1217 1335
1218 /* going to shutdown driver */ 1336 /* going to shutdown driver */
1219 set_bit(CFHSI_SHUTDOWN, &cfhsi->bits); 1337 set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
@@ -1222,8 +1340,9 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
1222 flush_workqueue(cfhsi->wq); 1340 flush_workqueue(cfhsi->wq);
1223 1341
1224 /* Delete timers if pending */ 1342 /* Delete timers if pending */
1225 del_timer_sync(&cfhsi->timer); 1343 del_timer_sync(&cfhsi->inactivity_timer);
1226 del_timer_sync(&cfhsi->rx_slowpath_timer); 1344 del_timer_sync(&cfhsi->rx_slowpath_timer);
1345 del_timer_sync(&cfhsi->aggregation_timer);
1227 1346
1228 /* Cancel pending RX request (if any) */ 1347 /* Cancel pending RX request (if any) */
1229 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); 1348 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
@@ -1234,21 +1353,26 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
1234 /* Store bufferes: will be freed later. */ 1353 /* Store bufferes: will be freed later. */
1235 tx_buf = cfhsi->tx_buf; 1354 tx_buf = cfhsi->tx_buf;
1236 rx_buf = cfhsi->rx_buf; 1355 rx_buf = cfhsi->rx_buf;
1237 1356 flip_buf = cfhsi->rx_flip_buf;
1238 /* Flush transmit queues. */ 1357 /* Flush transmit queues. */
1239 cfhsi_abort_tx(cfhsi); 1358 cfhsi_abort_tx(cfhsi);
1240 1359
1241 /* Deactivate interface */ 1360 /* Deactivate interface */
1242 cfhsi->dev->cfhsi_down(cfhsi->dev); 1361 cfhsi->dev->cfhsi_down(cfhsi->dev);
1243 1362
1244 /* Finally unregister the network device. */
1245 unregister_netdev(cfhsi->ndev);
1246
1247 /* Free buffers. */ 1363 /* Free buffers. */
1248 kfree(tx_buf); 1364 kfree(tx_buf);
1249 kfree(rx_buf); 1365 kfree(rx_buf);
1366 kfree(flip_buf);
1367 return 0;
1250} 1368}
1251 1369
1370static const struct net_device_ops cfhsi_ops = {
1371 .ndo_open = cfhsi_open,
1372 .ndo_stop = cfhsi_close,
1373 .ndo_start_xmit = cfhsi_xmit
1374};
1375
1252int cfhsi_remove(struct platform_device *pdev) 1376int cfhsi_remove(struct platform_device *pdev)
1253{ 1377{
1254 struct list_head *list_node; 1378 struct list_head *list_node;
@@ -1265,10 +1389,6 @@ int cfhsi_remove(struct platform_device *pdev)
1265 /* Remove from list. */ 1389 /* Remove from list. */
1266 list_del(list_node); 1390 list_del(list_node);
1267 spin_unlock(&cfhsi_list_lock); 1391 spin_unlock(&cfhsi_list_lock);
1268
1269 /* Shutdown driver. */
1270 cfhsi_shutdown(cfhsi);
1271
1272 return 0; 1392 return 0;
1273 } 1393 }
1274 } 1394 }
@@ -1299,8 +1419,7 @@ static void __exit cfhsi_exit_module(void)
1299 list_del(list_node); 1419 list_del(list_node);
1300 spin_unlock(&cfhsi_list_lock); 1420 spin_unlock(&cfhsi_list_lock);
1301 1421
1302 /* Shutdown driver. */ 1422 unregister_netdevice(cfhsi->ndev);
1303 cfhsi_shutdown(cfhsi);
1304 1423
1305 spin_lock(&cfhsi_list_lock); 1424 spin_lock(&cfhsi_list_lock);
1306 } 1425 }
@@ -1325,8 +1444,6 @@ static int __init cfhsi_init_module(void)
1325 goto err_dev_register; 1444 goto err_dev_register;
1326 } 1445 }
1327 1446
1328 return result;
1329
1330 err_dev_register: 1447 err_dev_register:
1331 return result; 1448 return result;
1332} 1449}
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 5b2041319a32..bc497d718858 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/io.h>
16 17
17#include <net/caif/caif_device.h> 18#include <net/caif/caif_device.h>
18#include <net/caif/caif_shm.h> 19#include <net/caif/caif_shm.h>
@@ -647,6 +648,9 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
647 if (pshm_dev->shm_loopback) 648 if (pshm_dev->shm_loopback)
648 tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr; 649 tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
649 else 650 else
651 /*
652 * FIXME: the result of ioremap is not a pointer - arnd
653 */
650 tx_buf->desc_vptr = 654 tx_buf->desc_vptr =
651 ioremap(tx_buf->phy_addr, TX_BUF_SZ); 655 ioremap(tx_buf->phy_addr, TX_BUF_SZ);
652 656
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c5fe3a3db8c9..f03d7a481a80 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -687,18 +687,19 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
687 687
688 if (priv->do_get_state) 688 if (priv->do_get_state)
689 priv->do_get_state(dev, &state); 689 priv->do_get_state(dev, &state);
690 NLA_PUT_U32(skb, IFLA_CAN_STATE, state); 690 if (nla_put_u32(skb, IFLA_CAN_STATE, state) ||
691 NLA_PUT(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm); 691 nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
692 NLA_PUT_U32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms); 692 nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
693 NLA_PUT(skb, IFLA_CAN_BITTIMING, 693 nla_put(skb, IFLA_CAN_BITTIMING,
694 sizeof(priv->bittiming), &priv->bittiming); 694 sizeof(priv->bittiming), &priv->bittiming) ||
695 NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock); 695 nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
696 if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec)) 696 (priv->do_get_berr_counter &&
697 NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec); 697 !priv->do_get_berr_counter(dev, &bec) &&
698 if (priv->bittiming_const) 698 nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
699 NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST, 699 (priv->bittiming_const &&
700 sizeof(*priv->bittiming_const), priv->bittiming_const); 700 nla_put(skb, IFLA_CAN_BITTIMING_CONST,
701 701 sizeof(*priv->bittiming_const), priv->bittiming_const)))
702 goto nla_put_failure;
702 return 0; 703 return 0;
703 704
704nla_put_failure: 705nla_put_failure:
@@ -714,9 +715,9 @@ static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
714{ 715{
715 struct can_priv *priv = netdev_priv(dev); 716 struct can_priv *priv = netdev_priv(dev);
716 717
717 NLA_PUT(skb, IFLA_INFO_XSTATS, 718 if (nla_put(skb, IFLA_INFO_XSTATS,
718 sizeof(priv->can_stats), &priv->can_stats); 719 sizeof(priv->can_stats), &priv->can_stats))
719 720 goto nla_put_failure;
720 return 0; 721 return 0;
721 722
722nla_put_failure: 723nla_put_failure:
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 2bb215e00eb1..1226297e7676 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1274,17 +1274,7 @@ static struct pci_driver pch_can_pci_driver = {
1274 .resume = pch_can_resume, 1274 .resume = pch_can_resume,
1275}; 1275};
1276 1276
1277static int __init pch_can_pci_init(void) 1277module_pci_driver(pch_can_pci_driver);
1278{
1279 return pci_register_driver(&pch_can_pci_driver);
1280}
1281module_init(pch_can_pci_init);
1282
1283static void __exit pch_can_pci_exit(void)
1284{
1285 pci_unregister_driver(&pch_can_pci_driver);
1286}
1287module_exit(pch_can_pci_exit);
1288 1278
1289MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver"); 1279MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
1290MODULE_LICENSE("GPL v2"); 1280MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 36f4f9780c30..5c6d412bafb5 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -371,16 +371,4 @@ static struct pci_driver ems_pci_driver = {
371 .remove = ems_pci_del_card, 371 .remove = ems_pci_del_card,
372}; 372};
373 373
374static int __init ems_pci_init(void) 374module_pci_driver(ems_pci_driver);
375{
376 return pci_register_driver(&ems_pci_driver);
377}
378
379static void __exit ems_pci_exit(void)
380{
381 pci_unregister_driver(&ems_pci_driver);
382}
383
384module_init(ems_pci_init);
385module_exit(ems_pci_exit);
386
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index ed004cebd31f..23ed6ea4c7c3 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -397,15 +397,4 @@ static struct pci_driver kvaser_pci_driver = {
397 .remove = __devexit_p(kvaser_pci_remove_one), 397 .remove = __devexit_p(kvaser_pci_remove_one),
398}; 398};
399 399
400static int __init kvaser_pci_init(void) 400module_pci_driver(kvaser_pci_driver);
401{
402 return pci_register_driver(&kvaser_pci_driver);
403}
404
405static void __exit kvaser_pci_exit(void)
406{
407 pci_unregister_driver(&kvaser_pci_driver);
408}
409
410module_init(kvaser_pci_init);
411module_exit(kvaser_pci_exit);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 5f92b865f64b..f0a12962f7b6 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -749,14 +749,4 @@ static struct pci_driver peak_pci_driver = {
749 .remove = __devexit_p(peak_pci_remove), 749 .remove = __devexit_p(peak_pci_remove),
750}; 750};
751 751
752static int __init peak_pci_init(void) 752module_pci_driver(peak_pci_driver);
753{
754 return pci_register_driver(&peak_pci_driver);
755}
756module_init(peak_pci_init);
757
758static void __exit peak_pci_exit(void)
759{
760 pci_unregister_driver(&peak_pci_driver);
761}
762module_exit(peak_pci_exit);
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index a227586ddd52..8bc95982840f 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -609,15 +609,4 @@ static struct pci_driver plx_pci_driver = {
609 .remove = plx_pci_del_card, 609 .remove = plx_pci_del_card,
610}; 610};
611 611
612static int __init plx_pci_init(void) 612module_pci_driver(plx_pci_driver);
613{
614 return pci_register_driver(&plx_pci_driver);
615}
616
617static void __exit plx_pci_exit(void)
618{
619 pci_unregister_driver(&plx_pci_driver);
620}
621
622module_init(plx_pci_init);
623module_exit(plx_pci_exit);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 5234586dff15..629c4ba5d49d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -875,6 +875,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
875 PCAN_USBPRO_INFO_FW, 875 PCAN_USBPRO_INFO_FW,
876 &fi, sizeof(fi)); 876 &fi, sizeof(fi));
877 if (err) { 877 if (err) {
878 kfree(usb_if);
878 dev_err(dev->netdev->dev.parent, 879 dev_err(dev->netdev->dev.parent,
879 "unable to read %s firmware info (err %d)\n", 880 "unable to read %s firmware info (err %d)\n",
880 pcan_usb_pro.name, err); 881 pcan_usb_pro.name, err);
@@ -885,6 +886,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
885 PCAN_USBPRO_INFO_BL, 886 PCAN_USBPRO_INFO_BL,
886 &bi, sizeof(bi)); 887 &bi, sizeof(bi));
887 if (err) { 888 if (err) {
889 kfree(usb_if);
888 dev_err(dev->netdev->dev.parent, 890 dev_err(dev->netdev->dev.parent,
889 "unable to read %s bootloader info (err %d)\n", 891 "unable to read %s bootloader info (err %d)\n",
890 pcan_usb_pro.name, err); 892 pcan_usb_pro.name, err);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index d5c6d92f1ee7..442d91a2747b 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -107,14 +107,14 @@ static int dummy_dev_init(struct net_device *dev)
107 return 0; 107 return 0;
108} 108}
109 109
110static void dummy_dev_free(struct net_device *dev) 110static void dummy_dev_uninit(struct net_device *dev)
111{ 111{
112 free_percpu(dev->dstats); 112 free_percpu(dev->dstats);
113 free_netdev(dev);
114} 113}
115 114
116static const struct net_device_ops dummy_netdev_ops = { 115static const struct net_device_ops dummy_netdev_ops = {
117 .ndo_init = dummy_dev_init, 116 .ndo_init = dummy_dev_init,
117 .ndo_uninit = dummy_dev_uninit,
118 .ndo_start_xmit = dummy_xmit, 118 .ndo_start_xmit = dummy_xmit,
119 .ndo_validate_addr = eth_validate_addr, 119 .ndo_validate_addr = eth_validate_addr,
120 .ndo_set_rx_mode = set_multicast_list, 120 .ndo_set_rx_mode = set_multicast_list,
@@ -128,7 +128,7 @@ static void dummy_setup(struct net_device *dev)
128 128
129 /* Initialize the device structure. */ 129 /* Initialize the device structure. */
130 dev->netdev_ops = &dummy_netdev_ops; 130 dev->netdev_ops = &dummy_netdev_ops;
131 dev->destructor = dummy_dev_free; 131 dev->destructor = free_netdev;
132 132
133 /* Fill in device structure with ethernet-generic values. */ 133 /* Fill in device structure with ethernet-generic values. */
134 dev->tx_queue_len = 0; 134 dev->tx_queue_len = 0;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 41719da2e178..1a8eef2c3d58 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -69,7 +69,6 @@
69#define TX_TIMEOUT (400*HZ/1000) 69#define TX_TIMEOUT (400*HZ/1000)
70 70
71#include <linux/module.h> 71#include <linux/module.h>
72#include <linux/mca.h>
73#include <linux/isa.h> 72#include <linux/isa.h>
74#include <linux/pnp.h> 73#include <linux/pnp.h>
75#include <linux/string.h> 74#include <linux/string.h>
@@ -102,7 +101,7 @@ static int el3_debug = 2;
102#endif 101#endif
103 102
104/* Used to do a global count of all the cards in the system. Must be 103/* Used to do a global count of all the cards in the system. Must be
105 * a global variable so that the mca/eisa probe routines can increment 104 * a global variable so that the eisa probe routines can increment
106 * it */ 105 * it */
107static int el3_cards = 0; 106static int el3_cards = 0;
108#define EL3_MAX_CARDS 8 107#define EL3_MAX_CARDS 8
@@ -163,7 +162,7 @@ enum RxFilter {
163 */ 162 */
164#define SKB_QUEUE_SIZE 64 163#define SKB_QUEUE_SIZE 64
165 164
166enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA }; 165enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_EISA };
167 166
168struct el3_private { 167struct el3_private {
169 spinlock_t lock; 168 spinlock_t lock;
@@ -505,41 +504,6 @@ static struct eisa_driver el3_eisa_driver = {
505static int eisa_registered; 504static int eisa_registered;
506#endif 505#endif
507 506
508#ifdef CONFIG_MCA
509static int el3_mca_probe(struct device *dev);
510
511static short el3_mca_adapter_ids[] __initdata = {
512 0x627c,
513 0x627d,
514 0x62db,
515 0x62f6,
516 0x62f7,
517 0x0000
518};
519
520static char *el3_mca_adapter_names[] __initdata = {
521 "3Com 3c529 EtherLink III (10base2)",
522 "3Com 3c529 EtherLink III (10baseT)",
523 "3Com 3c529 EtherLink III (test mode)",
524 "3Com 3c529 EtherLink III (TP or coax)",
525 "3Com 3c529 EtherLink III (TP)",
526 NULL
527};
528
529static struct mca_driver el3_mca_driver = {
530 .id_table = el3_mca_adapter_ids,
531 .driver = {
532 .name = "3c529",
533 .bus = &mca_bus_type,
534 .probe = el3_mca_probe,
535 .remove = __devexit_p(el3_device_remove),
536 .suspend = el3_suspend,
537 .resume = el3_resume,
538 },
539};
540static int mca_registered;
541#endif /* CONFIG_MCA */
542
543static const struct net_device_ops netdev_ops = { 507static const struct net_device_ops netdev_ops = {
544 .ndo_open = el3_open, 508 .ndo_open = el3_open,
545 .ndo_stop = el3_close, 509 .ndo_stop = el3_close,
@@ -600,76 +564,6 @@ static void el3_common_remove (struct net_device *dev)
600 free_netdev (dev); 564 free_netdev (dev);
601} 565}
602 566
603#ifdef CONFIG_MCA
604static int __init el3_mca_probe(struct device *device)
605{
606 /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
607 * heavily modified by Chris Beauregard
608 * (cpbeaure@csclub.uwaterloo.ca) to support standard MCA
609 * probing.
610 *
611 * redone for multi-card detection by ZP Gu (zpg@castle.net)
612 * now works as a module */
613
614 short i;
615 int ioaddr, irq, if_port;
616 __be16 phys_addr[3];
617 struct net_device *dev = NULL;
618 u_char pos4, pos5;
619 struct mca_device *mdev = to_mca_device(device);
620 int slot = mdev->slot;
621 int err;
622
623 pos4 = mca_device_read_stored_pos(mdev, 4);
624 pos5 = mca_device_read_stored_pos(mdev, 5);
625
626 ioaddr = ((short)((pos4&0xfc)|0x02)) << 8;
627 irq = pos5 & 0x0f;
628
629
630 pr_info("3c529: found %s at slot %d\n",
631 el3_mca_adapter_names[mdev->index], slot + 1);
632
633 /* claim the slot */
634 strncpy(mdev->name, el3_mca_adapter_names[mdev->index],
635 sizeof(mdev->name));
636 mca_device_set_claim(mdev, 1);
637
638 if_port = pos4 & 0x03;
639
640 irq = mca_device_transform_irq(mdev, irq);
641 ioaddr = mca_device_transform_ioport(mdev, ioaddr);
642 if (el3_debug > 2) {
643 pr_debug("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port);
644 }
645 EL3WINDOW(0);
646 for (i = 0; i < 3; i++)
647 phys_addr[i] = htons(read_eeprom(ioaddr, i));
648
649 dev = alloc_etherdev(sizeof (struct el3_private));
650 if (dev == NULL) {
651 release_region(ioaddr, EL3_IO_EXTENT);
652 return -ENOMEM;
653 }
654
655 netdev_boot_setup_check(dev);
656
657 el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA);
658 dev_set_drvdata(device, dev);
659 err = el3_common_init(dev);
660
661 if (err) {
662 dev_set_drvdata(device, NULL);
663 free_netdev(dev);
664 return -ENOMEM;
665 }
666
667 el3_devs[el3_cards++] = dev;
668 return 0;
669}
670
671#endif /* CONFIG_MCA */
672
673#ifdef CONFIG_EISA 567#ifdef CONFIG_EISA
674static int __init el3_eisa_probe (struct device *device) 568static int __init el3_eisa_probe (struct device *device)
675{ 569{
@@ -1547,11 +1441,6 @@ static int __init el3_init_module(void)
1547 if (!ret) 1441 if (!ret)
1548 eisa_registered = 1; 1442 eisa_registered = 1;
1549#endif 1443#endif
1550#ifdef CONFIG_MCA
1551 ret = mca_register_driver(&el3_mca_driver);
1552 if (!ret)
1553 mca_registered = 1;
1554#endif
1555 1444
1556#ifdef CONFIG_PNP 1445#ifdef CONFIG_PNP
1557 if (pnp_registered) 1446 if (pnp_registered)
@@ -1563,10 +1452,6 @@ static int __init el3_init_module(void)
1563 if (eisa_registered) 1452 if (eisa_registered)
1564 ret = 0; 1453 ret = 0;
1565#endif 1454#endif
1566#ifdef CONFIG_MCA
1567 if (mca_registered)
1568 ret = 0;
1569#endif
1570 return ret; 1455 return ret;
1571} 1456}
1572 1457
@@ -1584,10 +1469,6 @@ static void __exit el3_cleanup_module(void)
1584 if (eisa_registered) 1469 if (eisa_registered)
1585 eisa_driver_unregister(&el3_eisa_driver); 1470 eisa_driver_unregister(&el3_eisa_driver);
1586#endif 1471#endif
1587#ifdef CONFIG_MCA
1588 if (mca_registered)
1589 mca_unregister_driver(&el3_mca_driver);
1590#endif
1591} 1472}
1592 1473
1593module_init (el3_init_module); 1474module_init (el3_init_module);
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index e04ade444247..2e538676924d 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -60,6 +60,7 @@ config PCMCIA_AXNET
60config AX88796 60config AX88796
61 tristate "ASIX AX88796 NE2000 clone support" 61 tristate "ASIX AX88796 NE2000 clone support"
62 depends on (ARM || MIPS || SUPERH) 62 depends on (ARM || MIPS || SUPERH)
63 select CRC32
63 select PHYLIB 64 select PHYLIB
64 select MDIO_BITBANG 65 select MDIO_BITBANG
65 ---help--- 66 ---help---
@@ -181,18 +182,6 @@ config NE2000
181 To compile this driver as a module, choose M here. The module 182 To compile this driver as a module, choose M here. The module
182 will be called ne. 183 will be called ne.
183 184
184config NE2_MCA
185 tristate "NE/2 (ne2000 MCA version) support"
186 depends on MCA_LEGACY
187 select CRC32
188 ---help---
189 If you have a network (Ethernet) card of this type, say Y and read
190 the Ethernet-HOWTO, available from
191 <http://www.tldp.org/docs.html#howto>.
192
193 To compile this driver as a module, choose M here. The module
194 will be called ne2.
195
196config NE2K_PCI 185config NE2K_PCI
197 tristate "PCI NE2000 and clones support (see help)" 186 tristate "PCI NE2000 and clones support (see help)"
198 depends on PCI 187 depends on PCI
@@ -266,18 +255,6 @@ config STNIC
266 255
267 If unsure, say N. 256 If unsure, say N.
268 257
269config ULTRAMCA
270 tristate "SMC Ultra MCA support"
271 depends on MCA
272 select CRC32
273 ---help---
274 If you have a network (Ethernet) card of this type and are running
275 an MCA based system (PS/2), say Y and read the Ethernet-HOWTO,
276 available from <http://www.tldp.org/docs.html#howto>.
277
278 To compile this driver as a module, choose M here. The module
279 will be called smc-mca.
280
281config ULTRA 258config ULTRA
282 tristate "SMC Ultra support" 259 tristate "SMC Ultra support"
283 depends on ISA 260 depends on ISA
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index 3337d7fb4344..d13790b7fd27 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -24,6 +24,5 @@ obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
24obj-$(CONFIG_STNIC) += stnic.o 8390.o 24obj-$(CONFIG_STNIC) += stnic.o 8390.o
25obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o 25obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
26obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o 26obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
27obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
28obj-$(CONFIG_WD80x3) += wd.o 8390.o 27obj-$(CONFIG_WD80x3) += wd.o 8390.o
29obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o 28obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 11476ca95e93..203ff9dccadb 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -501,6 +501,7 @@ static const struct ethtool_ops ax_ethtool_ops = {
501 .get_settings = ax_get_settings, 501 .get_settings = ax_get_settings,
502 .set_settings = ax_set_settings, 502 .set_settings = ax_set_settings,
503 .get_link = ethtool_op_get_link, 503 .get_link = ethtool_op_get_link,
504 .get_ts_info = ethtool_op_get_ts_info,
504}; 505};
505 506
506#ifdef CONFIG_AX88796_93CX6 507#ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index dbefd5658c14..8322c54972f3 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -635,6 +635,7 @@ static const struct ethtool_ops etherh_ethtool_ops = {
635 .get_settings = etherh_get_settings, 635 .get_settings = etherh_get_settings,
636 .set_settings = etherh_set_settings, 636 .set_settings = etherh_set_settings,
637 .get_drvinfo = etherh_get_drvinfo, 637 .get_drvinfo = etherh_get_drvinfo,
638 .get_ts_info = ethtool_op_get_ts_info,
638}; 639};
639 640
640static const struct net_device_ops etherh_netdev_ops = { 641static const struct net_device_ops etherh_netdev_ops = {
diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c
deleted file mode 100644
index ef85839f43d8..000000000000
--- a/drivers/net/ethernet/8390/ne2.c
+++ /dev/null
@@ -1,798 +0,0 @@
1/* ne2.c: A NE/2 Ethernet Driver for Linux. */
2/*
3 Based on the NE2000 driver written by Donald Becker (1992-94).
4 modified by Wim Dumon (Apr 1996)
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8
9 The author may be reached as wimpie@linux.cc.kuleuven.ac.be
10
11 Currently supported: NE/2
12 This patch was never tested on other MCA-ethernet adapters, but it
13 might work. Just give it a try and let me know if you have problems.
14 Also mail me if it really works, please!
15
16 Changelog:
17 Mon Feb 3 16:26:02 MET 1997
18 - adapted the driver to work with the 2.1.25 kernel
19 - multiple ne2 support (untested)
20 - module support (untested)
21
22 Fri Aug 28 00:18:36 CET 1998 (David Weinehall)
23 - fixed a few minor typos
24 - made the MODULE_PARM conditional (it only works with the v2.1.x kernels)
25 - fixed the module support (Now it's working...)
26
27 Mon Sep 7 19:01:44 CET 1998 (David Weinehall)
28 - added support for Arco Electronics AE/2-card (experimental)
29
30 Mon Sep 14 09:53:42 CET 1998 (David Weinehall)
31 - added support for Compex ENET-16MC/P (experimental)
32
33 Tue Sep 15 16:21:12 CET 1998 (David Weinehall, Magnus Jonsson, Tomas Ogren)
34 - Miscellaneous bugfixes
35
36 Tue Sep 19 16:21:12 CET 1998 (Magnus Jonsson)
37 - Cleanup
38
39 Wed Sep 23 14:33:34 CET 1998 (David Weinehall)
40 - Restructuring and rewriting for v2.1.x compliance
41
42 Wed Oct 14 17:19:21 CET 1998 (David Weinehall)
43 - Added code that unregisters irq and proc-info
44 - Version# bump
45
46 Mon Nov 16 15:28:23 CET 1998 (Wim Dumon)
47 - pass 'dev' as last parameter of request_irq in stead of 'NULL'
48
49 Wed Feb 7 21:24:00 CET 2001 (Alfred Arnold)
50 - added support for the D-Link DE-320CT
51
52 * WARNING
53 -------
54 This is alpha-test software. It is not guaranteed to work. As a
55 matter of fact, I'm quite sure there are *LOTS* of bugs in here. I
56 would like to hear from you if you use this driver, even if it works.
57 If it doesn't work, be sure to send me a mail with the problems !
58*/
59
60static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.org>\n";
61
62#include <linux/module.h>
63#include <linux/kernel.h>
64#include <linux/types.h>
65#include <linux/fcntl.h>
66#include <linux/interrupt.h>
67#include <linux/ioport.h>
68#include <linux/in.h>
69#include <linux/string.h>
70#include <linux/errno.h>
71#include <linux/init.h>
72#include <linux/mca-legacy.h>
73#include <linux/netdevice.h>
74#include <linux/etherdevice.h>
75#include <linux/skbuff.h>
76#include <linux/bitops.h>
77#include <linux/jiffies.h>
78
79#include <asm/io.h>
80#include <asm/dma.h>
81
82#include "8390.h"
83
84#define DRV_NAME "ne2"
85
86/* Some defines that people can play with if so inclined. */
87
88/* Do we perform extra sanity checks on stuff ? */
89/* #define NE_SANITY_CHECK */
90
91/* Do we implement the read before write bugfix ? */
92/* #define NE_RW_BUGFIX */
93
94/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
95/* #define PACKETBUF_MEMSIZE 0x40 */
96
97
98/* ---- No user-serviceable parts below ---- */
99
100#define NE_BASE (dev->base_addr)
101#define NE_CMD 0x00
102#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
103#define NE_RESET 0x20 /* Issue a read to reset, a write to clear. */
104#define NE_IO_EXTENT 0x30
105
106#define NE1SM_START_PG 0x20 /* First page of TX buffer */
107#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
108#define NESM_START_PG 0x40 /* First page of TX buffer */
109#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
110
111/* From the .ADF file: */
112static unsigned int addresses[7] __initdata =
113 {0x1000, 0x2020, 0x8020, 0xa0a0, 0xb0b0, 0xc0c0, 0xc3d0};
114static int irqs[4] __initdata = {3, 4, 5, 9};
115
116/* From the D-Link ADF file: */
117static unsigned int dlink_addresses[4] __initdata =
118 {0x300, 0x320, 0x340, 0x360};
119static int dlink_irqs[8] __initdata = {3, 4, 5, 9, 10, 11, 14, 15};
120
121struct ne2_adapters_t {
122 unsigned int id;
123 char *name;
124};
125
126static struct ne2_adapters_t ne2_adapters[] __initdata = {
127 { 0x6354, "Arco Ethernet Adapter AE/2" },
128 { 0x70DE, "Compex ENET-16 MC/P" },
129 { 0x7154, "Novell Ethernet Adapter NE/2" },
130 { 0x56ea, "D-Link DE-320CT" },
131 { 0x0000, NULL }
132};
133
134extern int netcard_probe(struct net_device *dev);
135
136static int ne2_probe1(struct net_device *dev, int slot);
137
138static void ne_reset_8390(struct net_device *dev);
139static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
140 int ring_page);
141static void ne_block_input(struct net_device *dev, int count,
142 struct sk_buff *skb, int ring_offset);
143static void ne_block_output(struct net_device *dev, const int count,
144 const unsigned char *buf, const int start_page);
145
146
147/*
148 * special code to read the DE-320's MAC address EEPROM. In contrast to a
149 * standard NE design, this is a serial EEPROM (93C46) that has to be read
150 * bit by bit. The EEPROM cotrol port at base + 0x1e has the following
151 * layout:
152 *
153 * Bit 0 = Data out (read from EEPROM)
154 * Bit 1 = Data in (write to EEPROM)
155 * Bit 2 = Clock
156 * Bit 3 = Chip Select
157 * Bit 7 = ~50 kHz clock for defined delays
158 *
159 */
160
161static void __init dlink_put_eeprom(unsigned char value, unsigned int addr)
162{
163 int z;
164 unsigned char v1, v2;
165
166 /* write the value to the NIC EEPROM register */
167
168 outb(value, addr + 0x1e);
169
170 /* now wait the clock line to toggle twice. Effectively, we are
171 waiting (at least) for one clock cycle */
172
173 for (z = 0; z < 2; z++) {
174 do {
175 v1 = inb(addr + 0x1e);
176 v2 = inb(addr + 0x1e);
177 }
178 while (!((v1 ^ v2) & 0x80));
179 }
180}
181
182static void __init dlink_send_eeprom_bit(unsigned int bit, unsigned int addr)
183{
184 /* shift data bit into correct position */
185
186 bit = bit << 1;
187
188 /* write value, keep clock line high for two cycles */
189
190 dlink_put_eeprom(0x09 | bit, addr);
191 dlink_put_eeprom(0x0d | bit, addr);
192 dlink_put_eeprom(0x0d | bit, addr);
193 dlink_put_eeprom(0x09 | bit, addr);
194}
195
196static void __init dlink_send_eeprom_word(unsigned int value, unsigned int len, unsigned int addr)
197{
198 int z;
199
200 /* adjust bits so that they are left-aligned in a 16-bit-word */
201
202 value = value << (16 - len);
203
204 /* shift bits out to the EEPROM */
205
206 for (z = 0; z < len; z++) {
207 dlink_send_eeprom_bit((value & 0x8000) >> 15, addr);
208 value = value << 1;
209 }
210}
211
212static unsigned int __init dlink_get_eeprom(unsigned int eeaddr, unsigned int addr)
213{
214 int z;
215 unsigned int value = 0;
216
217 /* pull the CS line low for a moment. This resets the EEPROM-
218 internal logic, and makes it ready for a new command. */
219
220 dlink_put_eeprom(0x01, addr);
221 dlink_put_eeprom(0x09, addr);
222
223 /* send one start bit, read command (1 - 0), plus the address to
224 the EEPROM */
225
226 dlink_send_eeprom_word(0x0180 | (eeaddr & 0x3f), 9, addr);
227
228 /* get the data word. We clock by sending 0s to the EEPROM, which
229 get ignored during the read process */
230
231 for (z = 0; z < 16; z++) {
232 dlink_send_eeprom_bit(0, addr);
233 value = (value << 1) | (inb(addr + 0x1e) & 0x01);
234 }
235
236 return value;
237}
238
239/*
240 * Note that at boot, this probe only picks up one card at a time.
241 */
242
243static int __init do_ne2_probe(struct net_device *dev)
244{
245 static int current_mca_slot = -1;
246 int i;
247 int adapter_found = 0;
248
249 /* Do not check any supplied i/o locations.
250 POS registers usually don't fail :) */
251
252 /* MCA cards have POS registers.
253 Autodetecting MCA cards is extremely simple.
254 Just search for the card. */
255
256 for(i = 0; (ne2_adapters[i].name != NULL) && !adapter_found; i++) {
257 current_mca_slot =
258 mca_find_unused_adapter(ne2_adapters[i].id, 0);
259
260 if((current_mca_slot != MCA_NOTFOUND) && !adapter_found) {
261 int res;
262 mca_set_adapter_name(current_mca_slot,
263 ne2_adapters[i].name);
264 mca_mark_as_used(current_mca_slot);
265
266 res = ne2_probe1(dev, current_mca_slot);
267 if (res)
268 mca_mark_as_unused(current_mca_slot);
269 return res;
270 }
271 }
272 return -ENODEV;
273}
274
275#ifndef MODULE
276struct net_device * __init ne2_probe(int unit)
277{
278 struct net_device *dev = alloc_eip_netdev();
279 int err;
280
281 if (!dev)
282 return ERR_PTR(-ENOMEM);
283
284 sprintf(dev->name, "eth%d", unit);
285 netdev_boot_setup_check(dev);
286
287 err = do_ne2_probe(dev);
288 if (err)
289 goto out;
290 return dev;
291out:
292 free_netdev(dev);
293 return ERR_PTR(err);
294}
295#endif
296
297static int ne2_procinfo(char *buf, int slot, struct net_device *dev)
298{
299 int len=0;
300
301 len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" );
302 len += sprintf(buf+len, "Driver written by Wim Dumon ");
303 len += sprintf(buf+len, "<wimpie@kotnet.org>\n");
304 len += sprintf(buf+len, "Modified by ");
305 len += sprintf(buf+len, "David Weinehall <tao@acc.umu.se>\n");
306 len += sprintf(buf+len, "and by Magnus Jonsson <bigfoot@acc.umu.se>\n");
307 len += sprintf(buf+len, "Based on the original NE2000 drivers\n" );
308 len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr);
309 len += sprintf(buf+len, "IRQ : %d\n", dev->irq);
310 len += sprintf(buf+len, "HW addr : %pM\n", dev->dev_addr);
311
312 return len;
313}
314
315static int __init ne2_probe1(struct net_device *dev, int slot)
316{
317 int i, base_addr, irq, retval;
318 unsigned char POS;
319 unsigned char SA_prom[32];
320 const char *name = "NE/2";
321 int start_page, stop_page;
322 static unsigned version_printed;
323
324 if (ei_debug && version_printed++ == 0)
325 printk(version);
326
327 printk("NE/2 ethercard found in slot %d:", slot);
328
329 /* Read base IO and IRQ from the POS-registers */
330 POS = mca_read_stored_pos(slot, 2);
331 if(!(POS % 2)) {
332 printk(" disabled.\n");
333 return -ENODEV;
334 }
335
336 /* handle different POS register structure for D-Link card */
337
338 if (mca_read_stored_pos(slot, 0) == 0xea) {
339 base_addr = dlink_addresses[(POS >> 5) & 0x03];
340 irq = dlink_irqs[(POS >> 2) & 0x07];
341 }
342 else {
343 i = (POS & 0xE)>>1;
344 /* printk("Halleluja sdog, als er na de pijl een 1 staat is 1 - 1 == 0"
345 " en zou het moeten werken -> %d\n", i);
346 The above line was for remote testing, thanx to sdog ... */
347 base_addr = addresses[i - 1];
348 irq = irqs[(POS & 0x60)>>5];
349 }
350
351 if (!request_region(base_addr, NE_IO_EXTENT, DRV_NAME))
352 return -EBUSY;
353
354#ifdef DEBUG
355 printk("POS info : pos 2 = %#x ; base = %#x ; irq = %ld\n", POS,
356 base_addr, irq);
357#endif
358
359#ifndef CRYNWR_WAY
360 /* Reset the card the way they do it in the Crynwr packet driver */
361 for (i=0; i<8; i++)
362 outb(0x0, base_addr + NE_RESET);
363 inb(base_addr + NE_RESET);
364 outb(0x21, base_addr + NE_CMD);
365 if (inb(base_addr + NE_CMD) != 0x21) {
366 printk("NE/2 adapter not responding\n");
367 retval = -ENODEV;
368 goto out;
369 }
370
371 /* In the crynwr sources they do a RAM-test here. I skip it. I suppose
372 my RAM is okay. Suppose your memory is broken. Then this test
373 should fail and you won't be able to use your card. But if I do not
374 test, you won't be able to use your card, neither. So this test
375 won't help you. */
376
377#else /* _I_ never tested it this way .. Go ahead and try ...*/
378 /* Reset card. Who knows what dain-bramaged state it was left in. */
379 {
380 unsigned long reset_start_time = jiffies;
381
382 /* DON'T change these to inb_p/outb_p or reset will fail on
383 clones.. */
384 outb(inb(base_addr + NE_RESET), base_addr + NE_RESET);
385
386 while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0)
387 if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
388 printk(" not found (no reset ack).\n");
389 retval = -ENODEV;
390 goto out;
391 }
392
393 outb_p(0xff, base_addr + EN0_ISR); /* Ack all intr. */
394 }
395#endif
396
397
398 /* Read the 16 bytes of station address PROM.
399 We must first initialize registers, similar to
400 NS8390p_init(eifdev, 0).
401 We can't reliably read the SAPROM address without this.
402 (I learned the hard way!). */
403 {
404 struct {
405 unsigned char value, offset;
406 } program_seq[] = {
407 /* Select page 0 */
408 {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD},
409 {0x49, EN0_DCFG}, /* Set WORD-wide (0x49) access. */
410 {0x00, EN0_RCNTLO}, /* Clear the count regs. */
411 {0x00, EN0_RCNTHI},
412 {0x00, EN0_IMR}, /* Mask completion irq. */
413 {0xFF, EN0_ISR},
414 {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
415 {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
416 {32, EN0_RCNTLO},
417 {0x00, EN0_RCNTHI},
418 {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
419 {0x00, EN0_RSARHI},
420 {E8390_RREAD+E8390_START, E8390_CMD},
421 };
422
423 for (i = 0; i < ARRAY_SIZE(program_seq); i++)
424 outb_p(program_seq[i].value, base_addr +
425 program_seq[i].offset);
426
427 }
428 for(i = 0; i < 6 /*sizeof(SA_prom)*/; i+=1) {
429 SA_prom[i] = inb(base_addr + NE_DATAPORT);
430 }
431
432 /* I don't know whether the previous sequence includes the general
433 board reset procedure, so better don't omit it and just overwrite
434 the garbage read from a DE-320 with correct stuff. */
435
436 if (mca_read_stored_pos(slot, 0) == 0xea) {
437 unsigned int v;
438
439 for (i = 0; i < 3; i++) {
440 v = dlink_get_eeprom(i, base_addr);
441 SA_prom[(i << 1) ] = v & 0xff;
442 SA_prom[(i << 1) + 1] = (v >> 8) & 0xff;
443 }
444 }
445
446 start_page = NESM_START_PG;
447 stop_page = NESM_STOP_PG;
448
449 dev->irq=irq;
450
451 /* Snarf the interrupt now. There's no point in waiting since we cannot
452 share and the board will usually be enabled. */
453 retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev);
454 if (retval) {
455 printk (" unable to get IRQ %d (irqval=%d).\n",
456 dev->irq, retval);
457 goto out;
458 }
459
460 dev->base_addr = base_addr;
461
462 for (i = 0; i < ETH_ALEN; i++)
463 dev->dev_addr[i] = SA_prom[i];
464
465 printk(" %pM\n", dev->dev_addr);
466
467 printk("%s: %s found at %#x, using IRQ %d.\n",
468 dev->name, name, base_addr, dev->irq);
469
470 mca_set_adapter_procfn(slot, (MCA_ProcFn) ne2_procinfo, dev);
471
472 ei_status.name = name;
473 ei_status.tx_start_page = start_page;
474 ei_status.stop_page = stop_page;
475 ei_status.word16 = (2 == 2);
476
477 ei_status.rx_start_page = start_page + TX_PAGES;
478#ifdef PACKETBUF_MEMSIZE
479 /* Allow the packet buffer size to be overridden by know-it-alls. */
480 ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
481#endif
482
483 ei_status.reset_8390 = &ne_reset_8390;
484 ei_status.block_input = &ne_block_input;
485 ei_status.block_output = &ne_block_output;
486 ei_status.get_8390_hdr = &ne_get_8390_hdr;
487
488 ei_status.priv = slot;
489
490 dev->netdev_ops = &eip_netdev_ops;
491 NS8390p_init(dev, 0);
492
493 retval = register_netdev(dev);
494 if (retval)
495 goto out1;
496 return 0;
497out1:
498 mca_set_adapter_procfn( ei_status.priv, NULL, NULL);
499 free_irq(dev->irq, dev);
500out:
501 release_region(base_addr, NE_IO_EXTENT);
502 return retval;
503}
504
/* Hard reset the card. This used to pause for the same period that a
   8390 reset command required, but that shouldn't be necessary.
   Toggles the board's reset port, clears the driver's in-flight
   TX/DMA state, and waits (bounded) for the reset acknowledge. */
static void ne_reset_8390(struct net_device *dev)
{
	unsigned long reset_start_time = jiffies;

	if (ei_debug > 1)
		printk("resetting the 8390 t=%ld...", jiffies);

	/* DON'T change these to inb_p/outb_p or reset will fail on clones. */
	outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);

	/* Any TX or remote-DMA in progress is void after a reset. */
	ei_status.txing = 0;
	ei_status.dmaing = 0;

	/* This check _should_not_ be necessary, omit eventually. */
	/* Poll the ISR reset bit for up to ~20ms (2*HZ/100 jiffies). */
	while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
		if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
			printk("%s: ne_reset_8390() did not complete.\n",
				dev->name);
			break;
		}
	outb_p(ENISR_RESET, NE_BASE + EN0_ISR);	/* Ack intr. */
}
529
/* Grab the 8390 specific header. Similar to the block_input routine, but
   we don't need to be concerned with ring wrap as the header will be at
   the start of a page, so we optimize accordingly. */

static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
		int ring_page)
{

	int nic_base = dev->base_addr;

	/* This *shouldn't* happen.
	   If it does, it's the last thing you'll see */
	if (ei_status.dmaing) {
		printk("%s: DMAing conflict in ne_get_8390_hdr "
			"[DMAstat:%d][irqlock:%d].\n",
			dev->name, ei_status.dmaing, ei_status.irqlock);
		return;
	}

	/* Mark the remote-DMA channel busy for the duration. */
	ei_status.dmaing |= 0x01;
	/* Program a remote read of sizeof(e8390_pkt_hdr) bytes starting
	   at offset 0 of 'ring_page' (the header sits on the page start). */
	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
	outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
	outb_p(0, nic_base + EN0_RCNTHI);
	outb_p(0, nic_base + EN0_RSARLO);	/* On page boundary */
	outb_p(ring_page, nic_base + EN0_RSARHI);
	outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);

	/* Word-wide or byte-wide dataport transfer per board mode. */
	if (ei_status.word16)
		insw(NE_BASE + NE_DATAPORT, hdr,
			sizeof(struct e8390_pkt_hdr)>>1);
	else
		insb(NE_BASE + NE_DATAPORT, hdr,
			sizeof(struct e8390_pkt_hdr));

	outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
	ei_status.dmaing &= ~0x01;
}
567
/* Block input and output, similar to the Crynwr packet driver. If you
   are porting to a new ethercard, look at the packet driver source for
   hints. The NEx000 doesn't share the on-board packet memory -- you have
   to put the packet out through the "remote DMA" dataport using outb. */

/* Copy 'count' bytes of a received packet from the card's ring buffer
   (starting at 'ring_offset') into skb->data via a remote-DMA read. */
static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb,
		int ring_offset)
{
#ifdef NE_SANITY_CHECK
	int xfer_count = count;
#endif
	int nic_base = dev->base_addr;
	char *buf = skb->data;

	/* This *shouldn't* happen.
	   If it does, it's the last thing you'll see */
	if (ei_status.dmaing) {
		printk("%s: DMAing conflict in ne_block_input "
			"[DMAstat:%d][irqlock:%d].\n",
			dev->name, ei_status.dmaing, ei_status.irqlock);
		return;
	}
	ei_status.dmaing |= 0x01;
	/* Program the remote-DMA byte count and start address, then kick
	   off the read.  The register order matters. */
	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
	outb_p(count & 0xff, nic_base + EN0_RCNTLO);
	outb_p(count >> 8, nic_base + EN0_RCNTHI);
	outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
	outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
	outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
	if (ei_status.word16) {
		insw(NE_BASE + NE_DATAPORT,buf,count>>1);
		if (count & 0x01) {
			/* Odd trailing byte in word mode. */
			buf[count-1] = inb(NE_BASE + NE_DATAPORT);
#ifdef NE_SANITY_CHECK
			xfer_count++;
#endif
		}
	} else {
		insb(NE_BASE + NE_DATAPORT, buf, count);
	}

#ifdef NE_SANITY_CHECK
	/* This was for the ALPHA version only, but enough people have
	   been encountering problems so it is still here. If you see
	   this message you either 1) have a slightly incompatible clone
	   or 2) have noise/speed problems with your bus. */
	if (ei_debug > 1) {	/* DMA termination address check... */
		int addr, tries = 20;
		do {
			/* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
			   -- it's broken for Rx on some cards! */
			int high = inb_p(nic_base + EN0_RSARHI);
			int low = inb_p(nic_base + EN0_RSARLO);
			addr = (high << 8) + low;
			if (((ring_offset + xfer_count) & 0xff) == low)
				break;
		} while (--tries > 0);
		if (tries <= 0)
			printk("%s: RX transfer address mismatch,"
				"%#4.4x (expected) vs. %#4.4x (actual).\n",
				dev->name, ring_offset + xfer_count, addr);
	}
#endif
	outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
	ei_status.dmaing &= ~0x01;
}
634
/* Push 'count' bytes of a packet to transmit into the card's buffer
   memory at 'start_page' via a remote-DMA write, then wait (bounded)
   for the remote-DMA-complete interrupt bit.  On a suspected failed
   transfer (NE_SANITY_CHECK) the write is retried once. */
static void ne_block_output(struct net_device *dev, int count,
		const unsigned char *buf, const int start_page)
{
	int nic_base = NE_BASE;
	unsigned long dma_start;
#ifdef NE_SANITY_CHECK
	int retries = 0;
#endif

	/* Round the count up for word writes. Do we need to do this?
	   What effect will an odd byte count have on the 8390?
	   I should check someday. */
	if (ei_status.word16 && (count & 0x01))
		count++;

	/* This *shouldn't* happen.
	   If it does, it's the last thing you'll see */
	if (ei_status.dmaing) {
		printk("%s: DMAing conflict in ne_block_output."
			"[DMAstat:%d][irqlock:%d]\n",
			dev->name, ei_status.dmaing, ei_status.irqlock);
		return;
	}
	ei_status.dmaing |= 0x01;
	/* We should already be in page 0, but to be safe... */
	outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);

#ifdef NE_SANITY_CHECK
retry:
#endif

#ifdef NE8390_RW_BUGFIX
	/* Handle the read-before-write bug the same way as the
	   Crynwr packet driver -- the NatSemi method doesn't work.
	   Actually this doesn't always work either, but if you have
	   problems with your NEx000 this is better than nothing! */
	outb_p(0x42, nic_base + EN0_RCNTLO);
	outb_p(0x00, nic_base + EN0_RCNTHI);
	outb_p(0x42, nic_base + EN0_RSARLO);
	outb_p(0x00, nic_base + EN0_RSARHI);
	outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
	/* Make certain that the dummy read has occurred. */
	SLOW_DOWN_IO;
	SLOW_DOWN_IO;
	SLOW_DOWN_IO;
#endif

	/* Clear any stale remote-DMA-complete status before starting. */
	outb_p(ENISR_RDC, nic_base + EN0_ISR);

	/* Now the normal output. */
	outb_p(count & 0xff, nic_base + EN0_RCNTLO);
	outb_p(count >> 8, nic_base + EN0_RCNTHI);
	outb_p(0x00, nic_base + EN0_RSARLO);
	outb_p(start_page, nic_base + EN0_RSARHI);

	outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
	if (ei_status.word16) {
		outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
	} else {
		outsb(NE_BASE + NE_DATAPORT, buf, count);
	}

	dma_start = jiffies;

#ifdef NE_SANITY_CHECK
	/* This was for the ALPHA version only, but enough people have
	   been encountering problems so it is still here. */

	if (ei_debug > 1) {	/* DMA termination address check... */
		int addr, tries = 20;
		do {
			int high = inb_p(nic_base + EN0_RSARHI);
			int low = inb_p(nic_base + EN0_RSARLO);
			addr = (high << 8) + low;
			if ((start_page << 8) + count == addr)
				break;
		} while (--tries > 0);
		if (tries <= 0) {
			printk("%s: Tx packet transfer address mismatch,"
				"%#4.4x (expected) vs. %#4.4x (actual).\n",
				dev->name, (start_page << 8) + count, addr);
			/* One retry only; retries stays incremented so the
			   second failure falls through. */
			if (retries++ == 0)
				goto retry;
		}
	}
#endif

	/* Wait up to ~20ms for remote-DMA complete; on timeout reset and
	   reinitialize the chip. */
	while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
		if (time_after(jiffies, dma_start + 2*HZ/100)) {	/* 20ms */
			printk("%s: timeout waiting for Tx RDC.\n", dev->name);
			ne_reset_8390(dev);
			NS8390p_init(dev, 1);
			break;
		}

	outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
	ei_status.dmaing &= ~0x01;
}
733
734
735#ifdef MODULE
736#define MAX_NE_CARDS 4 /* Max number of NE cards per module */
737static struct net_device *dev_ne[MAX_NE_CARDS];
738static int io[MAX_NE_CARDS];
739static int irq[MAX_NE_CARDS];
740static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
741MODULE_LICENSE("GPL");
742
743module_param_array(io, int, NULL, 0);
744module_param_array(irq, int, NULL, 0);
745module_param_array(bad, int, NULL, 0);
746MODULE_PARM_DESC(io, "(ignored)");
747MODULE_PARM_DESC(irq, "(ignored)");
748MODULE_PARM_DESC(bad, "(ignored)");
749
750/* Module code fixed by David Weinehall */
751
752int __init init_module(void)
753{
754 struct net_device *dev;
755 int this_dev, found = 0;
756
757 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
758 dev = alloc_eip_netdev();
759 if (!dev)
760 break;
761 dev->irq = irq[this_dev];
762 dev->mem_end = bad[this_dev];
763 dev->base_addr = io[this_dev];
764 if (do_ne2_probe(dev) == 0) {
765 dev_ne[found++] = dev;
766 continue;
767 }
768 free_netdev(dev);
769 break;
770 }
771 if (found)
772 return 0;
773 printk(KERN_WARNING "ne2.c: No NE/2 card found\n");
774 return -ENXIO;
775}
776
/* Release the per-card resources acquired by ne2_probe1(): the MCA
   slot claim and its /proc hook, the IRQ, and the I/O region.  Called
   after unregister_netdev() and before free_netdev(); the release
   order mirrors the acquisition order in reverse. */
static void cleanup_card(struct net_device *dev)
{
	mca_mark_as_unused(ei_status.priv);
	mca_set_adapter_procfn( ei_status.priv, NULL, NULL);
	free_irq(dev->irq, dev);
	release_region(dev->base_addr, NE_IO_EXTENT);
}
784
785void __exit cleanup_module(void)
786{
787 int this_dev;
788
789 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
790 struct net_device *dev = dev_ne[this_dev];
791 if (dev) {
792 unregister_netdev(dev);
793 cleanup_card(dev);
794 free_netdev(dev);
795 }
796 }
797}
798#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/smc-mca.c b/drivers/net/ethernet/8390/smc-mca.c
deleted file mode 100644
index 7a68590f2804..000000000000
--- a/drivers/net/ethernet/8390/smc-mca.c
+++ /dev/null
@@ -1,575 +0,0 @@
1/* smc-mca.c: A SMC Ultra ethernet driver for linux. */
2/*
3 Most of this driver, except for ultramca_probe is nearly
4 verbatim from smc-ultra.c by Donald Becker. The rest is
5 written and copyright 1996 by David Weis, weisd3458@uni.edu
6
7 This is a driver for the SMC Ultra and SMC EtherEZ ethercards.
8
9 This driver uses the cards in the 8390-compatible, shared memory mode.
10 Most of the run-time complexity is handled by the generic code in
11 8390.c.
12
   This driver enables the shared memory only when doing the actual data
   transfers to avoid a bug in early versions of the card that corrupted
   data transferred by an AHA1542.
16
17 This driver does not support the programmed-I/O data transfer mode of
18 the EtherEZ. That support (if available) is smc-ez.c. Nor does it
19 use the non-8390-compatible "Altego" mode. (No support currently planned.)
20
21 Changelog:
22
23 Paul Gortmaker : multiple card support for module users.
24 David Weis : Micro Channel-ized it.
25 Tom Sightler : Added support for IBM PS/2 Ethernet Adapter/A
26 Christopher Turcksin : Changed MCA-probe so that multiple adapters are
27 found correctly (Jul 16, 1997)
28 Chris Beauregard : Tried to merge the two changes above (Dec 15, 1997)
29 Tom Sightler : Fixed minor detection bug caused by above merge
30 Tom Sightler : Added support for three more Western Digital
31 MCA-adapters
32 Tom Sightler : Added support for 2.2.x mca_find_unused_adapter
33 Hartmut Schmidt : - Modified parameter detection to handle each
34 card differently depending on a switch-list
35 - 'card_ver' removed from the adapter list
36 - Some minor bug fixes
37*/
38
39#include <linux/mca.h>
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/errno.h>
43#include <linux/string.h>
44#include <linux/init.h>
45#include <linux/interrupt.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48
49#include <asm/io.h>
50
51#include "8390.h"
52
53#define DRV_NAME "smc-mca"
54
55static int ultramca_open(struct net_device *dev);
56static void ultramca_reset_8390(struct net_device *dev);
57static void ultramca_get_8390_hdr(struct net_device *dev,
58 struct e8390_pkt_hdr *hdr,
59 int ring_page);
60static void ultramca_block_input(struct net_device *dev, int count,
61 struct sk_buff *skb,
62 int ring_offset);
63static void ultramca_block_output(struct net_device *dev, int count,
64 const unsigned char *buf,
65 const int start_page);
66static int ultramca_close_card(struct net_device *dev);
67
68#define START_PG 0x00 /* First page of TX buffer */
69
70#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
71#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
72#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
73#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
74#define ULTRA_IO_EXTENT 32
75#define EN0_ERWCNT 0x08 /* Early receive warning count. */
76
77#define _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A 0
78#define _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A 1
79#define _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A 2
80#define _6fc1_WD_Starcard_PLUS_A_WD8003ST_A 3
81#define _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A 4
82#define _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A 5
83#define _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A 6
84#define _efe5_IBM_PS2_Adapter_A_for_Ethernet 7
85
86struct smc_mca_adapters_t {
87 unsigned int id;
88 char *name;
89};
90
91#define MAX_ULTRAMCA_CARDS 4 /* Max number of Ultra cards per module */
92
93static int ultra_io[MAX_ULTRAMCA_CARDS];
94static int ultra_irq[MAX_ULTRAMCA_CARDS];
95MODULE_LICENSE("GPL");
96
97module_param_array(ultra_io, int, NULL, 0);
98module_param_array(ultra_irq, int, NULL, 0);
99MODULE_PARM_DESC(ultra_io, "SMC Ultra/EtherEZ MCA I/O base address(es)");
100MODULE_PARM_DESC(ultra_irq, "SMC Ultra/EtherEZ MCA IRQ number(s)");
101
/* Possible I/O base addresses for the "new style" (WD8013-class) cards,
 * indexed by the high nibble of POS register 2. */
static const struct {
	unsigned int base_addr;
} addr_table[] = {
	{ 0x0800 },
	{ 0x1800 },
	{ 0x2800 },
	{ 0x3800 },
	{ 0x4800 },
	{ 0x5800 },
	{ 0x6800 },
	{ 0x7800 },
	{ 0x8800 },
	{ 0x9800 },
	{ 0xa800 },
	{ 0xb800 },
	{ 0xc800 },
	{ 0xd800 },
	{ 0xe800 },
	{ 0xf800 }
};
122
123#define MEM_MASK 64
124
/* Shared-memory window decode table: maps the POS-register memory index
 * (pos3 with MEM_MASK cleared) to the window's physical start address and
 * its size in 256-byte 8390 pages. */
static const struct {
	unsigned char mem_index;
	unsigned long mem_start;
	unsigned char num_pages;
} mem_table[] = {
	{ 16, 0x0c0000, 40 },
	{ 18, 0x0c4000, 40 },
	{ 20, 0x0c8000, 40 },
	{ 22, 0x0cc000, 40 },
	{ 24, 0x0d0000, 40 },
	{ 26, 0x0d4000, 40 },
	{ 28, 0x0d8000, 40 },
	{ 30, 0x0dc000, 40 },
	{144, 0xfc0000, 40 },
	{148, 0xfc8000, 40 },
	{154, 0xfd0000, 40 },
	{156, 0xfd8000, 40 },
	{  0, 0x0c0000, 20 },
	{  1, 0x0c2000, 20 },
	{  2, 0x0c4000, 20 },
	{  3, 0x0c6000, 20 }
};
147
148#define IRQ_MASK 243
/* IRQ decode table: maps the 2-bit POS IRQ code to an IRQ line.
 * 'new_irq' is used by the WD8013-class cards, 'old_irq' by the
 * older WD8003-class cards (they differ only in the last entry). */
static const struct {
	unsigned char new_irq;
	unsigned char old_irq;
} irq_table[] = {
	{  3,  3 },
	{  4,  4 },
	{ 10, 10 },
	{ 14, 15 }
};
158
/* MCA adapter IDs this driver matches; zero-terminated.  The array is
 * parallel to smc_mca_adapter_names[] and to the _xxxx_* index defines. */
static short smc_mca_adapter_ids[] __initdata = {
	0x61c8,
	0x61c9,
	0x6fc0,
	0x6fc1,
	0x6fc2,
	0xefd4,
	0xefd5,
	0xefe5,
	0x0000
};
170
/* Human-readable adapter names, NULL-terminated; indexed in parallel
 * with smc_mca_adapter_ids[] via mca_dev->index. */
static char *smc_mca_adapter_names[] __initdata = {
	"SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
	"SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
	"WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
	"WD Starcard PLUS/A (WD8003ST/A)",
	"WD Ethercard PLUS 10T/A (WD8003W/A)",
	"IBM PS/2 Adapter/A for Ethernet UTP/AUI (WD8013WP/A)",
	"IBM PS/2 Adapter/A for Ethernet BNC/AUI (WD8013EP/A)",
	"IBM PS/2 Adapter/A for Ethernet",
	NULL
};
182
183static int ultra_found = 0;
184
185
/* Net-device operations: open/stop are driver-specific, everything else
 * is delegated to the generic 8390 (ei_*) implementations. */
static const struct net_device_ops ultramca_netdev_ops = {
	.ndo_open		= ultramca_open,
	.ndo_stop		= ultramca_close_card,

	.ndo_start_xmit		= ei_start_xmit,
	.ndo_tx_timeout		= ei_tx_timeout,
	.ndo_get_stats		= ei_get_stats,
	.ndo_set_rx_mode	= ei_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller 	= ei_poll,
#endif
};
201
/*
 * ultramca_probe - probe one SMC Ultra / WD / IBM PS/2 MCA adapter.
 *
 * Decodes I/O base, IRQ and shared-memory window from the adapter's
 * stored POS registers (layout differs per adapter family), optionally
 * filters against the user-supplied ultra_io[]/ultra_irq[] module
 * parameters, claims the device, reads the station address, maps the
 * shared memory, and registers the net device.
 *
 * Returns 0 on success or a negative errno; every failure path undoes
 * the resources acquired up to that point.
 */
static int __init ultramca_probe(struct device *gen_dev)
{
	unsigned short ioaddr;
	struct net_device *dev;
	unsigned char reg4, num_pages;
	struct mca_device *mca_dev = to_mca_device(gen_dev);
	char slot = mca_dev->slot;
	unsigned char pos2 = 0xff, pos3 = 0xff, pos4 = 0xff, pos5 = 0xff;
	int i, rc;
	int adapter = mca_dev->index;
	int tbase = 0;
	int tirq = 0;
	int base_addr = ultra_io[ultra_found];
	int irq = ultra_irq[ultra_found];

	if (base_addr || irq) {
		printk(KERN_INFO "Probing for SMC MCA adapter");
		if (base_addr) {
			printk(KERN_INFO " at I/O address 0x%04x%c",
				base_addr, irq ? ' ' : '\n');
		}
		if (irq) {
			printk(KERN_INFO "using irq %d\n", irq);
		}
	}

	tirq = 0;
	tbase = 0;

	/* If we're trying to match a specified irq or io address,
	 * we'll reject the adapter found unless it's the one we're
	 * looking for */

	pos2 = mca_device_read_stored_pos(mca_dev, 2); /* io_addr */
	pos3 = mca_device_read_stored_pos(mca_dev, 3); /* shared mem */
	pos4 = mca_device_read_stored_pos(mca_dev, 4); /* ROM bios addr range */
	/* NOTE(review): pos4 is read but never used below — presumably
	 * kept for completeness/debugging. */
	pos5 = mca_device_read_stored_pos(mca_dev, 5); /* irq, media and RIPL */

	/* Test the following conditions:
	 * - If an irq parameter is supplied, compare it
	 *   with the irq of the adapter we found
	 * - If a base_addr paramater is given, compare it
	 *   with the base_addr of the adapter we found
	 * - Check that the irq and the base_addr of the
	 *   adapter we found is not already in use by
	 *   this driver
	 */

	/* POS decode differs between the newer WD8013-class adapters
	 * (table-driven I/O base, new_irq) and the older WD8003-class
	 * ones (I/O base computed from pos2, old_irq). */
	switch (mca_dev->index) {
	case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
	case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
	case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
	case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
	{
		tbase = addr_table[(pos2 & 0xf0) >> 4].base_addr;
		tirq  = irq_table[(pos5 & 0xc) >> 2].new_irq;
		break;
	}
	case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
	case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
	case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
	case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
	{
		tbase = ((pos2 & 0x0fe) * 0x10);
		tirq  = irq_table[(pos5 & 3)].old_irq;
		break;
	}
	}

	if(!tirq || !tbase ||
	   (irq && irq != tirq) ||
	   (base_addr && tbase != base_addr))
		/* FIXME: we're trying to force the ordering of the
		 * devices here, there should be a way of getting this
		 * to happen */
		return -ENXIO;

	/* Adapter found. */
	dev  = alloc_ei_netdev();
	if(!dev)
		return -ENODEV;

	SET_NETDEV_DEV(dev, gen_dev);
	mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]);
	mca_device_set_claim(mca_dev, 1);

	printk(KERN_INFO "smc_mca: %s found in slot %d\n",
		smc_mca_adapter_names[adapter], slot + 1);

	ultra_found++;

	dev->base_addr = ioaddr = mca_device_transform_ioport(mca_dev, tbase);
	dev->irq       = mca_device_transform_irq(mca_dev, tirq);
	dev->mem_start = 0;
	num_pages      = 40;

	/* Per-family shared-memory window decode; on success sets
	 * dev->mem_start and num_pages. */
	switch (adapter) {	/* card-# in const array above [hs] */
	case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
	case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
	{
		for (i = 0; i < 16; i++) {	/* taking 16 counts
						 * up to 15 [hs] */
			if (mem_table[i].mem_index == (pos3 & ~MEM_MASK)) {
				dev->mem_start = (unsigned long)
				  mca_device_transform_memory(mca_dev, (void *)mem_table[i].mem_start);
				num_pages = mem_table[i].num_pages;
			}
		}
		break;
	}
	case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
	case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
	case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
	case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
	{
		dev->mem_start = (unsigned long)
		  mca_device_transform_memory(mca_dev, (void *)((pos3 & 0xfc) * 0x1000));
		num_pages = 0x40;
		break;
	}
	case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
	case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
	{
		/* courtesy of gamera@quartz.ocn.ne.jp, pos3 indicates
		 * the index of the 0x2000 step.
		 * beware different number of pages [hs]
		 */
		dev->mem_start = (unsigned long)
		  mca_device_transform_memory(mca_dev, (void *)(0xc0000 + (0x2000 * (pos3 & 0xf))));
		num_pages = 0x20 + (2 * (pos3 & 0x10));
		break;
	}
	}

	/* sanity check, shouldn't happen */
	if (dev->mem_start == 0) {
		rc = -ENODEV;
		goto err_unclaim;
	}

	if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) {
		rc = -ENODEV;
		goto err_unclaim;
	}

	/* Select the station-address register window (bit 7 clear). */
	reg4 = inb(ioaddr + 4) & 0x7f;
	outb(reg4, ioaddr + 4);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = inb(ioaddr + 8 + i);

	printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %pM",
	       slot + 1, ioaddr, dev->dev_addr);

	/* Switch from the station address to the alternate register set
	 * and read the useful registers there.
	 */

	outb(0x80 | reg4, ioaddr + 4);

	/* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot.
	 */

	outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);

	/* Switch back to the station address register set so that
	 * the MS-DOS driver can find the card after a warm boot.
	 */

	outb(reg4, ioaddr + 4);

	dev_set_drvdata(gen_dev, dev);

	/* The 8390 isn't at the base address, so fake the offset
	 */

	dev->base_addr = ioaddr + ULTRA_NIC_OFFSET;

	ei_status.name = "SMC Ultra MCA";
	ei_status.word16 = 1;
	ei_status.tx_start_page = START_PG;
	ei_status.rx_start_page = START_PG + TX_PAGES;
	ei_status.stop_page = num_pages;

	/* Map the shared packet memory (256 bytes per 8390 page). */
	ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG) * 256);
	if (!ei_status.mem) {
		rc = -ENOMEM;
		goto err_release_region;
	}

	dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG) * 256;

	printk(", IRQ %d memory %#lx-%#lx.\n",
	       dev->irq, dev->mem_start, dev->mem_end - 1);

	ei_status.reset_8390 = &ultramca_reset_8390;
	ei_status.block_input = &ultramca_block_input;
	ei_status.block_output = &ultramca_block_output;
	ei_status.get_8390_hdr = &ultramca_get_8390_hdr;

	ei_status.priv = slot;

	dev->netdev_ops = &ultramca_netdev_ops;

	NS8390_init(dev, 0);

	rc = register_netdev(dev);
	if (rc)
		goto err_unmap;

	return 0;

err_unmap:
	iounmap(ei_status.mem);
err_release_region:
	release_region(ioaddr, ULTRA_IO_EXTENT);
err_unclaim:
	mca_device_set_claim(mca_dev, 0);
	free_netdev(dev);
	return rc;
}
423
424static int ultramca_open(struct net_device *dev)
425{
426 int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
427 int retval;
428
429 if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
430 return retval;
431
432 outb(ULTRA_MEMENB, ioaddr); /* Enable memory */
433 outb(0x80, ioaddr + 5); /* ??? */
434 outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
435 outb(0x04, ioaddr + 5); /* ??? */
436
437 /* Set the early receive warning level in window 0 high enough not
438 * to receive ERW interrupts.
439 */
440
441 /* outb_p(E8390_NODMA + E8390_PAGE0, dev->base_addr);
442 * outb(0xff, dev->base_addr + EN0_ERWCNT);
443 */
444
445 ei_open(dev);
446 return 0;
447}
448
/* Hard-reset the 8390 core through the ASIC reset bit, then re-enable
 * the shared memory and interrupts that the reset cleared. */
static void ultramca_reset_8390(struct net_device *dev)
{
	int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */

	outb(ULTRA_RESET, ioaddr);
	if (ei_debug > 1)
		printk("resetting Ultra, t=%ld...", jiffies);
	/* Any transmit in flight is void after the reset. */
	ei_status.txing = 0;

	outb(0x80, ioaddr + 5);	/* ??? */
	outb(0x01, ioaddr + 6);	/* Enable interrupts and memory. */

	if (ei_debug > 1)
		printk("reset done\n");
}
464
/* Grab the 8390 specific header. Similar to the block_input routine, but
 * we don't need to be concerned with ring wrap as the header will be at
 * the start of a page, so we optimize accordingly.
 */

static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
	/* Each ring page is 256 bytes; the header sits at its start. */
	void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG) << 8);

#ifdef notdef
	/* Officially this is what we are doing, but the readl() is faster */
	memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
#else
	/* Single 32-bit read covers the whole 4-byte packet header. */
	((unsigned int*)hdr)[0] = readl(hdr_start);
#endif
}
481
482/* Block input and output are easy on shared memory ethercards, the only
483 * complication is when the ring buffer wraps.
484 */
485
486static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
487{
488 void __iomem *xfer_start = ei_status.mem + ring_offset - START_PG * 256;
489
490 if (ring_offset + count > ei_status.stop_page * 256) {
491 /* We must wrap the input move. */
492 int semi_count = ei_status.stop_page * 256 - ring_offset;
493 memcpy_fromio(skb->data, xfer_start, semi_count);
494 count -= semi_count;
495 memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
496 } else {
497 memcpy_fromio(skb->data, xfer_start, count);
498 }
499
500}
501
502static void ultramca_block_output(struct net_device *dev, int count, const unsigned char *buf,
503 int start_page)
504{
505 void __iomem *shmem = ei_status.mem + ((start_page - START_PG) << 8);
506
507 memcpy_toio(shmem, buf, count);
508}
509
/* Bring the interface down: stop the queue, disable ASIC interrupts,
 * release the IRQ, and put the 8390 back into its reset-default state.
 * Always returns 0. */
static int ultramca_close_card(struct net_device *dev)
{
	int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */

	netif_stop_queue(dev);

	if (ei_debug > 1)
		printk("%s: Shutting down ethercard.\n", dev->name);

	outb(0x00, ioaddr + 6);	/* Disable interrupts. */
	free_irq(dev->irq, dev);

	NS8390_init(dev, 0);

	/* We should someday disable shared memory and change to 8-bit mode
	 * "just in case"...
	 */

	return 0;
}
529
530static int ultramca_remove(struct device *gen_dev)
531{
532 struct mca_device *mca_dev = to_mca_device(gen_dev);
533 struct net_device *dev = dev_get_drvdata(gen_dev);
534
535 if (dev) {
536 /* NB: ultra_close_card() does free_irq */
537 int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET;
538
539 unregister_netdev(dev);
540 mca_device_set_claim(mca_dev, 0);
541 release_region(ioaddr, ULTRA_IO_EXTENT);
542 iounmap(ei_status.mem);
543 free_netdev(dev);
544 }
545 return 0;
546}
547
548
/* MCA bus driver glue: matches the adapter IDs above and dispatches
 * probe/remove for each claimed slot. */
static struct mca_driver ultra_driver = {
	.id_table = smc_mca_adapter_ids,
	.driver = {
		.name = "smc-mca",
		.bus = &mca_bus_type,
		.probe = ultramca_probe,
		.remove = ultramca_remove,
	}
};
558
559static int __init ultramca_init_module(void)
560{
561 if(!MCA_bus)
562 return -ENXIO;
563
564 mca_register_driver(&ultra_driver);
565
566 return ultra_found ? 0 : -ENXIO;
567}
568
/* Module exit: unregistering the driver triggers ultramca_remove()
 * for every bound device. */
static void __exit ultramca_cleanup_module(void)
{
	mca_unregister_driver(&ultra_driver);
}
573module_init(ultramca_init_module);
574module_exit(ultramca_cleanup_module);
575
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c63a64cb6085..a11af5cc4844 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -174,6 +174,7 @@ source "drivers/net/ethernet/tile/Kconfig"
174source "drivers/net/ethernet/toshiba/Kconfig" 174source "drivers/net/ethernet/toshiba/Kconfig"
175source "drivers/net/ethernet/tundra/Kconfig" 175source "drivers/net/ethernet/tundra/Kconfig"
176source "drivers/net/ethernet/via/Kconfig" 176source "drivers/net/ethernet/via/Kconfig"
177source "drivers/net/ethernet/wiznet/Kconfig"
177source "drivers/net/ethernet/xilinx/Kconfig" 178source "drivers/net/ethernet/xilinx/Kconfig"
178source "drivers/net/ethernet/xircom/Kconfig" 179source "drivers/net/ethernet/xircom/Kconfig"
179 180
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 9676a5109d94..878ad32b93f2 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -73,5 +73,6 @@ obj-$(CONFIG_TILE_NET) += tile/
73obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/ 73obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
74obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/ 74obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
75obj-$(CONFIG_NET_VENDOR_VIA) += via/ 75obj-$(CONFIG_NET_VENDOR_VIA) += via/
76obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
76obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ 77obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
77obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ 78obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index d896816512ca..d920a529ba22 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -114,15 +114,6 @@ static int rx_copybreak /* = 0 */;
114#define DMA_BURST_SIZE 128 114#define DMA_BURST_SIZE 128
115#endif 115#endif
116 116
117/* Used to pass the media type, etc.
118 Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
119 The media type is usually passed in 'options[]'.
120 These variables are deprecated, use ethtool instead. -Ion
121*/
122#define MAX_UNITS 8 /* More are supported, limit only on options */
123static int options[MAX_UNITS] = {0, };
124static int full_duplex[MAX_UNITS] = {0, };
125
126/* Operational parameters that are set at compile time. */ 117/* Operational parameters that are set at compile time. */
127 118
128/* The "native" ring sizes are either 256 or 2048. 119/* The "native" ring sizes are either 256 or 2048.
@@ -192,8 +183,6 @@ module_param(debug, int, 0);
192module_param(rx_copybreak, int, 0); 183module_param(rx_copybreak, int, 0);
193module_param(intr_latency, int, 0); 184module_param(intr_latency, int, 0);
194module_param(small_frames, int, 0); 185module_param(small_frames, int, 0);
195module_param_array(options, int, NULL, 0);
196module_param_array(full_duplex, int, NULL, 0);
197module_param(enable_hw_cksum, int, 0); 186module_param(enable_hw_cksum, int, 0);
198MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt"); 187MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
199MODULE_PARM_DESC(mtu, "MTU (all boards)"); 188MODULE_PARM_DESC(mtu, "MTU (all boards)");
@@ -201,8 +190,6 @@ MODULE_PARM_DESC(debug, "Debug level (0-6)");
201MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 190MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
202MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds"); 191MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
203MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)"); 192MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
204MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
205MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
206MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)"); 193MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
207 194
208/* 195/*
@@ -657,10 +644,10 @@ static const struct net_device_ops netdev_ops = {
657static int __devinit starfire_init_one(struct pci_dev *pdev, 644static int __devinit starfire_init_one(struct pci_dev *pdev,
658 const struct pci_device_id *ent) 645 const struct pci_device_id *ent)
659{ 646{
647 struct device *d = &pdev->dev;
660 struct netdev_private *np; 648 struct netdev_private *np;
661 int i, irq, option, chip_idx = ent->driver_data; 649 int i, irq, chip_idx = ent->driver_data;
662 struct net_device *dev; 650 struct net_device *dev;
663 static int card_idx = -1;
664 long ioaddr; 651 long ioaddr;
665 void __iomem *base; 652 void __iomem *base;
666 int drv_flags, io_size; 653 int drv_flags, io_size;
@@ -673,15 +660,13 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
673 printk(version); 660 printk(version);
674#endif 661#endif
675 662
676 card_idx++;
677
678 if (pci_enable_device (pdev)) 663 if (pci_enable_device (pdev))
679 return -EIO; 664 return -EIO;
680 665
681 ioaddr = pci_resource_start(pdev, 0); 666 ioaddr = pci_resource_start(pdev, 0);
682 io_size = pci_resource_len(pdev, 0); 667 io_size = pci_resource_len(pdev, 0);
683 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) { 668 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
684 printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx); 669 dev_err(d, "no PCI MEM resources, aborting\n");
685 return -ENODEV; 670 return -ENODEV;
686 } 671 }
687 672
@@ -694,14 +679,14 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
694 irq = pdev->irq; 679 irq = pdev->irq;
695 680
696 if (pci_request_regions (pdev, DRV_NAME)) { 681 if (pci_request_regions (pdev, DRV_NAME)) {
697 printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx); 682 dev_err(d, "cannot reserve PCI resources, aborting\n");
698 goto err_out_free_netdev; 683 goto err_out_free_netdev;
699 } 684 }
700 685
701 base = ioremap(ioaddr, io_size); 686 base = ioremap(ioaddr, io_size);
702 if (!base) { 687 if (!base) {
703 printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n", 688 dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
704 card_idx, io_size, ioaddr); 689 io_size, ioaddr);
705 goto err_out_free_res; 690 goto err_out_free_res;
706 } 691 }
707 692
@@ -753,9 +738,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
753 /* wait a little longer */ 738 /* wait a little longer */
754 udelay(1000); 739 udelay(1000);
755 740
756 dev->base_addr = (unsigned long)base;
757 dev->irq = irq;
758
759 np = netdev_priv(dev); 741 np = netdev_priv(dev);
760 np->dev = dev; 742 np->dev = dev;
761 np->base = base; 743 np->base = base;
@@ -772,21 +754,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
772 754
773 drv_flags = netdrv_tbl[chip_idx].drv_flags; 755 drv_flags = netdrv_tbl[chip_idx].drv_flags;
774 756
775 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
776 if (dev->mem_start)
777 option = dev->mem_start;
778
779 /* The lower four bits are the media type. */
780 if (option & 0x200)
781 np->mii_if.full_duplex = 1;
782
783 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
784 np->mii_if.full_duplex = 1;
785
786 if (np->mii_if.full_duplex)
787 np->mii_if.force_media = 1;
788 else
789 np->mii_if.force_media = 0;
790 np->speed100 = 1; 757 np->speed100 = 1;
791 758
792 /* timer resolution is 128 * 0.8us */ 759 /* timer resolution is 128 * 0.8us */
@@ -909,13 +876,14 @@ static int netdev_open(struct net_device *dev)
909 const __be32 *fw_rx_data, *fw_tx_data; 876 const __be32 *fw_rx_data, *fw_tx_data;
910 struct netdev_private *np = netdev_priv(dev); 877 struct netdev_private *np = netdev_priv(dev);
911 void __iomem *ioaddr = np->base; 878 void __iomem *ioaddr = np->base;
879 const int irq = np->pci_dev->irq;
912 int i, retval; 880 int i, retval;
913 size_t tx_size, rx_size; 881 size_t tx_size, rx_size;
914 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size; 882 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
915 883
916 /* Do we ever need to reset the chip??? */ 884 /* Do we ever need to reset the chip??? */
917 885
918 retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 886 retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
919 if (retval) 887 if (retval)
920 return retval; 888 return retval;
921 889
@@ -924,7 +892,7 @@ static int netdev_open(struct net_device *dev)
924 writel(1, ioaddr + PCIDeviceConfig); 892 writel(1, ioaddr + PCIDeviceConfig);
925 if (debug > 1) 893 if (debug > 1)
926 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 894 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
927 dev->name, dev->irq); 895 dev->name, irq);
928 896
929 /* Allocate the various queues. */ 897 /* Allocate the various queues. */
930 if (!np->queue_mem) { 898 if (!np->queue_mem) {
@@ -935,7 +903,7 @@ static int netdev_open(struct net_device *dev)
935 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; 903 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
936 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma); 904 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
937 if (np->queue_mem == NULL) { 905 if (np->queue_mem == NULL) {
938 free_irq(dev->irq, dev); 906 free_irq(irq, dev);
939 return -ENOMEM; 907 return -ENOMEM;
940 } 908 }
941 909
@@ -1962,7 +1930,7 @@ static int netdev_close(struct net_device *dev)
1962 } 1930 }
1963 } 1931 }
1964 1932
1965 free_irq(dev->irq, dev); 1933 free_irq(np->pci_dev->irq, dev);
1966 1934
1967 /* Free all the skbuffs in the Rx queue. */ 1935 /* Free all the skbuffs in the Rx queue. */
1968 for (i = 0; i < RX_RING_SIZE; i++) { 1936 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index ab4daeccdf98..f816426e1085 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -548,6 +548,25 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
548 return 0; 548 return 0;
549} 549}
550 550
551static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
552 struct ethtool_ts_info *info)
553{
554 info->so_timestamping =
555 SOF_TIMESTAMPING_TX_HARDWARE |
556 SOF_TIMESTAMPING_RX_HARDWARE |
557 SOF_TIMESTAMPING_SYS_HARDWARE;
558 info->phc_index = -1;
559 info->tx_types =
560 (1 << HWTSTAMP_TX_OFF) |
561 (1 << HWTSTAMP_TX_ON);
562 info->rx_filters =
563 (1 << HWTSTAMP_FILTER_NONE) |
564 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
565 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
566 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
567 return 0;
568}
569
551static const struct ethtool_ops bfin_mac_ethtool_ops = { 570static const struct ethtool_ops bfin_mac_ethtool_ops = {
552 .get_settings = bfin_mac_ethtool_getsettings, 571 .get_settings = bfin_mac_ethtool_getsettings,
553 .set_settings = bfin_mac_ethtool_setsettings, 572 .set_settings = bfin_mac_ethtool_setsettings,
@@ -555,6 +574,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
555 .get_drvinfo = bfin_mac_ethtool_getdrvinfo, 574 .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
556 .get_wol = bfin_mac_ethtool_getwol, 575 .get_wol = bfin_mac_ethtool_getwol,
557 .set_wol = bfin_mac_ethtool_setwol, 576 .set_wol = bfin_mac_ethtool_setwol,
577 .get_ts_info = bfin_mac_ethtool_get_ts_info,
558}; 578};
559 579
560/**************************************************************************/ 580/**************************************************************************/
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index f4c228e4d76c..f2958df9a1e4 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -213,10 +213,10 @@ static int ariadne_rx(struct net_device *dev)
213 (const void *)priv->rx_buff[entry], 213 (const void *)priv->rx_buff[entry],
214 pkt_len); 214 pkt_len);
215 skb->protocol = eth_type_trans(skb, dev); 215 skb->protocol = eth_type_trans(skb, dev);
216 netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n", 216 netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data %p len %u\n",
217 ((u_short *)skb->data)[6], 217 ((u_short *)skb->data)[6],
218 skb->data + 6, skb->data, 218 skb->data + 6, skb->data,
219 (int)skb->data, (int)skb->len); 219 skb->data, skb->len);
220 220
221 netif_rx(skb); 221 netif_rx(skb);
222 dev->stats.rx_packets++; 222 dev->stats.rx_packets++;
@@ -566,10 +566,10 @@ static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
566 566
567 /* Fill in a Tx ring entry */ 567 /* Fill in a Tx ring entry */
568 568
569 netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n", 569 netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data %p len %u\n",
570 ((u_short *)skb->data)[6], 570 ((u_short *)skb->data)[6],
571 skb->data + 6, skb->data, 571 skb->data + 6, skb->data,
572 (int)skb->data, (int)skb->len); 572 skb->data, skb->len);
573 573
574 local_irq_save(flags); 574 local_irq_save(flags);
575 575
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 70ed79c46245..84219df72f51 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -558,21 +558,18 @@ static unsigned long __init lance_probe1( struct net_device *dev,
558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); 558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
559 return 0; 559 return 0;
560 } 560 }
561 dev->irq = (unsigned short)IRQ_AUTO_5; 561 dev->irq = IRQ_AUTO_5;
562 } 562 }
563 else { 563 else {
564 /* For VME-RieblCards, request a free VME int; 564 /* For VME-RieblCards, request a free VME int */
565 * (This must be unsigned long, since dev->irq is short and the 565 unsigned int irq = atari_register_vme_int();
566 * IRQ_MACHSPEC bit would be cut off...)
567 */
568 unsigned long irq = atari_register_vme_int();
569 if (!irq) { 566 if (!irq) {
570 printk( "Lance: request for VME interrupt failed\n" ); 567 printk( "Lance: request for VME interrupt failed\n" );
571 return 0; 568 return 0;
572 } 569 }
573 if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO, 570 if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
574 "Riebl-VME Ethernet", dev)) { 571 "Riebl-VME Ethernet", dev)) {
575 printk( "Lance: request for irq %ld failed\n", irq ); 572 printk( "Lance: request for irq %u failed\n", irq );
576 return 0; 573 return 0;
577 } 574 }
578 dev->irq = irq; 575 dev->irq = irq;
diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c
index 86dd95766a64..c771de71612a 100644
--- a/drivers/net/ethernet/amd/depca.c
+++ b/drivers/net/ethernet/amd/depca.c
@@ -155,23 +155,10 @@
155 2 depca's in a PC). 155 2 depca's in a PC).
156 156
157 ************************************************************************ 157 ************************************************************************
158 Support for MCA EtherWORKS cards added 11-3-98. 158 Support for MCA EtherWORKS cards added 11-3-98. (MCA since deleted)
159 Verified to work with up to 2 DE212 cards in a system (although not 159 Verified to work with up to 2 DE212 cards in a system (although not
160 fully stress-tested). 160 fully stress-tested).
161 161
162 Currently known bugs/limitations:
163
164 Note: with the MCA stuff as a module, it trusts the MCA configuration,
165 not the command line for IRQ and memory address. You can
166 specify them if you want, but it will throw your values out.
167 You still have to pass the IO address it was configured as
168 though.
169
170 ************************************************************************
171 TO DO:
172 ------
173
174
175 Revision History 162 Revision History
176 ---------------- 163 ----------------
177 164
@@ -261,10 +248,6 @@
261#include <asm/io.h> 248#include <asm/io.h>
262#include <asm/dma.h> 249#include <asm/dma.h>
263 250
264#ifdef CONFIG_MCA
265#include <linux/mca.h>
266#endif
267
268#ifdef CONFIG_EISA 251#ifdef CONFIG_EISA
269#include <linux/eisa.h> 252#include <linux/eisa.h>
270#endif 253#endif
@@ -360,44 +343,6 @@ static struct eisa_driver depca_eisa_driver = {
360}; 343};
361#endif 344#endif
362 345
363#ifdef CONFIG_MCA
364/*
365** Adapter ID for the MCA EtherWORKS DE210/212 adapter
366*/
367#define DE210_ID 0x628d
368#define DE212_ID 0x6def
369
370static short depca_mca_adapter_ids[] = {
371 DE210_ID,
372 DE212_ID,
373 0x0000
374};
375
376static char *depca_mca_adapter_name[] = {
377 "DEC EtherWORKS MC Adapter (DE210)",
378 "DEC EtherWORKS MC Adapter (DE212)",
379 NULL
380};
381
382static enum depca_type depca_mca_adapter_type[] = {
383 de210,
384 de212,
385 0
386};
387
388static int depca_mca_probe (struct device *);
389
390static struct mca_driver depca_mca_driver = {
391 .id_table = depca_mca_adapter_ids,
392 .driver = {
393 .name = depca_string,
394 .bus = &mca_bus_type,
395 .probe = depca_mca_probe,
396 .remove = __devexit_p(depca_device_remove),
397 },
398};
399#endif
400
401static int depca_isa_probe (struct platform_device *); 346static int depca_isa_probe (struct platform_device *);
402 347
403static int __devexit depca_isa_remove(struct platform_device *pdev) 348static int __devexit depca_isa_remove(struct platform_device *pdev)
@@ -464,8 +409,7 @@ struct depca_private {
464 char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */ 409 char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */
465 enum depca_type adapter; /* Adapter type */ 410 enum depca_type adapter; /* Adapter type */
466 enum { 411 enum {
467 DEPCA_BUS_MCA = 1, 412 DEPCA_BUS_ISA = 1,
468 DEPCA_BUS_ISA,
469 DEPCA_BUS_EISA, 413 DEPCA_BUS_EISA,
470 } depca_bus; /* type of bus */ 414 } depca_bus; /* type of bus */
471 struct depca_init init_block; /* Shadow Initialization block */ 415 struct depca_init init_block; /* Shadow Initialization block */
@@ -624,12 +568,6 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
624 dev_name(device), depca_signature[lp->adapter], ioaddr); 568 dev_name(device), depca_signature[lp->adapter], ioaddr);
625 569
626 switch (lp->depca_bus) { 570 switch (lp->depca_bus) {
627#ifdef CONFIG_MCA
628 case DEPCA_BUS_MCA:
629 printk(" (MCA slot %d)", to_mca_device(device)->slot + 1);
630 break;
631#endif
632
633#ifdef CONFIG_EISA 571#ifdef CONFIG_EISA
634 case DEPCA_BUS_EISA: 572 case DEPCA_BUS_EISA:
635 printk(" (EISA slot %d)", to_eisa_device(device)->slot); 573 printk(" (EISA slot %d)", to_eisa_device(device)->slot);
@@ -661,10 +599,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
661 if (nicsr & BUF) { 599 if (nicsr & BUF) {
662 nicsr &= ~BS; /* DEPCA RAM in top 32k */ 600 nicsr &= ~BS; /* DEPCA RAM in top 32k */
663 netRAM -= 32; 601 netRAM -= 32;
664 602 mem_start += 0x8000;
665 /* Only EISA/ISA needs start address to be re-computed */
666 if (lp->depca_bus != DEPCA_BUS_MCA)
667 mem_start += 0x8000;
668 } 603 }
669 604
670 if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init))) 605 if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init)))
@@ -1079,7 +1014,8 @@ static int depca_rx(struct net_device *dev)
1079 } else { 1014 } else {
1080 lp->pktStats.multicast++; 1015 lp->pktStats.multicast++;
1081 } 1016 }
1082 } else if (compare_ether_addr(buf, dev->dev_addr) == 0) { 1017 } else if (ether_addr_equal(buf,
1018 dev->dev_addr)) {
1083 lp->pktStats.unicast++; 1019 lp->pktStats.unicast++;
1084 } 1020 }
1085 1021
@@ -1324,130 +1260,6 @@ static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
1324 return status; 1260 return status;
1325} 1261}
1326 1262
1327#ifdef CONFIG_MCA
1328/*
1329** Microchannel bus I/O device probe
1330*/
1331static int __init depca_mca_probe(struct device *device)
1332{
1333 unsigned char pos[2];
1334 unsigned char where;
1335 unsigned long iobase, mem_start;
1336 int irq, err;
1337 struct mca_device *mdev = to_mca_device (device);
1338 struct net_device *dev;
1339 struct depca_private *lp;
1340
1341 /*
1342 ** Search for the adapter. If an address has been given, search
1343 ** specifically for the card at that address. Otherwise find the
1344 ** first card in the system.
1345 */
1346
1347 pos[0] = mca_device_read_stored_pos(mdev, 2);
1348 pos[1] = mca_device_read_stored_pos(mdev, 3);
1349
1350 /*
1351 ** IO of card is handled by bits 1 and 2 of pos0.
1352 **
1353 ** bit2 bit1 IO
1354 ** 0 0 0x2c00
1355 ** 0 1 0x2c10
1356 ** 1 0 0x2c20
1357 ** 1 1 0x2c30
1358 */
1359 where = (pos[0] & 6) >> 1;
1360 iobase = 0x2c00 + (0x10 * where);
1361
1362 /*
1363 ** Found the adapter we were looking for. Now start setting it up.
1364 **
1365 ** First work on decoding the IRQ. It's stored in the lower 4 bits
1366 ** of pos1. Bits are as follows (from the ADF file):
1367 **
1368 ** Bits
1369 ** 3 2 1 0 IRQ
1370 ** --------------------
1371 ** 0 0 1 0 5
1372 ** 0 0 0 1 9
1373 ** 0 1 0 0 10
1374 ** 1 0 0 0 11
1375 */
1376 where = pos[1] & 0x0f;
1377 switch (where) {
1378 case 1:
1379 irq = 9;
1380 break;
1381 case 2:
1382 irq = 5;
1383 break;
1384 case 4:
1385 irq = 10;
1386 break;
1387 case 8:
1388 irq = 11;
1389 break;
1390 default:
1391 printk("%s: mca_probe IRQ error. You should never get here (%d).\n", mdev->name, where);
1392 return -EINVAL;
1393 }
1394
1395 /*
1396 ** Shared memory address of adapter is stored in bits 3-5 of pos0.
1397 ** They are mapped as follows:
1398 **
1399 ** Bit
1400 ** 5 4 3 Memory Addresses
1401 ** 0 0 0 C0000-CFFFF (64K)
1402 ** 1 0 0 C8000-CFFFF (32K)
1403 ** 0 0 1 D0000-DFFFF (64K)
1404 ** 1 0 1 D8000-DFFFF (32K)
1405 ** 0 1 0 E0000-EFFFF (64K)
1406 ** 1 1 0 E8000-EFFFF (32K)
1407 */
1408 where = (pos[0] & 0x18) >> 3;
1409 mem_start = 0xc0000 + (where * 0x10000);
1410 if (pos[0] & 0x20) {
1411 mem_start += 0x8000;
1412 }
1413
1414 /* claim the slot */
1415 strncpy(mdev->name, depca_mca_adapter_name[mdev->index],
1416 sizeof(mdev->name));
1417 mca_device_set_claim(mdev, 1);
1418
1419 /*
1420 ** Get everything allocated and initialized... (almost just
1421 ** like the ISA and EISA probes)
1422 */
1423 irq = mca_device_transform_irq(mdev, irq);
1424 iobase = mca_device_transform_ioport(mdev, iobase);
1425
1426 if ((err = depca_common_init (iobase, &dev)))
1427 goto out_unclaim;
1428
1429 dev->irq = irq;
1430 dev->base_addr = iobase;
1431 lp = netdev_priv(dev);
1432 lp->depca_bus = DEPCA_BUS_MCA;
1433 lp->adapter = depca_mca_adapter_type[mdev->index];
1434 lp->mem_start = mem_start;
1435
1436 if ((err = depca_hw_init(dev, device)))
1437 goto out_free;
1438
1439 return 0;
1440
1441 out_free:
1442 free_netdev (dev);
1443 release_region (iobase, DEPCA_TOTAL_SIZE);
1444 out_unclaim:
1445 mca_device_set_claim(mdev, 0);
1446
1447 return err;
1448}
1449#endif
1450
1451/* 1263/*
1452** ISA bus I/O device probe 1264** ISA bus I/O device probe
1453*/ 1265*/
@@ -2058,15 +1870,10 @@ static int __init depca_module_init (void)
2058{ 1870{
2059 int err = 0; 1871 int err = 0;
2060 1872
2061#ifdef CONFIG_MCA
2062 err = mca_register_driver(&depca_mca_driver);
2063 if (err)
2064 goto err;
2065#endif
2066#ifdef CONFIG_EISA 1873#ifdef CONFIG_EISA
2067 err = eisa_driver_register(&depca_eisa_driver); 1874 err = eisa_driver_register(&depca_eisa_driver);
2068 if (err) 1875 if (err)
2069 goto err_mca; 1876 goto err_eisa;
2070#endif 1877#endif
2071 err = platform_driver_register(&depca_isa_driver); 1878 err = platform_driver_register(&depca_isa_driver);
2072 if (err) 1879 if (err)
@@ -2078,11 +1885,6 @@ static int __init depca_module_init (void)
2078err_eisa: 1885err_eisa:
2079#ifdef CONFIG_EISA 1886#ifdef CONFIG_EISA
2080 eisa_driver_unregister(&depca_eisa_driver); 1887 eisa_driver_unregister(&depca_eisa_driver);
2081err_mca:
2082#endif
2083#ifdef CONFIG_MCA
2084 mca_unregister_driver(&depca_mca_driver);
2085err:
2086#endif 1888#endif
2087 return err; 1889 return err;
2088} 1890}
@@ -2090,9 +1892,6 @@ err:
2090static void __exit depca_module_exit (void) 1892static void __exit depca_module_exit (void)
2091{ 1893{
2092 int i; 1894 int i;
2093#ifdef CONFIG_MCA
2094 mca_unregister_driver (&depca_mca_driver);
2095#endif
2096#ifdef CONFIG_EISA 1895#ifdef CONFIG_EISA
2097 eisa_driver_unregister (&depca_eisa_driver); 1896 eisa_driver_unregister (&depca_eisa_driver);
2098#endif 1897#endif
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index ca70e16b6e2c..b2bf324631dc 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -74,8 +74,6 @@
74 74
75#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) 75#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
76#define MAX_JUMBO_FRAME_SIZE (6*1024) 76#define MAX_JUMBO_FRAME_SIZE (6*1024)
77#define MAX_TSO_FRAME_SIZE (7*1024)
78#define MAX_TX_OFFLOAD_THRESH (9*1024)
79 77
80#define AT_MAX_RECEIVE_QUEUE 4 78#define AT_MAX_RECEIVE_QUEUE 4
81#define AT_DEF_RECEIVE_QUEUE 1 79#define AT_DEF_RECEIVE_QUEUE 1
@@ -100,7 +98,7 @@
100#define ATL1C_ASPM_L0s_ENABLE 0x0001 98#define ATL1C_ASPM_L0s_ENABLE 0x0001
101#define ATL1C_ASPM_L1_ENABLE 0x0002 99#define ATL1C_ASPM_L1_ENABLE 0x0002
102 100
103#define AT_REGS_LEN (75 * sizeof(u32)) 101#define AT_REGS_LEN (74 * sizeof(u32))
104#define AT_EEPROM_LEN 512 102#define AT_EEPROM_LEN 512
105 103
106#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) 104#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
@@ -297,20 +295,6 @@ enum atl1c_dma_req_block {
297 atl1c_dma_req_4096 = 5 295 atl1c_dma_req_4096 = 5
298}; 296};
299 297
300enum atl1c_rss_mode {
301 atl1c_rss_mode_disable = 0,
302 atl1c_rss_sig_que = 1,
303 atl1c_rss_mul_que_sig_int = 2,
304 atl1c_rss_mul_que_mul_int = 4,
305};
306
307enum atl1c_rss_type {
308 atl1c_rss_disable = 0,
309 atl1c_rss_ipv4 = 1,
310 atl1c_rss_ipv4_tcp = 2,
311 atl1c_rss_ipv6 = 4,
312 atl1c_rss_ipv6_tcp = 8
313};
314 298
315enum atl1c_nic_type { 299enum atl1c_nic_type {
316 athr_l1c = 0, 300 athr_l1c = 0,
@@ -388,7 +372,6 @@ struct atl1c_hw {
388 enum atl1c_dma_order dma_order; 372 enum atl1c_dma_order dma_order;
389 enum atl1c_dma_rcb rcb_value; 373 enum atl1c_dma_rcb rcb_value;
390 enum atl1c_dma_req_block dmar_block; 374 enum atl1c_dma_req_block dmar_block;
391 enum atl1c_dma_req_block dmaw_block;
392 375
393 u16 device_id; 376 u16 device_id;
394 u16 vendor_id; 377 u16 vendor_id;
@@ -399,8 +382,6 @@ struct atl1c_hw {
399 u16 phy_id2; 382 u16 phy_id2;
400 383
401 u32 intr_mask; 384 u32 intr_mask;
402 u8 dmaw_dly_cnt;
403 u8 dmar_dly_cnt;
404 385
405 u8 preamble_len; 386 u8 preamble_len;
406 u16 max_frame_size; 387 u16 max_frame_size;
@@ -440,10 +421,6 @@ struct atl1c_hw {
440#define ATL1C_FPGA_VERSION 0x8000 421#define ATL1C_FPGA_VERSION 0x8000
441 u16 link_cap_flags; 422 u16 link_cap_flags;
442#define ATL1C_LINK_CAP_1000M 0x0001 423#define ATL1C_LINK_CAP_1000M 0x0001
443 u16 cmb_tpd;
444 u16 cmb_rrd;
445 u16 cmb_rx_timer; /* 2us resolution */
446 u16 cmb_tx_timer;
447 u32 smb_timer; 424 u32 smb_timer;
448 425
449 u16 rrd_thresh; /* Threshold of number of RRD produced to trigger 426 u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
@@ -451,9 +428,6 @@ struct atl1c_hw {
451 u16 tpd_thresh; 428 u16 tpd_thresh;
452 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */ 429 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */
453 u8 rfd_burst; 430 u8 rfd_burst;
454 enum atl1c_rss_type rss_type;
455 enum atl1c_rss_mode rss_mode;
456 u8 rss_hash_bits;
457 u32 base_cpu; 431 u32 base_cpu;
458 u32 indirect_tab; 432 u32 indirect_tab;
459 u8 mac_addr[ETH_ALEN]; 433 u8 mac_addr[ETH_ALEN];
@@ -462,12 +436,12 @@ struct atl1c_hw {
462 bool phy_configured; 436 bool phy_configured;
463 bool re_autoneg; 437 bool re_autoneg;
464 bool emi_ca; 438 bool emi_ca;
439 bool msi_lnkpatch; /* link patch for specific platforms */
465}; 440};
466 441
467/* 442/*
468 * atl1c_ring_header represents a single, contiguous block of DMA space 443 * atl1c_ring_header represents a single, contiguous block of DMA space
469 * mapped for the three descriptor rings (tpd, rfd, rrd) and the two 444 * mapped for the three descriptor rings (tpd, rfd, rrd) described below
470 * message blocks (cmb, smb) described below
471 */ 445 */
472struct atl1c_ring_header { 446struct atl1c_ring_header {
473 void *desc; /* virtual address */ 447 void *desc; /* virtual address */
@@ -541,16 +515,6 @@ struct atl1c_rrd_ring {
541 u16 next_to_clean; 515 u16 next_to_clean;
542}; 516};
543 517
544struct atl1c_cmb {
545 void *cmb;
546 dma_addr_t dma;
547};
548
549struct atl1c_smb {
550 void *smb;
551 dma_addr_t dma;
552};
553
554/* board specific private data structure */ 518/* board specific private data structure */
555struct atl1c_adapter { 519struct atl1c_adapter {
556 struct net_device *netdev; 520 struct net_device *netdev;
@@ -586,11 +550,8 @@ struct atl1c_adapter {
586 /* All Descriptor memory */ 550 /* All Descriptor memory */
587 struct atl1c_ring_header ring_header; 551 struct atl1c_ring_header ring_header;
588 struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE]; 552 struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE];
589 struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE]; 553 struct atl1c_rfd_ring rfd_ring;
590 struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE]; 554 struct atl1c_rrd_ring rrd_ring;
591 struct atl1c_cmb cmb;
592 struct atl1c_smb smb;
593 int num_rx_queues;
594 u32 bd_number; /* board number;*/ 555 u32 bd_number; /* board number;*/
595}; 556};
596 557
@@ -618,8 +579,14 @@ struct atl1c_adapter {
618#define AT_WRITE_REGW(a, reg, value) (\ 579#define AT_WRITE_REGW(a, reg, value) (\
619 writew((value), ((a)->hw_addr + reg))) 580 writew((value), ((a)->hw_addr + reg)))
620 581
621#define AT_READ_REGW(a, reg) (\ 582#define AT_READ_REGW(a, reg, pdata) do { \
622 readw((a)->hw_addr + reg)) 583 if (unlikely((a)->hibernate)) { \
584 readw((a)->hw_addr + reg); \
585 *(u16 *)pdata = readw((a)->hw_addr + reg); \
586 } else { \
587 *(u16 *)pdata = readw((a)->hw_addr + reg); \
588 } \
589 } while (0)
623 590
624#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ 591#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
625 writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) 592 writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 0a9326aa58b5..859ea844ba0f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -141,8 +141,7 @@ static void atl1c_get_regs(struct net_device *netdev,
141 141
142 memset(p, 0, AT_REGS_LEN); 142 memset(p, 0, AT_REGS_LEN);
143 143
144 regs->version = 0; 144 regs->version = 1;
145 AT_READ_REG(hw, REG_VPD_CAP, p++);
146 AT_READ_REG(hw, REG_PM_CTRL, p++); 145 AT_READ_REG(hw, REG_PM_CTRL, p++);
147 AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++); 146 AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++);
148 AT_READ_REG(hw, REG_TWSI_CTRL, p++); 147 AT_READ_REG(hw, REG_TWSI_CTRL, p++);
@@ -154,7 +153,7 @@ static void atl1c_get_regs(struct net_device *netdev,
154 AT_READ_REG(hw, REG_LINK_CTRL, p++); 153 AT_READ_REG(hw, REG_LINK_CTRL, p++);
155 AT_READ_REG(hw, REG_IDLE_STATUS, p++); 154 AT_READ_REG(hw, REG_IDLE_STATUS, p++);
156 AT_READ_REG(hw, REG_MDIO_CTRL, p++); 155 AT_READ_REG(hw, REG_MDIO_CTRL, p++);
157 AT_READ_REG(hw, REG_SERDES_LOCK, p++); 156 AT_READ_REG(hw, REG_SERDES, p++);
158 AT_READ_REG(hw, REG_MAC_CTRL, p++); 157 AT_READ_REG(hw, REG_MAC_CTRL, p++);
159 AT_READ_REG(hw, REG_MAC_IPG_IFG, p++); 158 AT_READ_REG(hw, REG_MAC_IPG_IFG, p++);
160 AT_READ_REG(hw, REG_MAC_STA_ADDR, p++); 159 AT_READ_REG(hw, REG_MAC_STA_ADDR, p++);
@@ -167,9 +166,9 @@ static void atl1c_get_regs(struct net_device *netdev,
167 AT_READ_REG(hw, REG_WOL_CTRL, p++); 166 AT_READ_REG(hw, REG_WOL_CTRL, p++);
168 167
169 atl1c_read_phy_reg(hw, MII_BMCR, &phy_data); 168 atl1c_read_phy_reg(hw, MII_BMCR, &phy_data);
170 regs_buff[73] = (u32) phy_data; 169 regs_buff[AT_REGS_LEN/sizeof(u32) - 2] = (u32) phy_data;
171 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); 170 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
172 regs_buff[74] = (u32) phy_data; 171 regs_buff[AT_REGS_LEN/sizeof(u32) - 1] = (u32) phy_data;
173} 172}
174 173
175static int atl1c_get_eeprom_len(struct net_device *netdev) 174static int atl1c_get_eeprom_len(struct net_device *netdev)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index bd1667cbffa6..ff9c73859d45 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -43,7 +43,7 @@ int atl1c_check_eeprom_exist(struct atl1c_hw *hw)
43 return 0; 43 return 0;
44} 44}
45 45
46void atl1c_hw_set_mac_addr(struct atl1c_hw *hw) 46void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr)
47{ 47{
48 u32 value; 48 u32 value;
49 /* 49 /*
@@ -51,35 +51,48 @@ void atl1c_hw_set_mac_addr(struct atl1c_hw *hw)
51 * 0: 6AF600DC 1: 000B 51 * 0: 6AF600DC 1: 000B
52 * low dword 52 * low dword
53 */ 53 */
54 value = (((u32)hw->mac_addr[2]) << 24) | 54 value = mac_addr[2] << 24 |
55 (((u32)hw->mac_addr[3]) << 16) | 55 mac_addr[3] << 16 |
56 (((u32)hw->mac_addr[4]) << 8) | 56 mac_addr[4] << 8 |
57 (((u32)hw->mac_addr[5])) ; 57 mac_addr[5];
58 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); 58 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
59 /* hight dword */ 59 /* hight dword */
60 value = (((u32)hw->mac_addr[0]) << 8) | 60 value = mac_addr[0] << 8 |
61 (((u32)hw->mac_addr[1])) ; 61 mac_addr[1];
62 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); 62 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
63} 63}
64 64
65/* read mac address from hardware register */
66static bool atl1c_read_current_addr(struct atl1c_hw *hw, u8 *eth_addr)
67{
68 u32 addr[2];
69
70 AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]);
71 AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]);
72
73 *(u32 *) &eth_addr[2] = htonl(addr[0]);
74 *(u16 *) &eth_addr[0] = htons((u16)addr[1]);
75
76 return is_valid_ether_addr(eth_addr);
77}
78
65/* 79/*
66 * atl1c_get_permanent_address 80 * atl1c_get_permanent_address
67 * return 0 if get valid mac address, 81 * return 0 if get valid mac address,
68 */ 82 */
69static int atl1c_get_permanent_address(struct atl1c_hw *hw) 83static int atl1c_get_permanent_address(struct atl1c_hw *hw)
70{ 84{
71 u32 addr[2];
72 u32 i; 85 u32 i;
73 u32 otp_ctrl_data; 86 u32 otp_ctrl_data;
74 u32 twsi_ctrl_data; 87 u32 twsi_ctrl_data;
75 u32 ltssm_ctrl_data;
76 u32 wol_data;
77 u8 eth_addr[ETH_ALEN];
78 u16 phy_data; 88 u16 phy_data;
79 bool raise_vol = false; 89 bool raise_vol = false;
80 90
91 /* MAC-address from BIOS is the 1st priority */
92 if (atl1c_read_current_addr(hw, hw->perm_mac_addr))
93 return 0;
94
81 /* init */ 95 /* init */
82 addr[0] = addr[1] = 0;
83 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); 96 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
84 if (atl1c_check_eeprom_exist(hw)) { 97 if (atl1c_check_eeprom_exist(hw)) {
85 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { 98 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
@@ -91,33 +104,17 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
91 msleep(1); 104 msleep(1);
92 } 105 }
93 } 106 }
94 107 /* raise voltage temporally for l2cb */
95 if (hw->nic_type == athr_l2c_b || 108 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) {
96 hw->nic_type == athr_l2c_b2 || 109 atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data);
97 hw->nic_type == athr_l1d) { 110 phy_data &= ~ANACTRL_HB_EN;
98 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); 111 atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data);
99 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) 112 atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
100 goto out; 113 phy_data |= VOLT_CTRL_SWLOWEST;
101 phy_data &= 0xFF7F; 114 atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
102 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
103
104 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
105 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
106 goto out;
107 phy_data |= 0x8;
108 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
109 udelay(20); 115 udelay(20);
110 raise_vol = true; 116 raise_vol = true;
111 } 117 }
112 /* close open bit of ReadOnly*/
113 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &ltssm_ctrl_data);
114 ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO;
115 AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data);
116
117 /* clear any WOL settings */
118 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
119 AT_READ_REG(hw, REG_WOL_CTRL, &wol_data);
120
121 118
122 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); 119 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
123 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; 120 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
@@ -138,37 +135,18 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
138 msleep(1); 135 msleep(1);
139 } 136 }
140 if (raise_vol) { 137 if (raise_vol) {
141 if (hw->nic_type == athr_l2c_b || 138 atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data);
142 hw->nic_type == athr_l2c_b2 || 139 phy_data |= ANACTRL_HB_EN;
143 hw->nic_type == athr_l1d || 140 atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data);
144 hw->nic_type == athr_l1d_2) { 141 atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
145 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); 142 phy_data &= ~VOLT_CTRL_SWLOWEST;
146 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) 143 atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
147 goto out; 144 udelay(20);
148 phy_data |= 0x80;
149 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
150
151 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
152 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
153 goto out;
154 phy_data &= 0xFFF7;
155 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
156 udelay(20);
157 }
158 } 145 }
159 146
160 /* maybe MAC-address is from BIOS */ 147 if (atl1c_read_current_addr(hw, hw->perm_mac_addr))
161 AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]);
162 AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]);
163 *(u32 *) &eth_addr[2] = swab32(addr[0]);
164 *(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]);
165
166 if (is_valid_ether_addr(eth_addr)) {
167 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
168 return 0; 148 return 0;
169 }
170 149
171out:
172 return -1; 150 return -1;
173} 151}
174 152
@@ -278,33 +256,158 @@ void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value)
278} 256}
279 257
280/* 258/*
281 * Reads the value from a PHY register 259 * wait mdio module be idle
282 * hw - Struct containing variables accessed by shared code 260 * return true: idle
283 * reg_addr - address of the PHY register to read 261 * false: still busy
284 */ 262 */
285int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) 263bool atl1c_wait_mdio_idle(struct atl1c_hw *hw)
286{ 264{
287 u32 val; 265 u32 val;
288 int i; 266 int i;
289 267
290 val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | 268 for (i = 0; i < MDIO_MAX_AC_TO; i++) {
291 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | 269 AT_READ_REG(hw, REG_MDIO_CTRL, &val);
292 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; 270 if (!(val & (MDIO_CTRL_BUSY | MDIO_CTRL_START)))
271 break;
272 udelay(10);
273 }
274
275 return i != MDIO_MAX_AC_TO;
276}
277
278void atl1c_stop_phy_polling(struct atl1c_hw *hw)
279{
280 if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
281 return;
282
283 AT_WRITE_REG(hw, REG_MDIO_CTRL, 0);
284 atl1c_wait_mdio_idle(hw);
285}
286
287void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel)
288{
289 u32 val;
290
291 if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
292 return;
293 293
294 val = MDIO_CTRL_SPRES_PRMBL |
295 FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
296 FIELDX(MDIO_CTRL_REG, 1) |
297 MDIO_CTRL_START |
298 MDIO_CTRL_OP_READ;
299 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
300 atl1c_wait_mdio_idle(hw);
301 val |= MDIO_CTRL_AP_EN;
302 val &= ~MDIO_CTRL_START;
294 AT_WRITE_REG(hw, REG_MDIO_CTRL, val); 303 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
304 udelay(30);
305}
295 306
296 for (i = 0; i < MDIO_WAIT_TIMES; i++) { 307
297 udelay(2); 308/*
298 AT_READ_REG(hw, REG_MDIO_CTRL, &val); 309 * atl1c_read_phy_core
299 if (!(val & (MDIO_START | MDIO_BUSY))) 310 * core funtion to read register in PHY via MDIO control regsiter.
300 break; 311 * ext: extension register (see IEEE 802.3)
312 * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
313 * reg: reg to read
314 */
315int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
316 u16 reg, u16 *phy_data)
317{
318 u32 val;
319 u16 clk_sel = MDIO_CTRL_CLK_25_4;
320
321 atl1c_stop_phy_polling(hw);
322
323 *phy_data = 0;
324
325 /* only l2c_b2 & l1d_2 could use slow clock */
326 if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) &&
327 hw->hibernate)
328 clk_sel = MDIO_CTRL_CLK_25_128;
329 if (ext) {
330 val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg);
331 AT_WRITE_REG(hw, REG_MDIO_EXTN, val);
332 val = MDIO_CTRL_SPRES_PRMBL |
333 FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
334 MDIO_CTRL_START |
335 MDIO_CTRL_MODE_EXT |
336 MDIO_CTRL_OP_READ;
337 } else {
338 val = MDIO_CTRL_SPRES_PRMBL |
339 FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
340 FIELDX(MDIO_CTRL_REG, reg) |
341 MDIO_CTRL_START |
342 MDIO_CTRL_OP_READ;
301 } 343 }
302 if (!(val & (MDIO_START | MDIO_BUSY))) { 344 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
303 *phy_data = (u16)val; 345
304 return 0; 346 if (!atl1c_wait_mdio_idle(hw))
347 return -1;
348
349 AT_READ_REG(hw, REG_MDIO_CTRL, &val);
350 *phy_data = (u16)FIELD_GETX(val, MDIO_CTRL_DATA);
351
352 atl1c_start_phy_polling(hw, clk_sel);
353
354 return 0;
355}
356
357/*
358 * atl1c_write_phy_core
359 * core funtion to write to register in PHY via MDIO control regsiter.
360 * ext: extension register (see IEEE 802.3)
361 * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
362 * reg: reg to write
363 */
364int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
365 u16 reg, u16 phy_data)
366{
367 u32 val;
368 u16 clk_sel = MDIO_CTRL_CLK_25_4;
369
370 atl1c_stop_phy_polling(hw);
371
372
373 /* only l2c_b2 & l1d_2 could use slow clock */
374 if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) &&
375 hw->hibernate)
376 clk_sel = MDIO_CTRL_CLK_25_128;
377
378 if (ext) {
379 val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg);
380 AT_WRITE_REG(hw, REG_MDIO_EXTN, val);
381 val = MDIO_CTRL_SPRES_PRMBL |
382 FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
383 FIELDX(MDIO_CTRL_DATA, phy_data) |
384 MDIO_CTRL_START |
385 MDIO_CTRL_MODE_EXT;
386 } else {
387 val = MDIO_CTRL_SPRES_PRMBL |
388 FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
389 FIELDX(MDIO_CTRL_DATA, phy_data) |
390 FIELDX(MDIO_CTRL_REG, reg) |
391 MDIO_CTRL_START;
305 } 392 }
393 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
306 394
307 return -1; 395 if (!atl1c_wait_mdio_idle(hw))
396 return -1;
397
398 atl1c_start_phy_polling(hw, clk_sel);
399
400 return 0;
401}
402
403/*
404 * Reads the value from a PHY register
405 * hw - Struct containing variables accessed by shared code
406 * reg_addr - address of the PHY register to read
407 */
408int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
409{
410 return atl1c_read_phy_core(hw, false, 0, reg_addr, phy_data);
308} 411}
309 412
310/* 413/*
@@ -315,27 +418,47 @@ int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
315 */ 418 */
316int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data) 419int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
317{ 420{
318 int i; 421 return atl1c_write_phy_core(hw, false, 0, reg_addr, phy_data);
319 u32 val; 422}
320 423
321 val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | 424/* read from PHY extension register */
322 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | 425int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
323 MDIO_SUP_PREAMBLE | MDIO_START | 426 u16 reg_addr, u16 *phy_data)
324 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; 427{
428 return atl1c_read_phy_core(hw, true, dev_addr, reg_addr, phy_data);
429}
325 430
326 AT_WRITE_REG(hw, REG_MDIO_CTRL, val); 431/* write to PHY extension register */
432int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
433 u16 reg_addr, u16 phy_data)
434{
435 return atl1c_write_phy_core(hw, true, dev_addr, reg_addr, phy_data);
436}
327 437
328 for (i = 0; i < MDIO_WAIT_TIMES; i++) { 438int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
329 udelay(2); 439{
330 AT_READ_REG(hw, REG_MDIO_CTRL, &val); 440 int err;
331 if (!(val & (MDIO_START | MDIO_BUSY)))
332 break;
333 }
334 441
335 if (!(val & (MDIO_START | MDIO_BUSY))) 442 err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr);
336 return 0; 443 if (unlikely(err))
444 return err;
445 else
446 err = atl1c_read_phy_reg(hw, MII_DBG_DATA, phy_data);
337 447
338 return -1; 448 return err;
449}
450
451int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data)
452{
453 int err;
454
455 err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr);
456 if (unlikely(err))
457 return err;
458 else
459 err = atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
460
461 return err;
339} 462}
340 463
341/* 464/*
@@ -380,119 +503,100 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
380 503
381void atl1c_phy_disable(struct atl1c_hw *hw) 504void atl1c_phy_disable(struct atl1c_hw *hw)
382{ 505{
383 AT_WRITE_REGW(hw, REG_GPHY_CTRL, 506 atl1c_power_saving(hw, 0);
384 GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
385} 507}
386 508
387static void atl1c_phy_magic_data(struct atl1c_hw *hw)
388{
389 u16 data;
390
391 data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
392 ((1 & ANA_INTERVAL_SEL_TIMER_MASK) <<
393 ANA_INTERVAL_SEL_TIMER_SHIFT);
394
395 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_18);
396 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
397
398 data = (2 & ANA_SERDES_CDR_BW_MASK) | ANA_MS_PAD_DBG |
399 ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
400 ANA_SERDES_EN_LCKDT;
401
402 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_5);
403 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
404
405 data = (44 & ANA_LONG_CABLE_TH_100_MASK) |
406 ((33 & ANA_SHORT_CABLE_TH_100_MASK) <<
407 ANA_SHORT_CABLE_TH_100_SHIFT) | ANA_BP_BAD_LINK_ACCUM |
408 ANA_BP_SMALL_BW;
409
410 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_54);
411 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
412
413 data = (11 & ANA_IECHO_ADJ_MASK) | ((11 & ANA_IECHO_ADJ_MASK) <<
414 ANA_IECHO_ADJ_2_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
415 ANA_IECHO_ADJ_1_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
416 ANA_IECHO_ADJ_0_SHIFT);
417
418 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_4);
419 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
420
421 data = ANA_RESTART_CAL | ((7 & ANA_MANUL_SWICH_ON_MASK) <<
422 ANA_MANUL_SWICH_ON_SHIFT) | ANA_MAN_ENABLE |
423 ANA_SEL_HSP | ANA_EN_HB | ANA_OEN_125M;
424
425 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_0);
426 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
427
428 if (hw->ctrl_flags & ATL1C_HIB_DISABLE) {
429 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_41);
430 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
431 return;
432 data &= ~ANA_TOP_PS_EN;
433 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
434
435 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_11);
436 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
437 return;
438 data &= ~ANA_PS_HIB_EN;
439 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
440 }
441}
442 509
443int atl1c_phy_reset(struct atl1c_hw *hw) 510int atl1c_phy_reset(struct atl1c_hw *hw)
444{ 511{
445 struct atl1c_adapter *adapter = hw->adapter; 512 struct atl1c_adapter *adapter = hw->adapter;
446 struct pci_dev *pdev = adapter->pdev; 513 struct pci_dev *pdev = adapter->pdev;
447 u16 phy_data; 514 u16 phy_data;
448 u32 phy_ctrl_data = GPHY_CTRL_DEFAULT; 515 u32 phy_ctrl_data, lpi_ctrl;
449 u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN;
450 int err; 516 int err;
451 517
452 if (hw->ctrl_flags & ATL1C_HIB_DISABLE) 518 /* reset PHY core */
453 phy_ctrl_data &= ~GPHY_CTRL_HIB_EN; 519 AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl_data);
454 520 phy_ctrl_data &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_PHY_IDDQ |
521 GPHY_CTRL_GATE_25M_EN | GPHY_CTRL_PWDOWN_HW | GPHY_CTRL_CLS);
522 phy_ctrl_data |= GPHY_CTRL_SEL_ANA_RST;
523 if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE))
524 phy_ctrl_data |= (GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE);
525 else
526 phy_ctrl_data &= ~(GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE);
455 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data); 527 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
456 AT_WRITE_FLUSH(hw); 528 AT_WRITE_FLUSH(hw);
457 msleep(40); 529 udelay(10);
458 phy_ctrl_data |= GPHY_CTRL_EXT_RESET; 530 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data | GPHY_CTRL_EXT_RESET);
459 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
460 AT_WRITE_FLUSH(hw); 531 AT_WRITE_FLUSH(hw);
461 msleep(10); 532 udelay(10 * GPHY_CTRL_EXT_RST_TO); /* delay 800us */
462 533
534 /* switch clock */
463 if (hw->nic_type == athr_l2c_b) { 535 if (hw->nic_type == athr_l2c_b) {
464 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x0A); 536 atl1c_read_phy_dbg(hw, MIIDBG_CFGLPSPD, &phy_data);
465 atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); 537 atl1c_write_phy_dbg(hw, MIIDBG_CFGLPSPD,
466 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xDFFF); 538 phy_data & ~CFGLPSPD_RSTCNT_CLK125SW);
467 } 539 }
468 540
469 if (hw->nic_type == athr_l2c_b || 541 /* tx-half amplitude issue fix */
470 hw->nic_type == athr_l2c_b2 || 542 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) {
471 hw->nic_type == athr_l1d || 543 atl1c_read_phy_dbg(hw, MIIDBG_CABLE1TH_DET, &phy_data);
472 hw->nic_type == athr_l1d_2) { 544 phy_data |= CABLE1TH_DET_EN;
473 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); 545 atl1c_write_phy_dbg(hw, MIIDBG_CABLE1TH_DET, phy_data);
474 atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
475 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
476 msleep(20);
477 } 546 }
478 if (hw->nic_type == athr_l1d) { 547
479 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); 548 /* clear bit3 of dbgport 3B to lower voltage */
480 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D); 549 if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) {
550 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) {
551 atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
552 phy_data &= ~VOLT_CTRL_SWLOWEST;
553 atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
554 }
555 /* power saving config */
556 phy_data =
557 hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ?
558 L1D_LEGCYPS_DEF : L1C_LEGCYPS_DEF;
559 atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, phy_data);
560 /* hib */
561 atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL,
562 SYSMODCTRL_IECHOADJ_DEF);
563 } else {
564 /* disable pws */
565 atl1c_read_phy_dbg(hw, MIIDBG_LEGCYPS, &phy_data);
566 atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS,
567 phy_data & ~LEGCYPS_EN);
568 /* disable hibernate */
569 atl1c_read_phy_dbg(hw, MIIDBG_HIBNEG, &phy_data);
570 atl1c_write_phy_dbg(hw, MIIDBG_HIBNEG,
571 phy_data & HIBNEG_PSHIB_EN);
481 } 572 }
482 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2 573 /* disable AZ(EEE) by default */
483 || hw->nic_type == athr_l2c) { 574 if (hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ||
484 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); 575 hw->nic_type == athr_l2c_b2) {
485 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD); 576 AT_READ_REG(hw, REG_LPI_CTRL, &lpi_ctrl);
577 AT_WRITE_REG(hw, REG_LPI_CTRL, lpi_ctrl & ~LPI_CTRL_EN);
578 atl1c_write_phy_ext(hw, MIIEXT_ANEG, MIIEXT_LOCAL_EEEADV, 0);
579 atl1c_write_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL3,
580 L2CB_CLDCTRL3);
486 } 581 }
487 err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data); 582
583 /* other debug port to set */
584 atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, ANACTRL_DEF);
585 atl1c_write_phy_dbg(hw, MIIDBG_SRDSYSMOD, SRDSYSMOD_DEF);
586 atl1c_write_phy_dbg(hw, MIIDBG_TST10BTCFG, TST10BTCFG_DEF);
587 /* UNH-IOL test issue, set bit7 */
588 atl1c_write_phy_dbg(hw, MIIDBG_TST100BTCFG,
589 TST100BTCFG_DEF | TST100BTCFG_LITCH_EN);
590
591 /* set phy interrupt mask */
592 phy_data = IER_LINK_UP | IER_LINK_DOWN;
593 err = atl1c_write_phy_reg(hw, MII_IER, phy_data);
488 if (err) { 594 if (err) {
489 if (netif_msg_hw(adapter)) 595 if (netif_msg_hw(adapter))
490 dev_err(&pdev->dev, 596 dev_err(&pdev->dev,
491 "Error enable PHY linkChange Interrupt\n"); 597 "Error enable PHY linkChange Interrupt\n");
492 return err; 598 return err;
493 } 599 }
494 if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
495 atl1c_phy_magic_data(hw);
496 return 0; 600 return 0;
497} 601}
498 602
@@ -589,7 +693,8 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
589 return 0; 693 return 0;
590} 694}
591 695
592int atl1c_phy_power_saving(struct atl1c_hw *hw) 696/* select one link mode to get lower power consumption */
697int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
593{ 698{
594 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 699 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
595 struct pci_dev *pdev = adapter->pdev; 700 struct pci_dev *pdev = adapter->pdev;
@@ -660,3 +765,101 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
660 765
661 return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); 766 return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
662} 767}
768
769int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
770{
771 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
772 struct pci_dev *pdev = adapter->pdev;
773 u32 master_ctrl, mac_ctrl, phy_ctrl;
774 u32 wol_ctrl, speed;
775 u16 phy_data;
776
777 wol_ctrl = 0;
778 speed = adapter->link_speed == SPEED_1000 ?
779 MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100;
780
781 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl);
782 AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl);
783 AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl);
784
785 master_ctrl &= ~MASTER_CTRL_CLK_SEL_DIS;
786 mac_ctrl = FIELD_SETX(mac_ctrl, MAC_CTRL_SPEED, speed);
787 mac_ctrl &= ~(MAC_CTRL_DUPLX | MAC_CTRL_RX_EN | MAC_CTRL_TX_EN);
788 if (adapter->link_duplex == FULL_DUPLEX)
789 mac_ctrl |= MAC_CTRL_DUPLX;
790 phy_ctrl &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_CLS);
791 phy_ctrl |= GPHY_CTRL_SEL_ANA_RST | GPHY_CTRL_HIB_PULSE |
792 GPHY_CTRL_HIB_EN;
793 if (!wufc) { /* without WoL */
794 master_ctrl |= MASTER_CTRL_CLK_SEL_DIS;
795 phy_ctrl |= GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PWDOWN_HW;
796 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl);
797 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl);
798 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl);
799 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
800 hw->phy_configured = false; /* re-init PHY when resume */
801 return 0;
802 }
803 phy_ctrl |= GPHY_CTRL_EXT_RESET;
804 if (wufc & AT_WUFC_MAG) {
805 mac_ctrl |= MAC_CTRL_RX_EN | MAC_CTRL_BC_EN;
806 wol_ctrl |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
807 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V11)
808 wol_ctrl |= WOL_PATTERN_EN | WOL_PATTERN_PME_EN;
809 }
810 if (wufc & AT_WUFC_LNKC) {
811 wol_ctrl |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
812 if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
813 dev_dbg(&pdev->dev, "%s: write phy MII_IER faild.\n",
814 atl1c_driver_name);
815 }
816 }
817 /* clear PHY interrupt */
818 atl1c_read_phy_reg(hw, MII_ISR, &phy_data);
819
820 dev_dbg(&pdev->dev, "%s: suspend MAC=%x,MASTER=%x,PHY=0x%x,WOL=%x\n",
821 atl1c_driver_name, mac_ctrl, master_ctrl, phy_ctrl, wol_ctrl);
822 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl);
823 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl);
824 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl);
825 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl);
826
827 return 0;
828}
829
830
831/* configure phy after Link change Event */
832void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed)
833{
834 u16 phy_val;
835 bool adj_thresh = false;
836
837 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ||
838 hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2)
839 adj_thresh = true;
840
841 if (link_speed != SPEED_0) { /* link up */
842 /* az with brcm, half-amp */
843 if (hw->nic_type == athr_l1d_2) {
844 atl1c_read_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL6,
845 &phy_val);
846 phy_val = FIELD_GETX(phy_val, CLDCTRL6_CAB_LEN);
847 phy_val = phy_val > CLDCTRL6_CAB_LEN_SHORT ?
848 AZ_ANADECT_LONG : AZ_ANADECT_DEF;
849 atl1c_write_phy_dbg(hw, MIIDBG_AZ_ANADECT, phy_val);
850 }
851 /* threshold adjust */
852 if (adj_thresh && link_speed == SPEED_100 && hw->msi_lnkpatch) {
853 atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, L1D_MSE16DB_UP);
854 atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL,
855 L1D_SYSMODCTRL_IECHOADJ_DEF);
856 }
857 } else { /* link down */
858 if (adj_thresh && hw->msi_lnkpatch) {
859 atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL,
860 SYSMODCTRL_IECHOADJ_DEF);
861 atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB,
862 L1D_MSE16DB_DOWN);
863 }
864 }
865}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index 655fc6c4a8a4..17d935bdde0a 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -25,12 +25,18 @@
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/mii.h> 26#include <linux/mii.h>
27 27
28#define FIELD_GETX(_x, _name) ((_x) >> (_name##_SHIFT) & (_name##_MASK))
29#define FIELD_SETX(_x, _name, _v) \
30(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\
31(((_v) & (_name##_MASK)) << (_name##_SHIFT)))
32#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT))
33
28struct atl1c_adapter; 34struct atl1c_adapter;
29struct atl1c_hw; 35struct atl1c_hw;
30 36
31/* function prototype */ 37/* function prototype */
32void atl1c_phy_disable(struct atl1c_hw *hw); 38void atl1c_phy_disable(struct atl1c_hw *hw);
33void atl1c_hw_set_mac_addr(struct atl1c_hw *hw); 39void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr);
34int atl1c_phy_reset(struct atl1c_hw *hw); 40int atl1c_phy_reset(struct atl1c_hw *hw);
35int atl1c_read_mac_addr(struct atl1c_hw *hw); 41int atl1c_read_mac_addr(struct atl1c_hw *hw);
36int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex); 42int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex);
@@ -42,47 +48,45 @@ bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value);
42int atl1c_phy_init(struct atl1c_hw *hw); 48int atl1c_phy_init(struct atl1c_hw *hw);
43int atl1c_check_eeprom_exist(struct atl1c_hw *hw); 49int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
44int atl1c_restart_autoneg(struct atl1c_hw *hw); 50int atl1c_restart_autoneg(struct atl1c_hw *hw);
45int atl1c_phy_power_saving(struct atl1c_hw *hw); 51int atl1c_phy_to_ps_link(struct atl1c_hw *hw);
52int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc);
53bool atl1c_wait_mdio_idle(struct atl1c_hw *hw);
54void atl1c_stop_phy_polling(struct atl1c_hw *hw);
55void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel);
56int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
57 u16 reg, u16 *phy_data);
58int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
59 u16 reg, u16 phy_data);
60int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
61 u16 reg_addr, u16 *phy_data);
62int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
63 u16 reg_addr, u16 phy_data);
64int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data);
65int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data);
66void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
67
68/* hw-ids */
69#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
70#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
71#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
72#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
73#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
74#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
75#define L2CB_V10 0xc0
76#define L2CB_V11 0xc1
77
46/* register definition */ 78/* register definition */
47#define REG_DEVICE_CAP 0x5C 79#define REG_DEVICE_CAP 0x5C
48#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 80#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
49#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 81#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0
50 82
51#define REG_DEVICE_CTRL 0x60 83#define DEVICE_CTRL_MAXRRS_MIN 2
52#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7
53#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5
54#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7
55#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12
56 84
57#define REG_LINK_CTRL 0x68 85#define REG_LINK_CTRL 0x68
58#define LINK_CTRL_L0S_EN 0x01 86#define LINK_CTRL_L0S_EN 0x01
59#define LINK_CTRL_L1_EN 0x02 87#define LINK_CTRL_L1_EN 0x02
60#define LINK_CTRL_EXT_SYNC 0x80 88#define LINK_CTRL_EXT_SYNC 0x80
61 89
62#define REG_VPD_CAP 0x6C
63#define VPD_CAP_ID_MASK 0xff
64#define VPD_CAP_ID_SHIFT 0
65#define VPD_CAP_NEXT_PTR_MASK 0xFF
66#define VPD_CAP_NEXT_PTR_SHIFT 8
67#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
68#define VPD_CAP_VPD_ADDR_SHIFT 16
69#define VPD_CAP_VPD_FLAG 0x80000000
70
71#define REG_VPD_DATA 0x70
72
73#define REG_PCIE_UC_SEVERITY 0x10C
74#define PCIE_UC_SERVRITY_TRN 0x00000001
75#define PCIE_UC_SERVRITY_DLP 0x00000010
76#define PCIE_UC_SERVRITY_PSN_TLP 0x00001000
77#define PCIE_UC_SERVRITY_FCP 0x00002000
78#define PCIE_UC_SERVRITY_CPL_TO 0x00004000
79#define PCIE_UC_SERVRITY_CA 0x00008000
80#define PCIE_UC_SERVRITY_UC 0x00010000
81#define PCIE_UC_SERVRITY_ROV 0x00020000
82#define PCIE_UC_SERVRITY_MLFP 0x00040000
83#define PCIE_UC_SERVRITY_ECRC 0x00080000
84#define PCIE_UC_SERVRITY_UR 0x00100000
85
86#define REG_DEV_SERIALNUM_CTRL 0x200 90#define REG_DEV_SERIALNUM_CTRL 0x200
87#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */ 91#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */
88#define REG_DEV_MAC_SEL_SHIFT 0 92#define REG_DEV_MAC_SEL_SHIFT 0
@@ -90,25 +94,17 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
90#define REG_DEV_SERIAL_NUM_EN_SHIFT 1 94#define REG_DEV_SERIAL_NUM_EN_SHIFT 1
91 95
92#define REG_TWSI_CTRL 0x218 96#define REG_TWSI_CTRL 0x218
97#define TWSI_CTLR_FREQ_MASK 0x3UL
98#define TWSI_CTRL_FREQ_SHIFT 24
99#define TWSI_CTRL_FREQ_100K 0
100#define TWSI_CTRL_FREQ_200K 1
101#define TWSI_CTRL_FREQ_300K 2
102#define TWSI_CTRL_FREQ_400K 3
103#define TWSI_CTRL_LD_EXIST BIT(23)
104#define TWSI_CTRL_HW_LDSTAT BIT(12) /* 0:finish,1:in progress */
105#define TWSI_CTRL_SW_LDSTART BIT(11)
93#define TWSI_CTRL_LD_OFFSET_MASK 0xFF 106#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
94#define TWSI_CTRL_LD_OFFSET_SHIFT 0 107#define TWSI_CTRL_LD_OFFSET_SHIFT 0
95#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
96#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
97#define TWSI_CTRL_SW_LDSTART 0x800
98#define TWSI_CTRL_HW_LDSTART 0x1000
99#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
100#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
101#define TWSI_CTRL_LD_EXIST 0x400000
102#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
103#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
104#define TWSI_CTRL_FREQ_SEL_100K 0
105#define TWSI_CTRL_FREQ_SEL_200K 1
106#define TWSI_CTRL_FREQ_SEL_300K 2
107#define TWSI_CTRL_FREQ_SEL_400K 3
108#define TWSI_CTRL_SMB_SLV_ADDR
109#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
110#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
111
112 108
113#define REG_PCIE_DEV_MISC_CTRL 0x21C 109#define REG_PCIE_DEV_MISC_CTRL 0x21C
114#define PCIE_DEV_MISC_EXT_PIPE 0x2 110#define PCIE_DEV_MISC_EXT_PIPE 0x2
@@ -118,16 +114,23 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
118#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10 114#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10
119 115
120#define REG_PCIE_PHYMISC 0x1000 116#define REG_PCIE_PHYMISC 0x1000
121#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 117#define PCIE_PHYMISC_FORCE_RCV_DET BIT(2)
118#define PCIE_PHYMISC_NFTS_MASK 0xFFUL
119#define PCIE_PHYMISC_NFTS_SHIFT 16
122 120
123#define REG_PCIE_PHYMISC2 0x1004 121#define REG_PCIE_PHYMISC2 0x1004
124#define PCIE_PHYMISC2_SERDES_CDR_MASK 0x3 122#define PCIE_PHYMISC2_L0S_TH_MASK 0x3UL
125#define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16 123#define PCIE_PHYMISC2_L0S_TH_SHIFT 18
126#define PCIE_PHYMISC2_SERDES_TH_MASK 0x3 124#define L2CB1_PCIE_PHYMISC2_L0S_TH 3
127#define PCIE_PHYMISC2_SERDES_TH_SHIFT 18 125#define PCIE_PHYMISC2_CDR_BW_MASK 0x3UL
126#define PCIE_PHYMISC2_CDR_BW_SHIFT 16
127#define L2CB1_PCIE_PHYMISC2_CDR_BW 3
128 128
129#define REG_TWSI_DEBUG 0x1108 129#define REG_TWSI_DEBUG 0x1108
130#define TWSI_DEBUG_DEV_EXIST 0x20000000 130#define TWSI_DEBUG_DEV_EXIST BIT(29)
131
132#define REG_DMA_DBG 0x1114
133#define DMA_DBG_VENDOR_MSG BIT(0)
131 134
132#define REG_EEPROM_CTRL 0x12C0 135#define REG_EEPROM_CTRL 0x12C0
133#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF 136#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF
@@ -140,56 +143,81 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
140#define REG_EEPROM_DATA_LO 0x12C4 143#define REG_EEPROM_DATA_LO 0x12C4
141 144
142#define REG_OTP_CTRL 0x12F0 145#define REG_OTP_CTRL 0x12F0
143#define OTP_CTRL_CLK_EN 0x0002 146#define OTP_CTRL_CLK_EN BIT(1)
144 147
145#define REG_PM_CTRL 0x12F8 148#define REG_PM_CTRL 0x12F8
146#define PM_CTRL_SDES_EN 0x00000001 149#define PM_CTRL_HOTRST BIT(31)
147#define PM_CTRL_RBER_EN 0x00000002 150#define PM_CTRL_MAC_ASPM_CHK BIT(30) /* L0s/L1 dis by MAC based on
148#define PM_CTRL_CLK_REQ_EN 0x00000004 151 * thrghput(setting in 15A0) */
149#define PM_CTRL_ASPM_L1_EN 0x00000008 152#define PM_CTRL_SA_DLY_EN BIT(29)
150#define PM_CTRL_SERDES_L1_EN 0x00000010 153#define PM_CTRL_L0S_BUFSRX_EN BIT(28)
151#define PM_CTRL_SERDES_PLL_L1_EN 0x00000020 154#define PM_CTRL_LCKDET_TIMER_MASK 0xFUL
152#define PM_CTRL_SERDES_PD_EX_L1 0x00000040
153#define PM_CTRL_SERDES_BUDS_RX_L1_EN 0x00000080
154#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xF
155#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8
156#define PM_CTRL_ASPM_L0S_EN 0x00001000
157#define PM_CTRL_CLK_SWH_L1 0x00002000
158#define PM_CTRL_CLK_PWM_VER1_1 0x00004000
159#define PM_CTRL_RCVR_WT_TIMER 0x00008000
160#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF
161#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
162#define PM_CTRL_PM_REQ_TIMER_MASK 0xF
163#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
164#define PM_CTRL_LCKDET_TIMER_MASK 0xF
165#define PM_CTRL_LCKDET_TIMER_SHIFT 24 155#define PM_CTRL_LCKDET_TIMER_SHIFT 24
166#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000 156#define PM_CTRL_LCKDET_TIMER_DEF 0xC
167#define PM_CTRL_SA_DLY_EN 0x20000000 157#define PM_CTRL_PM_REQ_TIMER_MASK 0xFUL
168#define PM_CTRL_MAC_ASPM_CHK 0x40000000 158#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 /* pm_request_l1 time > @
169#define PM_CTRL_HOTRST 0x80000000 159 * ->L0s not L1 */
160#define PM_CTRL_PM_REQ_TO_DEF 0xF
161#define PMCTRL_TXL1_AFTER_L0S BIT(19) /* l1dv2.0+ */
162#define L1D_PMCTRL_L1_ENTRY_TM_MASK 7UL /* l1dv2.0+, 3bits */
163#define L1D_PMCTRL_L1_ENTRY_TM_SHIFT 16
164#define L1D_PMCTRL_L1_ENTRY_TM_DIS 0
165#define L1D_PMCTRL_L1_ENTRY_TM_2US 1
166#define L1D_PMCTRL_L1_ENTRY_TM_4US 2
167#define L1D_PMCTRL_L1_ENTRY_TM_8US 3
168#define L1D_PMCTRL_L1_ENTRY_TM_16US 4
169#define L1D_PMCTRL_L1_ENTRY_TM_24US 5
170#define L1D_PMCTRL_L1_ENTRY_TM_32US 6
171#define L1D_PMCTRL_L1_ENTRY_TM_63US 7
172#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xFUL /* l1C 4bits */
173#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
174#define L2CB1_PM_CTRL_L1_ENTRY_TM 7
175#define L1C_PM_CTRL_L1_ENTRY_TM 0xF
176#define PM_CTRL_RCVR_WT_TIMER BIT(15) /* 1:1us, 0:2ms */
177#define PM_CTRL_CLK_PWM_VER1_1 BIT(14) /* 0:1.0a,1:1.1 */
178#define PM_CTRL_CLK_SWH_L1 BIT(13) /* en pcie clk sw in L1 */
179#define PM_CTRL_ASPM_L0S_EN BIT(12)
180#define PM_CTRL_RXL1_AFTER_L0S BIT(11) /* l1dv2.0+ */
181#define L1D_PMCTRL_L0S_TIMER_MASK 7UL /* l1d2.0+, 3bits*/
182#define L1D_PMCTRL_L0S_TIMER_SHIFT 8
183#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xFUL /* l1c, 4bits */
184#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8
185#define PM_CTRL_SERDES_BUFS_RX_L1_EN BIT(7)
186#define PM_CTRL_SERDES_PD_EX_L1 BIT(6) /* power down serdes rx */
187#define PM_CTRL_SERDES_PLL_L1_EN BIT(5)
188#define PM_CTRL_SERDES_L1_EN BIT(4)
189#define PM_CTRL_ASPM_L1_EN BIT(3)
190#define PM_CTRL_CLK_REQ_EN BIT(2)
191#define PM_CTRL_RBER_EN BIT(1)
192#define PM_CTRL_SPRSDWER_EN BIT(0)
170 193
171#define REG_LTSSM_ID_CTRL 0x12FC 194#define REG_LTSSM_ID_CTRL 0x12FC
172#define LTSSM_ID_EN_WRO 0x1000 195#define LTSSM_ID_EN_WRO 0x1000
196
197
173/* Selene Master Control Register */ 198/* Selene Master Control Register */
174#define REG_MASTER_CTRL 0x1400 199#define REG_MASTER_CTRL 0x1400
175#define MASTER_CTRL_SOFT_RST 0x1 200#define MASTER_CTRL_OTP_SEL BIT(31)
176#define MASTER_CTRL_TEST_MODE_MASK 0x3 201#define MASTER_DEV_NUM_MASK 0x7FUL
177#define MASTER_CTRL_TEST_MODE_SHIFT 2 202#define MASTER_DEV_NUM_SHIFT 24
178#define MASTER_CTRL_BERT_START 0x10 203#define MASTER_REV_NUM_MASK 0xFFUL
179#define MASTER_CTRL_OOB_DIS_OFF 0x40 204#define MASTER_REV_NUM_SHIFT 16
180#define MASTER_CTRL_SA_TIMER_EN 0x80 205#define MASTER_CTRL_INT_RDCLR BIT(14)
181#define MASTER_CTRL_MTIMER_EN 0x100 206#define MASTER_CTRL_CLK_SEL_DIS BIT(12) /* 1:alwys sel pclk from
182#define MASTER_CTRL_MANUAL_INT 0x200 207 * serdes, not sw to 25M */
183#define MASTER_CTRL_TX_ITIMER_EN 0x400 208#define MASTER_CTRL_RX_ITIMER_EN BIT(11) /* IRQ MODURATION FOR RX */
184#define MASTER_CTRL_RX_ITIMER_EN 0x800 209#define MASTER_CTRL_TX_ITIMER_EN BIT(10) /* MODURATION FOR TX/RX */
185#define MASTER_CTRL_CLK_SEL_DIS 0x1000 210#define MASTER_CTRL_MANU_INT BIT(9) /* SOFT MANUAL INT */
186#define MASTER_CTRL_CLK_SWH_MODE 0x2000 211#define MASTER_CTRL_MANUTIMER_EN BIT(8)
187#define MASTER_CTRL_INT_RDCLR 0x4000 212#define MASTER_CTRL_SA_TIMER_EN BIT(7) /* SYS ALIVE TIMER EN */
188#define MASTER_CTRL_REV_NUM_SHIFT 16 213#define MASTER_CTRL_OOB_DIS BIT(6) /* OUT OF BOX DIS */
189#define MASTER_CTRL_REV_NUM_MASK 0xff 214#define MASTER_CTRL_WAKEN_25M BIT(5) /* WAKE WO. PCIE CLK */
190#define MASTER_CTRL_DEV_ID_SHIFT 24 215#define MASTER_CTRL_BERT_START BIT(4)
191#define MASTER_CTRL_DEV_ID_MASK 0x7f 216#define MASTER_PCIE_TSTMOD_MASK 3UL
192#define MASTER_CTRL_OTP_SEL 0x80000000 217#define MASTER_PCIE_TSTMOD_SHIFT 2
218#define MASTER_PCIE_RST BIT(1)
219#define MASTER_CTRL_SOFT_RST BIT(0) /* RST MAC & DMA */
220#define DMA_MAC_RST_TO 50
193 221
194/* Timer Initial Value Register */ 222/* Timer Initial Value Register */
195#define REG_MANUAL_TIMER_INIT 0x1404 223#define REG_MANUAL_TIMER_INIT 0x1404
@@ -201,87 +229,85 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
201#define IRQ_MODRT_RX_TIMER_SHIFT 16 229#define IRQ_MODRT_RX_TIMER_SHIFT 16
202 230
203#define REG_GPHY_CTRL 0x140C 231#define REG_GPHY_CTRL 0x140C
204#define GPHY_CTRL_EXT_RESET 0x1 232#define GPHY_CTRL_ADDR_MASK 0x1FUL
205#define GPHY_CTRL_RTL_MODE 0x2 233#define GPHY_CTRL_ADDR_SHIFT 19
206#define GPHY_CTRL_LED_MODE 0x4 234#define GPHY_CTRL_BP_VLTGSW BIT(18)
207#define GPHY_CTRL_ANEG_NOW 0x8 235#define GPHY_CTRL_100AB_EN BIT(17)
208#define GPHY_CTRL_REV_ANEG 0x10 236#define GPHY_CTRL_10AB_EN BIT(16)
209#define GPHY_CTRL_GATE_25M_EN 0x20 237#define GPHY_CTRL_PHY_PLL_BYPASS BIT(15)
210#define GPHY_CTRL_LPW_EXIT 0x40 238#define GPHY_CTRL_PWDOWN_HW BIT(14) /* affect MAC&PHY, to low pw */
211#define GPHY_CTRL_PHY_IDDQ 0x80 239#define GPHY_CTRL_PHY_PLL_ON BIT(13) /* 1:pll always on, 0:can sw */
212#define GPHY_CTRL_PHY_IDDQ_DIS 0x100 240#define GPHY_CTRL_SEL_ANA_RST BIT(12)
213#define GPHY_CTRL_GIGA_DIS 0x200 241#define GPHY_CTRL_HIB_PULSE BIT(11)
214#define GPHY_CTRL_HIB_EN 0x400 242#define GPHY_CTRL_HIB_EN BIT(10)
215#define GPHY_CTRL_HIB_PULSE 0x800 243#define GPHY_CTRL_GIGA_DIS BIT(9)
216#define GPHY_CTRL_SEL_ANA_RST 0x1000 244#define GPHY_CTRL_PHY_IDDQ_DIS BIT(8) /* pw on RST */
217#define GPHY_CTRL_PHY_PLL_ON 0x2000 245#define GPHY_CTRL_PHY_IDDQ BIT(7) /* bit8 affect bit7 while rb */
218#define GPHY_CTRL_PWDOWN_HW 0x4000 246#define GPHY_CTRL_LPW_EXIT BIT(6)
219#define GPHY_CTRL_PHY_PLL_BYPASS 0x8000 247#define GPHY_CTRL_GATE_25M_EN BIT(5)
220 248#define GPHY_CTRL_REV_ANEG BIT(4)
221#define GPHY_CTRL_DEFAULT ( \ 249#define GPHY_CTRL_ANEG_NOW BIT(3)
222 GPHY_CTRL_SEL_ANA_RST |\ 250#define GPHY_CTRL_LED_MODE BIT(2)
223 GPHY_CTRL_HIB_PULSE |\ 251#define GPHY_CTRL_RTL_MODE BIT(1)
224 GPHY_CTRL_HIB_EN) 252#define GPHY_CTRL_EXT_RESET BIT(0) /* 1:out of DSP RST status */
225 253#define GPHY_CTRL_EXT_RST_TO 80 /* 800us atmost */
226#define GPHY_CTRL_PW_WOL_DIS ( \ 254#define GPHY_CTRL_CLS (\
227 GPHY_CTRL_SEL_ANA_RST |\ 255 GPHY_CTRL_LED_MODE |\
228 GPHY_CTRL_HIB_PULSE |\ 256 GPHY_CTRL_100AB_EN |\
229 GPHY_CTRL_HIB_EN |\ 257 GPHY_CTRL_PHY_PLL_ON)
230 GPHY_CTRL_PWDOWN_HW |\ 258
231 GPHY_CTRL_PHY_IDDQ)
232
233#define GPHY_CTRL_POWER_SAVING ( \
234 GPHY_CTRL_SEL_ANA_RST |\
235 GPHY_CTRL_HIB_EN |\
236 GPHY_CTRL_HIB_PULSE |\
237 GPHY_CTRL_PWDOWN_HW |\
238 GPHY_CTRL_PHY_IDDQ)
239/* Block IDLE Status Register */ 259/* Block IDLE Status Register */
240#define REG_IDLE_STATUS 0x1410 260#define REG_IDLE_STATUS 0x1410
241#define IDLE_STATUS_MASK 0x00FF 261#define IDLE_STATUS_SFORCE_MASK 0xFUL
242#define IDLE_STATUS_RXMAC_NO_IDLE 0x1 262#define IDLE_STATUS_SFORCE_SHIFT 14
243#define IDLE_STATUS_TXMAC_NO_IDLE 0x2 263#define IDLE_STATUS_CALIB_DONE BIT(13)
244#define IDLE_STATUS_RXQ_NO_IDLE 0x4 264#define IDLE_STATUS_CALIB_RES_MASK 0x1FUL
245#define IDLE_STATUS_TXQ_NO_IDLE 0x8 265#define IDLE_STATUS_CALIB_RES_SHIFT 8
246#define IDLE_STATUS_DMAR_NO_IDLE 0x10 266#define IDLE_STATUS_CALIBERR_MASK 0xFUL
247#define IDLE_STATUS_DMAW_NO_IDLE 0x20 267#define IDLE_STATUS_CALIBERR_SHIFT 4
248#define IDLE_STATUS_SMB_NO_IDLE 0x40 268#define IDLE_STATUS_TXQ_BUSY BIT(3)
249#define IDLE_STATUS_CMB_NO_IDLE 0x80 269#define IDLE_STATUS_RXQ_BUSY BIT(2)
270#define IDLE_STATUS_TXMAC_BUSY BIT(1)
271#define IDLE_STATUS_RXMAC_BUSY BIT(0)
272#define IDLE_STATUS_MASK (\
273 IDLE_STATUS_TXQ_BUSY |\
274 IDLE_STATUS_RXQ_BUSY |\
275 IDLE_STATUS_TXMAC_BUSY |\
276 IDLE_STATUS_RXMAC_BUSY)
250 277
251/* MDIO Control Register */ 278/* MDIO Control Register */
252#define REG_MDIO_CTRL 0x1414 279#define REG_MDIO_CTRL 0x1414
253#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit 280#define MDIO_CTRL_MODE_EXT BIT(30)
254 * control data to write to PHY 281#define MDIO_CTRL_POST_READ BIT(29)
255 * MII management register */ 282#define MDIO_CTRL_AP_EN BIT(28)
256#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit 283#define MDIO_CTRL_BUSY BIT(27)
257 * status data that was read 284#define MDIO_CTRL_CLK_SEL_MASK 0x7UL
258 * from the PHY MII management register */ 285#define MDIO_CTRL_CLK_SEL_SHIFT 24
259#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */ 286#define MDIO_CTRL_CLK_25_4 0 /* 25MHz divide 4 */
260#define MDIO_REG_ADDR_SHIFT 16 287#define MDIO_CTRL_CLK_25_6 2
261#define MDIO_RW 0x200000 /* 1: read, 0: write */ 288#define MDIO_CTRL_CLK_25_8 3
262#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */ 289#define MDIO_CTRL_CLK_25_10 4
263#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO 290#define MDIO_CTRL_CLK_25_32 5
264 * master. And this bit is self 291#define MDIO_CTRL_CLK_25_64 6
265 * cleared after one cycle */ 292#define MDIO_CTRL_CLK_25_128 7
266#define MDIO_CLK_SEL_SHIFT 24 293#define MDIO_CTRL_START BIT(23)
267#define MDIO_CLK_25_4 0 294#define MDIO_CTRL_SPRES_PRMBL BIT(22)
268#define MDIO_CLK_25_6 2 295#define MDIO_CTRL_OP_READ BIT(21) /* 1:read, 0:write */
269#define MDIO_CLK_25_8 3 296#define MDIO_CTRL_REG_MASK 0x1FUL
270#define MDIO_CLK_25_10 4 297#define MDIO_CTRL_REG_SHIFT 16
271#define MDIO_CLK_25_14 5 298#define MDIO_CTRL_DATA_MASK 0xFFFFUL
272#define MDIO_CLK_25_20 6 299#define MDIO_CTRL_DATA_SHIFT 0
273#define MDIO_CLK_25_28 7 300#define MDIO_MAX_AC_TO 120 /* 1.2ms timeout for slow clk */
274#define MDIO_BUSY 0x8000000 301
275#define MDIO_AP_EN 0x10000000 302/* for extension reg access */
276#define MDIO_WAIT_TIMES 10 303#define REG_MDIO_EXTN 0x1448
277 304#define MDIO_EXTN_PORTAD_MASK 0x1FUL
278/* MII PHY Status Register */ 305#define MDIO_EXTN_PORTAD_SHIFT 21
279#define REG_PHY_STATUS 0x1418 306#define MDIO_EXTN_DEVAD_MASK 0x1FUL
280#define PHY_GENERAL_STATUS_MASK 0xFFFF 307#define MDIO_EXTN_DEVAD_SHIFT 16
281#define PHY_STATUS_RECV_ENABLE 0x0001 308#define MDIO_EXTN_REG_MASK 0xFFFFUL
282#define PHY_OE_PWSP_STATUS_MASK 0x07FF 309#define MDIO_EXTN_REG_SHIFT 0
283#define PHY_OE_PWSP_STATUS_SHIFT 16 310
284#define PHY_STATUS_LPW_STATE 0x80000000
285/* BIST Control and Status Register0 (for the Packet Memory) */ 311/* BIST Control and Status Register0 (for the Packet Memory) */
286#define REG_BIST0_CTRL 0x141c 312#define REG_BIST0_CTRL 0x141c
287#define BIST0_NOW 0x1 313#define BIST0_NOW 0x1
@@ -299,50 +325,81 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
299#define BIST1_FUSE_FLAG 0x4 325#define BIST1_FUSE_FLAG 0x4
300 326
301/* SerDes Lock Detect Control and Status Register */ 327/* SerDes Lock Detect Control and Status Register */
302#define REG_SERDES_LOCK 0x1424 328#define REG_SERDES 0x1424
303#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal 329#define SERDES_PHY_CLK_SLOWDOWN BIT(18)
304 * comes from Analog SerDes */ 330#define SERDES_MAC_CLK_SLOWDOWN BIT(17)
305#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */ 331#define SERDES_SELFB_PLL_MASK 0x3UL
306#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE 332#define SERDES_SELFB_PLL_SHIFT 14
307#define SERDES_LOCK_STS_SELFB_PLL_MASK 0x3 333#define SERDES_PHYCLK_SEL_GTX BIT(13) /* 1:gtx_clk, 0:25M */
308#define SERDES_OVCLK_18_25 0x0 334#define SERDES_PCIECLK_SEL_SRDS BIT(12) /* 1:serdes,0:25M */
309#define SERDES_OVCLK_12_18 0x1 335#define SERDES_BUFS_RX_EN BIT(11)
310#define SERDES_OVCLK_0_4 0x2 336#define SERDES_PD_RX BIT(10)
311#define SERDES_OVCLK_4_12 0x3 337#define SERDES_PLL_EN BIT(9)
312#define SERDES_MAC_CLK_SLOWDOWN 0x20000 338#define SERDES_EN BIT(8)
313#define SERDES_PYH_CLK_SLOWDOWN 0x40000 339#define SERDES_SELFB_PLL_SEL_CSR BIT(6) /* 0:state-machine,1:csr */
340#define SERDES_SELFB_PLL_CSR_MASK 0x3UL
341#define SERDES_SELFB_PLL_CSR_SHIFT 4
342#define SERDES_SELFB_PLL_CSR_4 3 /* 4-12% OV-CLK */
343#define SERDES_SELFB_PLL_CSR_0 2 /* 0-4% OV-CLK */
344#define SERDES_SELFB_PLL_CSR_12 1 /* 12-18% OV-CLK */
345#define SERDES_SELFB_PLL_CSR_18 0 /* 18-25% OV-CLK */
346#define SERDES_VCO_SLOW BIT(3)
347#define SERDES_VCO_FAST BIT(2)
348#define SERDES_LOCK_DETECT_EN BIT(1)
349#define SERDES_LOCK_DETECT BIT(0)
350
351#define REG_LPI_DECISN_TIMER 0x143C
352#define L2CB_LPI_DESISN_TIMER 0x7D00
353
354#define REG_LPI_CTRL 0x1440
355#define LPI_CTRL_CHK_DA BIT(31)
356#define LPI_CTRL_ENH_TO_MASK 0x1FFFUL
357#define LPI_CTRL_ENH_TO_SHIFT 12
358#define LPI_CTRL_ENH_TH_MASK 0x1FUL
359#define LPI_CTRL_ENH_TH_SHIFT 6
360#define LPI_CTRL_ENH_EN BIT(5)
361#define LPI_CTRL_CHK_RX BIT(4)
362#define LPI_CTRL_CHK_STATE BIT(3)
363#define LPI_CTRL_GMII BIT(2)
364#define LPI_CTRL_TO_PHY BIT(1)
365#define LPI_CTRL_EN BIT(0)
366
367#define REG_LPI_WAIT 0x1444
368#define LPI_WAIT_TIMER_MASK 0xFFFFUL
369#define LPI_WAIT_TIMER_SHIFT 0
314 370
315/* MAC Control Register */ 371/* MAC Control Register */
316#define REG_MAC_CTRL 0x1480 372#define REG_MAC_CTRL 0x1480
317#define MAC_CTRL_TX_EN 0x1 373#define MAC_CTRL_SPEED_MODE_SW BIT(30) /* 0:phy,1:sw */
318#define MAC_CTRL_RX_EN 0x2 374#define MAC_CTRL_HASH_ALG_CRC32 BIT(29) /* 1:legacy,0:lw_5b */
319#define MAC_CTRL_TX_FLOW 0x4 375#define MAC_CTRL_SINGLE_PAUSE_EN BIT(28)
320#define MAC_CTRL_RX_FLOW 0x8 376#define MAC_CTRL_DBG BIT(27)
321#define MAC_CTRL_LOOPBACK 0x10 377#define MAC_CTRL_BC_EN BIT(26)
322#define MAC_CTRL_DUPLX 0x20 378#define MAC_CTRL_MC_ALL_EN BIT(25)
323#define MAC_CTRL_ADD_CRC 0x40 379#define MAC_CTRL_RX_CHKSUM_EN BIT(24)
324#define MAC_CTRL_PAD 0x80 380#define MAC_CTRL_TX_HUGE BIT(23)
325#define MAC_CTRL_LENCHK 0x100 381#define MAC_CTRL_DBG_TX_BKPRESURE BIT(22)
326#define MAC_CTRL_HUGE_EN 0x200 382#define MAC_CTRL_SPEED_MASK 3UL
327#define MAC_CTRL_PRMLEN_SHIFT 10 383#define MAC_CTRL_SPEED_SHIFT 20
328#define MAC_CTRL_PRMLEN_MASK 0xf 384#define MAC_CTRL_SPEED_10_100 1
329#define MAC_CTRL_RMV_VLAN 0x4000 385#define MAC_CTRL_SPEED_1000 2
330#define MAC_CTRL_PROMIS_EN 0x8000 386#define MAC_CTRL_TX_SIMURST BIT(19)
331#define MAC_CTRL_TX_PAUSE 0x10000 387#define MAC_CTRL_SCNT BIT(17)
332#define MAC_CTRL_SCNT 0x20000 388#define MAC_CTRL_TX_PAUSE BIT(16)
333#define MAC_CTRL_SRST_TX 0x40000 389#define MAC_CTRL_PROMIS_EN BIT(15)
334#define MAC_CTRL_TX_SIMURST 0x80000 390#define MAC_CTRL_RMV_VLAN BIT(14)
335#define MAC_CTRL_SPEED_SHIFT 20 391#define MAC_CTRL_PRMLEN_MASK 0xFUL
336#define MAC_CTRL_SPEED_MASK 0x3 392#define MAC_CTRL_PRMLEN_SHIFT 10
337#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 393#define MAC_CTRL_HUGE_EN BIT(9)
338#define MAC_CTRL_TX_HUGE 0x800000 394#define MAC_CTRL_LENCHK BIT(8)
339#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 395#define MAC_CTRL_PAD BIT(7)
340#define MAC_CTRL_MC_ALL_EN 0x2000000 396#define MAC_CTRL_ADD_CRC BIT(6)
341#define MAC_CTRL_BC_EN 0x4000000 397#define MAC_CTRL_DUPLX BIT(5)
342#define MAC_CTRL_DBG 0x8000000 398#define MAC_CTRL_LOOPBACK BIT(4)
343#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000 399#define MAC_CTRL_RX_FLOW BIT(3)
344#define MAC_CTRL_HASH_ALG_CRC32 0x20000000 400#define MAC_CTRL_TX_FLOW BIT(2)
345#define MAC_CTRL_SPEED_MODE_SW 0x40000000 401#define MAC_CTRL_RX_EN BIT(1)
402#define MAC_CTRL_TX_EN BIT(0)
346 403
347/* MAC IPG/IFG Control Register */ 404/* MAC IPG/IFG Control Register */
348#define REG_MAC_IPG_IFG 0x1484 405#define REG_MAC_IPG_IFG 0x1484
@@ -386,34 +443,53 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
386 443
387/* Wake-On-Lan control register */ 444/* Wake-On-Lan control register */
388#define REG_WOL_CTRL 0x14a0 445#define REG_WOL_CTRL 0x14a0
389#define WOL_PATTERN_EN 0x00000001 446#define WOL_PT7_MATCH BIT(31)
390#define WOL_PATTERN_PME_EN 0x00000002 447#define WOL_PT6_MATCH BIT(30)
391#define WOL_MAGIC_EN 0x00000004 448#define WOL_PT5_MATCH BIT(29)
392#define WOL_MAGIC_PME_EN 0x00000008 449#define WOL_PT4_MATCH BIT(28)
393#define WOL_LINK_CHG_EN 0x00000010 450#define WOL_PT3_MATCH BIT(27)
394#define WOL_LINK_CHG_PME_EN 0x00000020 451#define WOL_PT2_MATCH BIT(26)
395#define WOL_PATTERN_ST 0x00000100 452#define WOL_PT1_MATCH BIT(25)
396#define WOL_MAGIC_ST 0x00000200 453#define WOL_PT0_MATCH BIT(24)
397#define WOL_LINKCHG_ST 0x00000400 454#define WOL_PT7_EN BIT(23)
398#define WOL_CLK_SWITCH_EN 0x00008000 455#define WOL_PT6_EN BIT(22)
399#define WOL_PT0_EN 0x00010000 456#define WOL_PT5_EN BIT(21)
400#define WOL_PT1_EN 0x00020000 457#define WOL_PT4_EN BIT(20)
401#define WOL_PT2_EN 0x00040000 458#define WOL_PT3_EN BIT(19)
402#define WOL_PT3_EN 0x00080000 459#define WOL_PT2_EN BIT(18)
403#define WOL_PT4_EN 0x00100000 460#define WOL_PT1_EN BIT(17)
404#define WOL_PT5_EN 0x00200000 461#define WOL_PT0_EN BIT(16)
405#define WOL_PT6_EN 0x00400000 462#define WOL_LNKCHG_ST BIT(10)
463#define WOL_MAGIC_ST BIT(9)
464#define WOL_PATTERN_ST BIT(8)
465#define WOL_OOB_EN BIT(6)
466#define WOL_LINK_CHG_PME_EN BIT(5)
467#define WOL_LINK_CHG_EN BIT(4)
468#define WOL_MAGIC_PME_EN BIT(3)
469#define WOL_MAGIC_EN BIT(2)
470#define WOL_PATTERN_PME_EN BIT(1)
471#define WOL_PATTERN_EN BIT(0)
406 472
407/* WOL Length ( 2 DWORD ) */ 473/* WOL Length ( 2 DWORD ) */
408#define REG_WOL_PATTERN_LEN 0x14a4 474#define REG_WOL_PTLEN1 0x14A4
409#define WOL_PT_LEN_MASK 0x7f 475#define WOL_PTLEN1_3_MASK 0xFFUL
410#define WOL_PT0_LEN_SHIFT 0 476#define WOL_PTLEN1_3_SHIFT 24
411#define WOL_PT1_LEN_SHIFT 8 477#define WOL_PTLEN1_2_MASK 0xFFUL
412#define WOL_PT2_LEN_SHIFT 16 478#define WOL_PTLEN1_2_SHIFT 16
413#define WOL_PT3_LEN_SHIFT 24 479#define WOL_PTLEN1_1_MASK 0xFFUL
414#define WOL_PT4_LEN_SHIFT 0 480#define WOL_PTLEN1_1_SHIFT 8
415#define WOL_PT5_LEN_SHIFT 8 481#define WOL_PTLEN1_0_MASK 0xFFUL
416#define WOL_PT6_LEN_SHIFT 16 482#define WOL_PTLEN1_0_SHIFT 0
483
484#define REG_WOL_PTLEN2 0x14A8
485#define WOL_PTLEN2_7_MASK 0xFFUL
486#define WOL_PTLEN2_7_SHIFT 24
487#define WOL_PTLEN2_6_MASK 0xFFUL
488#define WOL_PTLEN2_6_SHIFT 16
489#define WOL_PTLEN2_5_MASK 0xFFUL
490#define WOL_PTLEN2_5_SHIFT 8
491#define WOL_PTLEN2_4_MASK 0xFFUL
492#define WOL_PTLEN2_4_SHIFT 0
417 493
418/* Internal SRAM Partition Register */ 494/* Internal SRAM Partition Register */
419#define RFDX_HEAD_ADDR_MASK 0x03FF 495#define RFDX_HEAD_ADDR_MASK 0x03FF
@@ -458,66 +534,50 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
458 */ 534 */
459#define REG_RX_BASE_ADDR_HI 0x1540 535#define REG_RX_BASE_ADDR_HI 0x1540
460#define REG_TX_BASE_ADDR_HI 0x1544 536#define REG_TX_BASE_ADDR_HI 0x1544
461#define REG_SMB_BASE_ADDR_HI 0x1548
462#define REG_SMB_BASE_ADDR_LO 0x154C
463#define REG_RFD0_HEAD_ADDR_LO 0x1550 537#define REG_RFD0_HEAD_ADDR_LO 0x1550
464#define REG_RFD1_HEAD_ADDR_LO 0x1554
465#define REG_RFD2_HEAD_ADDR_LO 0x1558
466#define REG_RFD3_HEAD_ADDR_LO 0x155C
467#define REG_RFD_RING_SIZE 0x1560 538#define REG_RFD_RING_SIZE 0x1560
468#define RFD_RING_SIZE_MASK 0x0FFF 539#define RFD_RING_SIZE_MASK 0x0FFF
469#define REG_RX_BUF_SIZE 0x1564 540#define REG_RX_BUF_SIZE 0x1564
470#define RX_BUF_SIZE_MASK 0xFFFF 541#define RX_BUF_SIZE_MASK 0xFFFF
471#define REG_RRD0_HEAD_ADDR_LO 0x1568 542#define REG_RRD0_HEAD_ADDR_LO 0x1568
472#define REG_RRD1_HEAD_ADDR_LO 0x156C
473#define REG_RRD2_HEAD_ADDR_LO 0x1570
474#define REG_RRD3_HEAD_ADDR_LO 0x1574
475#define REG_RRD_RING_SIZE 0x1578 543#define REG_RRD_RING_SIZE 0x1578
476#define RRD_RING_SIZE_MASK 0x0FFF 544#define RRD_RING_SIZE_MASK 0x0FFF
477#define REG_HTPD_HEAD_ADDR_LO 0x157C 545#define REG_TPD_PRI1_ADDR_LO 0x157C
478#define REG_NTPD_HEAD_ADDR_LO 0x1580 546#define REG_TPD_PRI0_ADDR_LO 0x1580
479#define REG_TPD_RING_SIZE 0x1584 547#define REG_TPD_RING_SIZE 0x1584
480#define TPD_RING_SIZE_MASK 0xFFFF 548#define TPD_RING_SIZE_MASK 0xFFFF
481#define REG_CMB_BASE_ADDR_LO 0x1588
482
483/* RSS about */
484#define REG_RSS_KEY0 0x14B0
485#define REG_RSS_KEY1 0x14B4
486#define REG_RSS_KEY2 0x14B8
487#define REG_RSS_KEY3 0x14BC
488#define REG_RSS_KEY4 0x14C0
489#define REG_RSS_KEY5 0x14C4
490#define REG_RSS_KEY6 0x14C8
491#define REG_RSS_KEY7 0x14CC
492#define REG_RSS_KEY8 0x14D0
493#define REG_RSS_KEY9 0x14D4
494#define REG_IDT_TABLE0 0x14E0
495#define REG_IDT_TABLE1 0x14E4
496#define REG_IDT_TABLE2 0x14E8
497#define REG_IDT_TABLE3 0x14EC
498#define REG_IDT_TABLE4 0x14F0
499#define REG_IDT_TABLE5 0x14F4
500#define REG_IDT_TABLE6 0x14F8
501#define REG_IDT_TABLE7 0x14FC
502#define REG_IDT_TABLE REG_IDT_TABLE0
503#define REG_RSS_HASH_VALUE 0x15B0
504#define REG_RSS_HASH_FLAG 0x15B4
505#define REG_BASE_CPU_NUMBER 0x15B8
506 549
507/* TXQ Control Register */ 550/* TXQ Control Register */
508#define REG_TXQ_CTRL 0x1590 551#define REG_TXQ_CTRL 0x1590
509#define TXQ_NUM_TPD_BURST_MASK 0xF 552#define TXQ_TXF_BURST_NUM_MASK 0xFFFFUL
510#define TXQ_NUM_TPD_BURST_SHIFT 0 553#define TXQ_TXF_BURST_NUM_SHIFT 16
511#define TXQ_CTRL_IP_OPTION_EN 0x10 554#define L1C_TXQ_TXF_BURST_PREF 0x200
512#define TXQ_CTRL_EN 0x20 555#define L2CB_TXQ_TXF_BURST_PREF 0x40
513#define TXQ_CTRL_ENH_MODE 0x40 556#define TXQ_CTRL_PEDING_CLR BIT(8)
514#define TXQ_CTRL_LS_8023_EN 0x80 557#define TXQ_CTRL_LS_8023_EN BIT(7)
515#define TXQ_TXF_BURST_NUM_SHIFT 16 558#define TXQ_CTRL_ENH_MODE BIT(6)
516#define TXQ_TXF_BURST_NUM_MASK 0xFFFF 559#define TXQ_CTRL_EN BIT(5)
560#define TXQ_CTRL_IP_OPTION_EN BIT(4)
561#define TXQ_NUM_TPD_BURST_MASK 0xFUL
562#define TXQ_NUM_TPD_BURST_SHIFT 0
563#define TXQ_NUM_TPD_BURST_DEF 5
564#define TXQ_CFGV (\
565 FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) |\
566 TXQ_CTRL_ENH_MODE |\
567 TXQ_CTRL_LS_8023_EN |\
568 TXQ_CTRL_IP_OPTION_EN)
569#define L1C_TXQ_CFGV (\
570 TXQ_CFGV |\
571 FIELDX(TXQ_TXF_BURST_NUM, L1C_TXQ_TXF_BURST_PREF))
572#define L2CB_TXQ_CFGV (\
573 TXQ_CFGV |\
574 FIELDX(TXQ_TXF_BURST_NUM, L2CB_TXQ_TXF_BURST_PREF))
575
517 576
518/* Jumbo packet Threshold for task offload */ 577/* Jumbo packet Threshold for task offload */
519#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */ 578#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */
520#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF 579#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF
580#define MAX_TSO_FRAME_SIZE (7*1024)
521 581
522#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */ 582#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */
523#define TXF_WATER_MARK_MASK 0x0FFF 583#define TXF_WATER_MARK_MASK 0x0FFF
@@ -537,26 +597,21 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
537#define ASPM_THRUPUT_LIMIT_NO 0x00 597#define ASPM_THRUPUT_LIMIT_NO 0x00
538#define ASPM_THRUPUT_LIMIT_1M 0x01 598#define ASPM_THRUPUT_LIMIT_1M 0x01
539#define ASPM_THRUPUT_LIMIT_10M 0x02 599#define ASPM_THRUPUT_LIMIT_10M 0x02
540#define ASPM_THRUPUT_LIMIT_100M 0x04 600#define ASPM_THRUPUT_LIMIT_100M 0x03
541#define RXQ1_CTRL_EN 0x10 601#define IPV6_CHKSUM_CTRL_EN BIT(7)
542#define RXQ2_CTRL_EN 0x20
543#define RXQ3_CTRL_EN 0x40
544#define IPV6_CHKSUM_CTRL_EN 0x80
545#define RSS_HASH_BITS_MASK 0x00FF
546#define RSS_HASH_BITS_SHIFT 8
547#define RSS_HASH_IPV4 0x10000
548#define RSS_HASH_IPV4_TCP 0x20000
549#define RSS_HASH_IPV6 0x40000
550#define RSS_HASH_IPV6_TCP 0x80000
551#define RXQ_RFD_BURST_NUM_MASK 0x003F 602#define RXQ_RFD_BURST_NUM_MASK 0x003F
552#define RXQ_RFD_BURST_NUM_SHIFT 20 603#define RXQ_RFD_BURST_NUM_SHIFT 20
553#define RSS_MODE_MASK 0x0003 604#define RXQ_NUM_RFD_PREF_DEF 8
605#define RSS_MODE_MASK 3UL
554#define RSS_MODE_SHIFT 26 606#define RSS_MODE_SHIFT 26
555#define RSS_NIP_QUEUE_SEL_MASK 0x1 607#define RSS_MODE_DIS 0
556#define RSS_NIP_QUEUE_SEL_SHIFT 28 608#define RSS_MODE_SQSI 1
557#define RRS_HASH_CTRL_EN 0x20000000 609#define RSS_MODE_MQSI 2
558#define RX_CUT_THRU_EN 0x40000000 610#define RSS_MODE_MQMI 3
559#define RXQ_CTRL_EN 0x80000000 611#define RSS_NIP_QUEUE_SEL BIT(28) /* 0:q0, 1:table */
612#define RRS_HASH_CTRL_EN BIT(29)
613#define RX_CUT_THRU_EN BIT(30)
614#define RXQ_CTRL_EN BIT(31)
560 615
561#define REG_RFD_FREE_THRESH 0x15A4 616#define REG_RFD_FREE_THRESH 0x15A4
562#define RFD_FREE_THRESH_MASK 0x003F 617#define RFD_FREE_THRESH_MASK 0x003F
@@ -577,57 +632,45 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
577#define RXD_DMA_DOWN_TIMER_SHIFT 16 632#define RXD_DMA_DOWN_TIMER_SHIFT 16
578 633
579/* DMA Engine Control Register */ 634/* DMA Engine Control Register */
580#define REG_DMA_CTRL 0x15C0 635#define REG_DMA_CTRL 0x15C0
581#define DMA_CTRL_DMAR_IN_ORDER 0x1 636#define DMA_CTRL_SMB_NOW BIT(31)
582#define DMA_CTRL_DMAR_ENH_ORDER 0x2 637#define DMA_CTRL_WPEND_CLR BIT(30)
583#define DMA_CTRL_DMAR_OUT_ORDER 0x4 638#define DMA_CTRL_RPEND_CLR BIT(29)
584#define DMA_CTRL_RCB_VALUE 0x8 639#define DMA_CTRL_WDLY_CNT_MASK 0xFUL
585#define DMA_CTRL_DMAR_BURST_LEN_MASK 0x0007 640#define DMA_CTRL_WDLY_CNT_SHIFT 16
586#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 641#define DMA_CTRL_WDLY_CNT_DEF 4
587#define DMA_CTRL_DMAW_BURST_LEN_MASK 0x0007 642#define DMA_CTRL_RDLY_CNT_MASK 0x1FUL
588#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 643#define DMA_CTRL_RDLY_CNT_SHIFT 11
589#define DMA_CTRL_DMAR_REQ_PRI 0x400 644#define DMA_CTRL_RDLY_CNT_DEF 15
590#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x001F 645#define DMA_CTRL_RREQ_PRI_DATA BIT(10) /* 0:tpd, 1:data */
591#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11 646#define DMA_CTRL_WREQ_BLEN_MASK 7UL
592#define DMA_CTRL_DMAW_DLY_CNT_MASK 0x000F 647#define DMA_CTRL_WREQ_BLEN_SHIFT 7
593#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16 648#define DMA_CTRL_RREQ_BLEN_MASK 7UL
594#define DMA_CTRL_CMB_EN 0x100000 649#define DMA_CTRL_RREQ_BLEN_SHIFT 4
595#define DMA_CTRL_SMB_EN 0x200000 650#define L1C_CTRL_DMA_RCB_LEN128 BIT(3) /* 0:64bytes,1:128bytes */
596#define DMA_CTRL_CMB_NOW 0x400000 651#define DMA_CTRL_RORDER_MODE_MASK 7UL
597#define MAC_CTRL_SMB_DIS 0x1000000 652#define DMA_CTRL_RORDER_MODE_SHIFT 0
598#define DMA_CTRL_SMB_NOW 0x80000000 653#define DMA_CTRL_RORDER_MODE_OUT 4
599 654#define DMA_CTRL_RORDER_MODE_ENHANCE 2
600/* CMB/SMB Control Register */ 655#define DMA_CTRL_RORDER_MODE_IN 1
656
657/* INT-triggle/SMB Control Register */
601#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */ 658#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */
602#define SMB_STAT_TIMER_MASK 0xFFFFFF 659#define SMB_STAT_TIMER_MASK 0xFFFFFF
603#define REG_CMB_TPD_THRESH 0x15C8 660#define REG_TINT_TPD_THRESH 0x15C8 /* tpd th to trig intrrupt */
604#define CMB_TPD_THRESH_MASK 0xFFFF
605#define REG_CMB_TX_TIMER 0x15CC /* 2us resolution */
606#define CMB_TX_TIMER_MASK 0xFFFF
607 661
608/* Mail box */ 662/* Mail box */
609#define MB_RFDX_PROD_IDX_MASK 0xFFFF 663#define MB_RFDX_PROD_IDX_MASK 0xFFFF
610#define REG_MB_RFD0_PROD_IDX 0x15E0 664#define REG_MB_RFD0_PROD_IDX 0x15E0
611#define REG_MB_RFD1_PROD_IDX 0x15E4
612#define REG_MB_RFD2_PROD_IDX 0x15E8
613#define REG_MB_RFD3_PROD_IDX 0x15EC
614 665
615#define MB_PRIO_PROD_IDX_MASK 0xFFFF 666#define REG_TPD_PRI1_PIDX 0x15F0 /* 16bit,hi-tpd producer idx */
616#define REG_MB_PRIO_PROD_IDX 0x15F0 667#define REG_TPD_PRI0_PIDX 0x15F2 /* 16bit,lo-tpd producer idx */
617#define MB_HTPD_PROD_IDX_SHIFT 0 668#define REG_TPD_PRI1_CIDX 0x15F4 /* 16bit,hi-tpd consumer idx */
618#define MB_NTPD_PROD_IDX_SHIFT 16 669#define REG_TPD_PRI0_CIDX 0x15F6 /* 16bit,lo-tpd consumer idx */
619
620#define MB_PRIO_CONS_IDX_MASK 0xFFFF
621#define REG_MB_PRIO_CONS_IDX 0x15F4
622#define MB_HTPD_CONS_IDX_SHIFT 0
623#define MB_NTPD_CONS_IDX_SHIFT 16
624 670
625#define REG_MB_RFD01_CONS_IDX 0x15F8 671#define REG_MB_RFD01_CONS_IDX 0x15F8
626#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF 672#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF
627#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000 673#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000
628#define REG_MB_RFD23_CONS_IDX 0x15FC
629#define MB_RFD2_CONS_IDX_MASK 0x0000FFFF
630#define MB_RFD3_CONS_IDX_MASK 0xFFFF0000
631 674
632/* Interrupt Status Register */ 675/* Interrupt Status Register */
633#define REG_ISR 0x1600 676#define REG_ISR 0x1600
@@ -705,13 +748,6 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
705#define REG_INT_RETRIG_TIMER 0x1608 748#define REG_INT_RETRIG_TIMER 0x1608
706#define INT_RETRIG_TIMER_MASK 0xFFFF 749#define INT_RETRIG_TIMER_MASK 0xFFFF
707 750
708#define REG_HDS_CTRL 0x160C
709#define HDS_CTRL_EN 0x0001
710#define HDS_CTRL_BACKFILLSIZE_SHIFT 8
711#define HDS_CTRL_BACKFILLSIZE_MASK 0x0FFF
712#define HDS_CTRL_MAX_HDRSIZE_SHIFT 20
713#define HDS_CTRL_MAC_HDRSIZE_MASK 0x0FFF
714
715#define REG_MAC_RX_STATUS_BIN 0x1700 751#define REG_MAC_RX_STATUS_BIN 0x1700
716#define REG_MAC_RX_STATUS_END 0x175c 752#define REG_MAC_RX_STATUS_END 0x175c
717#define REG_MAC_TX_STATUS_BIN 0x1760 753#define REG_MAC_TX_STATUS_BIN 0x1760
@@ -796,73 +832,188 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
796#define MII_DBG_ADDR 0x1D 832#define MII_DBG_ADDR 0x1D
797#define MII_DBG_DATA 0x1E 833#define MII_DBG_DATA 0x1E
798 834
799#define MII_ANA_CTRL_0 0x0 835/***************************** debug port *************************************/
800#define ANA_RESTART_CAL 0x0001 836
801#define ANA_MANUL_SWICH_ON_SHIFT 0x1 837#define MIIDBG_ANACTRL 0x00
802#define ANA_MANUL_SWICH_ON_MASK 0xF 838#define ANACTRL_CLK125M_DELAY_EN 0x8000
803#define ANA_MAN_ENABLE 0x0020 839#define ANACTRL_VCO_FAST 0x4000
804#define ANA_SEL_HSP 0x0040 840#define ANACTRL_VCO_SLOW 0x2000
805#define ANA_EN_HB 0x0080 841#define ANACTRL_AFE_MODE_EN 0x1000
806#define ANA_EN_HBIAS 0x0100 842#define ANACTRL_LCKDET_PHY 0x800
807#define ANA_OEN_125M 0x0200 843#define ANACTRL_LCKDET_EN 0x400
808#define ANA_EN_LCKDT 0x0400 844#define ANACTRL_OEN_125M 0x200
809#define ANA_LCKDT_PHY 0x0800 845#define ANACTRL_HBIAS_EN 0x100
810#define ANA_AFE_MODE 0x1000 846#define ANACTRL_HB_EN 0x80
811#define ANA_VCO_SLOW 0x2000 847#define ANACTRL_SEL_HSP 0x40
812#define ANA_VCO_FAST 0x4000 848#define ANACTRL_CLASSA_EN 0x20
813#define ANA_SEL_CLK125M_DSP 0x8000 849#define ANACTRL_MANUSWON_SWR_MASK 3U
814 850#define ANACTRL_MANUSWON_SWR_SHIFT 2
815#define MII_ANA_CTRL_4 0x4 851#define ANACTRL_MANUSWON_SWR_2V 0
816#define ANA_IECHO_ADJ_MASK 0xF 852#define ANACTRL_MANUSWON_SWR_1P9V 1
817#define ANA_IECHO_ADJ_3_SHIFT 0 853#define ANACTRL_MANUSWON_SWR_1P8V 2
818#define ANA_IECHO_ADJ_2_SHIFT 4 854#define ANACTRL_MANUSWON_SWR_1P7V 3
819#define ANA_IECHO_ADJ_1_SHIFT 8 855#define ANACTRL_MANUSWON_BW3_4M 0x2
820#define ANA_IECHO_ADJ_0_SHIFT 12 856#define ANACTRL_RESTART_CAL 0x1
821 857#define ANACTRL_DEF 0x02EF
822#define MII_ANA_CTRL_5 0x5 858
823#define ANA_SERDES_CDR_BW_SHIFT 0 859#define MIIDBG_SYSMODCTRL 0x04
824#define ANA_SERDES_CDR_BW_MASK 0x3 860#define SYSMODCTRL_IECHOADJ_PFMH_PHY 0x8000
825#define ANA_MS_PAD_DBG 0x0004 861#define SYSMODCTRL_IECHOADJ_BIASGEN 0x4000
826#define ANA_SPEEDUP_DBG 0x0008 862#define SYSMODCTRL_IECHOADJ_PFML_PHY 0x2000
827#define ANA_SERDES_TH_LOS_SHIFT 4 863#define SYSMODCTRL_IECHOADJ_PS_MASK 3U
828#define ANA_SERDES_TH_LOS_MASK 0x3 864#define SYSMODCTRL_IECHOADJ_PS_SHIFT 10
829#define ANA_SERDES_EN_DEEM 0x0040 865#define SYSMODCTRL_IECHOADJ_PS_40 3
830#define ANA_SERDES_TXELECIDLE 0x0080 866#define SYSMODCTRL_IECHOADJ_PS_20 2
831#define ANA_SERDES_BEACON 0x0100 867#define SYSMODCTRL_IECHOADJ_PS_0 1
832#define ANA_SERDES_HALFTXDR 0x0200 868#define SYSMODCTRL_IECHOADJ_10BT_100MV 0x40 /* 1:100mv, 0:200mv */
833#define ANA_SERDES_SEL_HSP 0x0400 869#define SYSMODCTRL_IECHOADJ_HLFAP_MASK 3U
834#define ANA_SERDES_EN_PLL 0x0800 870#define SYSMODCTRL_IECHOADJ_HLFAP_SHIFT 4
835#define ANA_SERDES_EN 0x1000 871#define SYSMODCTRL_IECHOADJ_VDFULBW 0x8
836#define ANA_SERDES_EN_LCKDT 0x2000 872#define SYSMODCTRL_IECHOADJ_VDBIASHLF 0x4
837 873#define SYSMODCTRL_IECHOADJ_VDAMPHLF 0x2
838#define MII_ANA_CTRL_11 0xB 874#define SYSMODCTRL_IECHOADJ_VDLANSW 0x1
839#define ANA_PS_HIB_EN 0x8000 875#define SYSMODCTRL_IECHOADJ_DEF 0x88BB /* ???? */
840 876
841#define MII_ANA_CTRL_18 0x12 877/* for l1d & l2cb */
842#define ANA_TEST_MODE_10BT_01SHIFT 0 878#define SYSMODCTRL_IECHOADJ_CUR_ADD 0x8000
843#define ANA_TEST_MODE_10BT_01MASK 0x3 879#define SYSMODCTRL_IECHOADJ_CUR_MASK 7U
844#define ANA_LOOP_SEL_10BT 0x0004 880#define SYSMODCTRL_IECHOADJ_CUR_SHIFT 12
845#define ANA_RGMII_MODE_SW 0x0008 881#define SYSMODCTRL_IECHOADJ_VOL_MASK 0xFU
846#define ANA_EN_LONGECABLE 0x0010 882#define SYSMODCTRL_IECHOADJ_VOL_SHIFT 8
847#define ANA_TEST_MODE_10BT_2 0x0020 883#define SYSMODCTRL_IECHOADJ_VOL_17ALL 3
848#define ANA_EN_10BT_IDLE 0x0400 884#define SYSMODCTRL_IECHOADJ_VOL_100M15 1
849#define ANA_EN_MASK_TB 0x0800 885#define SYSMODCTRL_IECHOADJ_VOL_10M17 0
850#define ANA_TRIGGER_SEL_TIMER_SHIFT 12 886#define SYSMODCTRL_IECHOADJ_BIAS1_MASK 0xFU
851#define ANA_TRIGGER_SEL_TIMER_MASK 0x3 887#define SYSMODCTRL_IECHOADJ_BIAS1_SHIFT 4
852#define ANA_INTERVAL_SEL_TIMER_SHIFT 14 888#define SYSMODCTRL_IECHOADJ_BIAS2_MASK 0xFU
853#define ANA_INTERVAL_SEL_TIMER_MASK 0x3 889#define SYSMODCTRL_IECHOADJ_BIAS2_SHIFT 0
854 890#define L1D_SYSMODCTRL_IECHOADJ_DEF 0x4FBB
855#define MII_ANA_CTRL_41 0x29 891
856#define ANA_TOP_PS_EN 0x8000 892#define MIIDBG_SRDSYSMOD 0x05
857 893#define SRDSYSMOD_LCKDET_EN 0x2000
858#define MII_ANA_CTRL_54 0x36 894#define SRDSYSMOD_PLL_EN 0x800
859#define ANA_LONG_CABLE_TH_100_SHIFT 0 895#define SRDSYSMOD_SEL_HSP 0x400
860#define ANA_LONG_CABLE_TH_100_MASK 0x3F 896#define SRDSYSMOD_HLFTXDR 0x200
861#define ANA_DESERVED 0x0040 897#define SRDSYSMOD_TXCLK_DELAY_EN 0x100
862#define ANA_EN_LIT_CH 0x0080 898#define SRDSYSMOD_TXELECIDLE 0x80
863#define ANA_SHORT_CABLE_TH_100_SHIFT 8 899#define SRDSYSMOD_DEEMP_EN 0x40
864#define ANA_SHORT_CABLE_TH_100_MASK 0x3F 900#define SRDSYSMOD_MS_PAD 0x4
865#define ANA_BP_BAD_LINK_ACCUM 0x4000 901#define SRDSYSMOD_CDR_ADC_VLTG 0x2
866#define ANA_BP_SMALL_BW 0x8000 902#define SRDSYSMOD_CDR_DAC_1MA 0x1
903#define SRDSYSMOD_DEF 0x2C46
904
905#define MIIDBG_CFGLPSPD 0x0A
906#define CFGLPSPD_RSTCNT_MASK 3U
907#define CFGLPSPD_RSTCNT_SHIFT 14
908#define CFGLPSPD_RSTCNT_CLK125SW 0x2000
909
910#define MIIDBG_HIBNEG 0x0B
911#define HIBNEG_PSHIB_EN 0x8000
912#define HIBNEG_WAKE_BOTH 0x4000
913#define HIBNEG_ONOFF_ANACHG_SUDEN 0x2000
914#define HIBNEG_HIB_PULSE 0x1000
915#define HIBNEG_GATE_25M_EN 0x800
916#define HIBNEG_RST_80U 0x400
917#define HIBNEG_RST_TIMER_MASK 3U
918#define HIBNEG_RST_TIMER_SHIFT 8
919#define HIBNEG_GTX_CLK_DELAY_MASK 3U
920#define HIBNEG_GTX_CLK_DELAY_SHIFT 5
921#define HIBNEG_BYPSS_BRKTIMER 0x10
922#define HIBNEG_DEF 0xBC40
923
924#define MIIDBG_TST10BTCFG 0x12
925#define TST10BTCFG_INTV_TIMER_MASK 3U
926#define TST10BTCFG_INTV_TIMER_SHIFT 14
927#define TST10BTCFG_TRIGER_TIMER_MASK 3U
928#define TST10BTCFG_TRIGER_TIMER_SHIFT 12
929#define TST10BTCFG_DIV_MAN_MLT3_EN 0x800
930#define TST10BTCFG_OFF_DAC_IDLE 0x400
931#define TST10BTCFG_LPBK_DEEP 0x4 /* 1:deep,0:shallow */
932#define TST10BTCFG_DEF 0x4C04
933
934#define MIIDBG_AZ_ANADECT 0x15
935#define AZ_ANADECT_10BTRX_TH 0x8000
936#define AZ_ANADECT_BOTH_01CHNL 0x4000
937#define AZ_ANADECT_INTV_MASK 0x3FU
938#define AZ_ANADECT_INTV_SHIFT 8
939#define AZ_ANADECT_THRESH_MASK 0xFU
940#define AZ_ANADECT_THRESH_SHIFT 4
941#define AZ_ANADECT_CHNL_MASK 0xFU
942#define AZ_ANADECT_CHNL_SHIFT 0
943#define AZ_ANADECT_DEF 0x3220
944#define AZ_ANADECT_LONG 0xb210
945
946#define MIIDBG_MSE16DB 0x18 /* l1d */
947#define L1D_MSE16DB_UP 0x05EA
948#define L1D_MSE16DB_DOWN 0x02EA
949
950#define MIIDBG_LEGCYPS 0x29
951#define LEGCYPS_EN 0x8000
952#define LEGCYPS_DAC_AMP1000_MASK 7U
953#define LEGCYPS_DAC_AMP1000_SHIFT 12
954#define LEGCYPS_DAC_AMP100_MASK 7U
955#define LEGCYPS_DAC_AMP100_SHIFT 9
956#define LEGCYPS_DAC_AMP10_MASK 7U
957#define LEGCYPS_DAC_AMP10_SHIFT 6
958#define LEGCYPS_UNPLUG_TIMER_MASK 7U
959#define LEGCYPS_UNPLUG_TIMER_SHIFT 3
960#define LEGCYPS_UNPLUG_DECT_EN 0x4
961#define LEGCYPS_ECNC_PS_EN 0x1
962#define L1D_LEGCYPS_DEF 0x129D
963#define L1C_LEGCYPS_DEF 0x36DD
964
965#define MIIDBG_TST100BTCFG 0x36
966#define TST100BTCFG_NORMAL_BW_EN 0x8000
967#define TST100BTCFG_BADLNK_BYPASS 0x4000
968#define TST100BTCFG_SHORTCABL_TH_MASK 0x3FU
969#define TST100BTCFG_SHORTCABL_TH_SHIFT 8
970#define TST100BTCFG_LITCH_EN 0x80
971#define TST100BTCFG_VLT_SW 0x40
972#define TST100BTCFG_LONGCABL_TH_MASK 0x3FU
973#define TST100BTCFG_LONGCABL_TH_SHIFT 0
974#define TST100BTCFG_DEF 0xE12C
975
976#define MIIDBG_VOLT_CTRL 0x3B /* only for l2cb 1 & 2 */
977#define VOLT_CTRL_CABLE1TH_MASK 0x1FFU
978#define VOLT_CTRL_CABLE1TH_SHIFT 7
979#define VOLT_CTRL_AMPCTRL_MASK 3U
980#define VOLT_CTRL_AMPCTRL_SHIFT 5
981#define VOLT_CTRL_SW_BYPASS 0x10
982#define VOLT_CTRL_SWLOWEST 0x8
983#define VOLT_CTRL_DACAMP10_MASK 7U
984#define VOLT_CTRL_DACAMP10_SHIFT 0
985
986#define MIIDBG_CABLE1TH_DET 0x3E
987#define CABLE1TH_DET_EN 0x8000
988
989
990/******* dev 3 *********/
991#define MIIEXT_PCS 3
992
993#define MIIEXT_CLDCTRL3 0x8003
994#define CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
995#define CLDCTRL3_AZ_DISAMP 0x1000
996#define L2CB_CLDCTRL3 0x4D19
997#define L1D_CLDCTRL3 0xDD19
998
999#define MIIEXT_CLDCTRL6 0x8006
1000#define CLDCTRL6_CAB_LEN_MASK 0x1FFU
1001#define CLDCTRL6_CAB_LEN_SHIFT 0
1002#define CLDCTRL6_CAB_LEN_SHORT 0x50
1003
1004/********* dev 7 **********/
1005#define MIIEXT_ANEG 7
1006
1007#define MIIEXT_LOCAL_EEEADV 0x3C
1008#define LOCAL_EEEADV_1000BT 0x4
1009#define LOCAL_EEEADV_100BT 0x2
1010
1011#define MIIEXT_REMOTE_EEEADV 0x3D
1012#define REMOTE_EEEADV_1000BT 0x4
1013#define REMOTE_EEEADV_100BT 0x2
1014
1015#define MIIEXT_EEE_ANEG 0x8000
1016#define EEE_ANEG_1000M 0x4
1017#define EEE_ANEG_100M 0x2
867 1018
868#endif /*_ATL1C_HW_H_*/ 1019#endif /*_ATL1C_HW_H_*/
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1ef0c9275dee..9cc15701101b 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -24,14 +24,6 @@
24#define ATL1C_DRV_VERSION "1.0.1.0-NAPI" 24#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
25char atl1c_driver_name[] = "atl1c"; 25char atl1c_driver_name[] = "atl1c";
26char atl1c_driver_version[] = ATL1C_DRV_VERSION; 26char atl1c_driver_version[] = ATL1C_DRV_VERSION;
27#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
28#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
29#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
30#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
31#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
32#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
33#define L2CB_V10 0xc0
34#define L2CB_V11 0xc1
35 27
36/* 28/*
37 * atl1c_pci_tbl - PCI Device ID Table 29 * atl1c_pci_tbl - PCI Device ID Table
@@ -54,70 +46,72 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
54}; 46};
55MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl); 47MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
56 48
57MODULE_AUTHOR("Jie Yang <jie.yang@atheros.com>"); 49MODULE_AUTHOR("Jie Yang");
58MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); 50MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>");
51MODULE_DESCRIPTION("Qualcom Atheros 100/1000M Ethernet Network Driver");
59MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
60MODULE_VERSION(ATL1C_DRV_VERSION); 53MODULE_VERSION(ATL1C_DRV_VERSION);
61 54
62static int atl1c_stop_mac(struct atl1c_hw *hw); 55static int atl1c_stop_mac(struct atl1c_hw *hw);
63static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw);
64static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw);
65static void atl1c_disable_l0s_l1(struct atl1c_hw *hw); 56static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
66static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup); 57static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
67static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter); 58static void atl1c_start_mac(struct atl1c_adapter *adapter);
68static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, 59static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
69 int *work_done, int work_to_do); 60 int *work_done, int work_to_do);
70static int atl1c_up(struct atl1c_adapter *adapter); 61static int atl1c_up(struct atl1c_adapter *adapter);
71static void atl1c_down(struct atl1c_adapter *adapter); 62static void atl1c_down(struct atl1c_adapter *adapter);
63static int atl1c_reset_mac(struct atl1c_hw *hw);
64static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
65static int atl1c_configure(struct atl1c_adapter *adapter);
66static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter);
72 67
73static const u16 atl1c_pay_load_size[] = { 68static const u16 atl1c_pay_load_size[] = {
74 128, 256, 512, 1024, 2048, 4096, 69 128, 256, 512, 1024, 2048, 4096,
75}; 70};
76 71
77static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] =
78{
79 REG_MB_RFD0_PROD_IDX,
80 REG_MB_RFD1_PROD_IDX,
81 REG_MB_RFD2_PROD_IDX,
82 REG_MB_RFD3_PROD_IDX
83};
84
85static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
86{
87 REG_RFD0_HEAD_ADDR_LO,
88 REG_RFD1_HEAD_ADDR_LO,
89 REG_RFD2_HEAD_ADDR_LO,
90 REG_RFD3_HEAD_ADDR_LO
91};
92
93static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
94{
95 REG_RRD0_HEAD_ADDR_LO,
96 REG_RRD1_HEAD_ADDR_LO,
97 REG_RRD2_HEAD_ADDR_LO,
98 REG_RRD3_HEAD_ADDR_LO
99};
100 72
101static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | 73static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
102 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; 74 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
103static void atl1c_pcie_patch(struct atl1c_hw *hw) 75static void atl1c_pcie_patch(struct atl1c_hw *hw)
104{ 76{
105 u32 data; 77 u32 mst_data, data;
106 78
107 AT_READ_REG(hw, REG_PCIE_PHYMISC, &data); 79 /* pclk sel could switch to 25M */
108 data |= PCIE_PHYMISC_FORCE_RCV_DET; 80 AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data);
109 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data); 81 mst_data &= ~MASTER_CTRL_CLK_SEL_DIS;
82 AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data);
110 83
84 /* WoL/PCIE related settings */
85 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
86 AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
87 data |= PCIE_PHYMISC_FORCE_RCV_DET;
88 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
89 } else { /* new dev set bit5 of MASTER */
90 if (!(mst_data & MASTER_CTRL_WAKEN_25M))
91 AT_WRITE_REG(hw, REG_MASTER_CTRL,
92 mst_data | MASTER_CTRL_WAKEN_25M);
93 }
94 /* aspm/PCIE setting only for l2cb 1.0 */
111 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) { 95 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
112 AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data); 96 AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
113 97 data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW,
114 data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK << 98 L2CB1_PCIE_PHYMISC2_CDR_BW);
115 PCIE_PHYMISC2_SERDES_CDR_SHIFT); 99 data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH,
116 data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; 100 L2CB1_PCIE_PHYMISC2_L0S_TH);
117 data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
118 PCIE_PHYMISC2_SERDES_TH_SHIFT);
119 data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
120 AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data); 101 AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
102 /* extend L1 sync timer */
103 AT_READ_REG(hw, REG_LINK_CTRL, &data);
104 data |= LINK_CTRL_EXT_SYNC;
105 AT_WRITE_REG(hw, REG_LINK_CTRL, data);
106 }
107 /* l2cb 1.x & l1d 1.x */
108 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) {
109 AT_READ_REG(hw, REG_PM_CTRL, &data);
110 data |= PM_CTRL_L0S_BUFSRX_EN;
111 AT_WRITE_REG(hw, REG_PM_CTRL, data);
112 /* clear vendor msg */
113 AT_READ_REG(hw, REG_DMA_DBG, &data);
114 AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG);
121 } 115 }
122} 116}
123 117
@@ -130,6 +124,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
130 u32 data; 124 u32 data;
131 u32 pci_cmd; 125 u32 pci_cmd;
132 struct pci_dev *pdev = hw->adapter->pdev; 126 struct pci_dev *pdev = hw->adapter->pdev;
127 int pos;
133 128
134 AT_READ_REG(hw, PCI_COMMAND, &pci_cmd); 129 AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
135 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; 130 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
@@ -142,14 +137,23 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
142 */ 137 */
143 pci_enable_wake(pdev, PCI_D3hot, 0); 138 pci_enable_wake(pdev, PCI_D3hot, 0);
144 pci_enable_wake(pdev, PCI_D3cold, 0); 139 pci_enable_wake(pdev, PCI_D3cold, 0);
140 /* wol sts read-clear */
141 AT_READ_REG(hw, REG_WOL_CTRL, &data);
142 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
145 143
146 /* 144 /*
147 * Mask some pcie error bits 145 * Mask some pcie error bits
148 */ 146 */
149 AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data); 147 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
150 data &= ~PCIE_UC_SERVRITY_DLP; 148 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
151 data &= ~PCIE_UC_SERVRITY_FCP; 149 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
152 AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data); 150 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
151 /* clear error status */
152 pci_write_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
153 PCI_EXP_DEVSTA_NFED |
154 PCI_EXP_DEVSTA_FED |
155 PCI_EXP_DEVSTA_CED |
156 PCI_EXP_DEVSTA_URD);
153 157
154 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data); 158 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
155 data &= ~LTSSM_ID_EN_WRO; 159 data &= ~LTSSM_ID_EN_WRO;
@@ -158,11 +162,6 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
158 atl1c_pcie_patch(hw); 162 atl1c_pcie_patch(hw);
159 if (flag & ATL1C_PCIE_L0S_L1_DISABLE) 163 if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
160 atl1c_disable_l0s_l1(hw); 164 atl1c_disable_l0s_l1(hw);
161 if (flag & ATL1C_PCIE_PHY_RESET)
162 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
163 else
164 AT_WRITE_REG(hw, REG_GPHY_CTRL,
165 GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
166 165
167 msleep(5); 166 msleep(5);
168} 167}
@@ -207,14 +206,14 @@ static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
207 * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads 206 * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
208 * of the idle status register until the device is actually idle 207 * of the idle status register until the device is actually idle
209 */ 208 */
210static u32 atl1c_wait_until_idle(struct atl1c_hw *hw) 209static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
211{ 210{
212 int timeout; 211 int timeout;
213 u32 data; 212 u32 data;
214 213
215 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { 214 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
216 AT_READ_REG(hw, REG_IDLE_STATUS, &data); 215 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
217 if ((data & IDLE_STATUS_MASK) == 0) 216 if ((data & modu_ctrl) == 0)
218 return 0; 217 return 0;
219 msleep(1); 218 msleep(1);
220 } 219 }
@@ -261,15 +260,16 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
261 260
262 if ((phy_data & BMSR_LSTATUS) == 0) { 261 if ((phy_data & BMSR_LSTATUS) == 0) {
263 /* link down */ 262 /* link down */
264 hw->hibernate = true;
265 if (atl1c_stop_mac(hw) != 0)
266 if (netif_msg_hw(adapter))
267 dev_warn(&pdev->dev, "stop mac failed\n");
268 atl1c_set_aspm(hw, false);
269 netif_carrier_off(netdev); 263 netif_carrier_off(netdev);
270 netif_stop_queue(netdev); 264 netif_stop_queue(netdev);
271 atl1c_phy_reset(hw); 265 hw->hibernate = true;
272 atl1c_phy_init(&adapter->hw); 266 if (atl1c_reset_mac(hw) != 0)
267 if (netif_msg_hw(adapter))
268 dev_warn(&pdev->dev, "reset mac failed\n");
269 atl1c_set_aspm(hw, SPEED_0);
270 atl1c_post_phy_linkchg(hw, SPEED_0);
271 atl1c_reset_dma_ring(adapter);
272 atl1c_configure(adapter);
273 } else { 273 } else {
274 /* Link Up */ 274 /* Link Up */
275 hw->hibernate = false; 275 hw->hibernate = false;
@@ -283,10 +283,9 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
283 adapter->link_duplex != duplex) { 283 adapter->link_duplex != duplex) {
284 adapter->link_speed = speed; 284 adapter->link_speed = speed;
285 adapter->link_duplex = duplex; 285 adapter->link_duplex = duplex;
286 atl1c_set_aspm(hw, true); 286 atl1c_set_aspm(hw, speed);
287 atl1c_enable_tx_ctrl(hw); 287 atl1c_post_phy_linkchg(hw, speed);
288 atl1c_enable_rx_ctrl(hw); 288 atl1c_start_mac(adapter);
289 atl1c_setup_mac_ctrl(adapter);
290 if (netif_msg_link(adapter)) 289 if (netif_msg_link(adapter))
291 dev_info(&pdev->dev, 290 dev_info(&pdev->dev,
292 "%s: %s NIC Link is Up<%d Mbps %s>\n", 291 "%s: %s NIC Link is Up<%d Mbps %s>\n",
@@ -337,6 +336,9 @@ static void atl1c_common_task(struct work_struct *work)
337 adapter = container_of(work, struct atl1c_adapter, common_task); 336 adapter = container_of(work, struct atl1c_adapter, common_task);
338 netdev = adapter->netdev; 337 netdev = adapter->netdev;
339 338
339 if (test_bit(__AT_DOWN, &adapter->flags))
340 return;
341
340 if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) { 342 if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
341 netif_device_detach(netdev); 343 netif_device_detach(netdev);
342 atl1c_down(adapter); 344 atl1c_down(adapter);
@@ -345,8 +347,11 @@ static void atl1c_common_task(struct work_struct *work)
345 } 347 }
346 348
347 if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE, 349 if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
348 &adapter->work_event)) 350 &adapter->work_event)) {
351 atl1c_irq_disable(adapter);
349 atl1c_check_link_status(adapter); 352 atl1c_check_link_status(adapter);
353 atl1c_irq_enable(adapter);
354 }
350} 355}
351 356
352 357
@@ -470,7 +475,7 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
470 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); 475 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
471 netdev->addr_assign_type &= ~NET_ADDR_RANDOM; 476 netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
472 477
473 atl1c_hw_set_mac_addr(&adapter->hw); 478 atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
474 479
475 return 0; 480 return 0;
476} 481}
@@ -523,11 +528,16 @@ static int atl1c_set_features(struct net_device *netdev,
523static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) 528static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
524{ 529{
525 struct atl1c_adapter *adapter = netdev_priv(netdev); 530 struct atl1c_adapter *adapter = netdev_priv(netdev);
531 struct atl1c_hw *hw = &adapter->hw;
526 int old_mtu = netdev->mtu; 532 int old_mtu = netdev->mtu;
527 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 533 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
528 534
529 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 535 /* Fast Ethernet controller doesn't support jumbo packet */
530 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 536 if (((hw->nic_type == athr_l2c ||
537 hw->nic_type == athr_l2c_b ||
538 hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) ||
539 max_frame < ETH_ZLEN + ETH_FCS_LEN ||
540 max_frame > MAX_JUMBO_FRAME_SIZE) {
531 if (netif_msg_link(adapter)) 541 if (netif_msg_link(adapter))
532 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); 542 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
533 return -EINVAL; 543 return -EINVAL;
@@ -543,14 +553,6 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
543 netdev_update_features(netdev); 553 netdev_update_features(netdev);
544 atl1c_up(adapter); 554 atl1c_up(adapter);
545 clear_bit(__AT_RESETTING, &adapter->flags); 555 clear_bit(__AT_RESETTING, &adapter->flags);
546 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
547 u32 phy_data;
548
549 AT_READ_REG(&adapter->hw, 0x1414, &phy_data);
550 phy_data |= 0x10000000;
551 AT_WRITE_REG(&adapter->hw, 0x1414, phy_data);
552 }
553
554 } 556 }
555 return 0; 557 return 0;
556} 558}
@@ -563,7 +565,7 @@ static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
563 struct atl1c_adapter *adapter = netdev_priv(netdev); 565 struct atl1c_adapter *adapter = netdev_priv(netdev);
564 u16 result; 566 u16 result;
565 567
566 atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); 568 atl1c_read_phy_reg(&adapter->hw, reg_num, &result);
567 return result; 569 return result;
568} 570}
569 571
@@ -572,7 +574,7 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
572{ 574{
573 struct atl1c_adapter *adapter = netdev_priv(netdev); 575 struct atl1c_adapter *adapter = netdev_priv(netdev);
574 576
575 atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); 577 atl1c_write_phy_reg(&adapter->hw, reg_num, val);
576} 578}
577 579
578/* 580/*
@@ -687,21 +689,15 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw)
687 689
688static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) 690static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
689{ 691{
690 u32 phy_status_data;
691 u32 link_ctrl_data; 692 u32 link_ctrl_data;
692 693
693 atl1c_set_mac_type(hw); 694 atl1c_set_mac_type(hw);
694 AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
695 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); 695 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
696 696
697 hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | 697 hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE |
698 ATL1C_TXQ_MODE_ENHANCE; 698 ATL1C_TXQ_MODE_ENHANCE;
699 if (link_ctrl_data & LINK_CTRL_L0S_EN) 699 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT |
700 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; 700 ATL1C_ASPM_L1_SUPPORT;
701 if (link_ctrl_data & LINK_CTRL_L1_EN)
702 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
703 if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
704 hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
705 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; 701 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
706 702
707 if (hw->nic_type == athr_l1c || 703 if (hw->nic_type == athr_l1c ||
@@ -710,6 +706,55 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
710 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; 706 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
711 return 0; 707 return 0;
712} 708}
709
710struct atl1c_platform_patch {
711 u16 pci_did;
712 u8 pci_revid;
713 u16 subsystem_vid;
714 u16 subsystem_did;
715 u32 patch_flag;
716#define ATL1C_LINK_PATCH 0x1
717};
718static const struct atl1c_platform_patch plats[] __devinitdata = {
719{0x2060, 0xC1, 0x1019, 0x8152, 0x1},
720{0x2060, 0xC1, 0x1019, 0x2060, 0x1},
721{0x2060, 0xC1, 0x1019, 0xE000, 0x1},
722{0x2062, 0xC0, 0x1019, 0x8152, 0x1},
723{0x2062, 0xC0, 0x1019, 0x2062, 0x1},
724{0x2062, 0xC0, 0x1458, 0xE000, 0x1},
725{0x2062, 0xC1, 0x1019, 0x8152, 0x1},
726{0x2062, 0xC1, 0x1019, 0x2062, 0x1},
727{0x2062, 0xC1, 0x1458, 0xE000, 0x1},
728{0x2062, 0xC1, 0x1565, 0x2802, 0x1},
729{0x2062, 0xC1, 0x1565, 0x2801, 0x1},
730{0x1073, 0xC0, 0x1019, 0x8151, 0x1},
731{0x1073, 0xC0, 0x1019, 0x1073, 0x1},
732{0x1073, 0xC0, 0x1458, 0xE000, 0x1},
733{0x1083, 0xC0, 0x1458, 0xE000, 0x1},
734{0x1083, 0xC0, 0x1019, 0x8151, 0x1},
735{0x1083, 0xC0, 0x1019, 0x1083, 0x1},
736{0x1083, 0xC0, 0x1462, 0x7680, 0x1},
737{0x1083, 0xC0, 0x1565, 0x2803, 0x1},
738{0},
739};
740
741static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
742{
743 int i = 0;
744
745 hw->msi_lnkpatch = false;
746
747 while (plats[i].pci_did != 0) {
748 if (plats[i].pci_did == hw->device_id &&
749 plats[i].pci_revid == hw->revision_id &&
750 plats[i].subsystem_vid == hw->subsystem_vendor_id &&
751 plats[i].subsystem_did == hw->subsystem_id) {
752 if (plats[i].patch_flag & ATL1C_LINK_PATCH)
753 hw->msi_lnkpatch = true;
754 }
755 i++;
756 }
757}
713/* 758/*
714 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter) 759 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
715 * @adapter: board private structure to initialize 760 * @adapter: board private structure to initialize
@@ -729,9 +774,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
729 device_set_wakeup_enable(&pdev->dev, false); 774 device_set_wakeup_enable(&pdev->dev, false);
730 adapter->link_speed = SPEED_0; 775 adapter->link_speed = SPEED_0;
731 adapter->link_duplex = FULL_DUPLEX; 776 adapter->link_duplex = FULL_DUPLEX;
732 adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
733 adapter->tpd_ring[0].count = 1024; 777 adapter->tpd_ring[0].count = 1024;
734 adapter->rfd_ring[0].count = 512; 778 adapter->rfd_ring.count = 512;
735 779
736 hw->vendor_id = pdev->vendor; 780 hw->vendor_id = pdev->vendor;
737 hw->device_id = pdev->device; 781 hw->device_id = pdev->device;
@@ -746,26 +790,18 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
746 dev_err(&pdev->dev, "set mac function pointers failed\n"); 790 dev_err(&pdev->dev, "set mac function pointers failed\n");
747 return -1; 791 return -1;
748 } 792 }
793 atl1c_patch_assign(hw);
794
749 hw->intr_mask = IMR_NORMAL_MASK; 795 hw->intr_mask = IMR_NORMAL_MASK;
750 hw->phy_configured = false; 796 hw->phy_configured = false;
751 hw->preamble_len = 7; 797 hw->preamble_len = 7;
752 hw->max_frame_size = adapter->netdev->mtu; 798 hw->max_frame_size = adapter->netdev->mtu;
753 if (adapter->num_rx_queues < 2) {
754 hw->rss_type = atl1c_rss_disable;
755 hw->rss_mode = atl1c_rss_mode_disable;
756 } else {
757 hw->rss_type = atl1c_rss_ipv4;
758 hw->rss_mode = atl1c_rss_mul_que_mul_int;
759 hw->rss_hash_bits = 16;
760 }
761 hw->autoneg_advertised = ADVERTISED_Autoneg; 799 hw->autoneg_advertised = ADVERTISED_Autoneg;
762 hw->indirect_tab = 0xE4E4E4E4; 800 hw->indirect_tab = 0xE4E4E4E4;
763 hw->base_cpu = 0; 801 hw->base_cpu = 0;
764 802
765 hw->ict = 50000; /* 100ms */ 803 hw->ict = 50000; /* 100ms */
766 hw->smb_timer = 200000; /* 400ms */ 804 hw->smb_timer = 200000; /* 400ms */
767 hw->cmb_tpd = 4;
768 hw->cmb_tx_timer = 1; /* 2 us */
769 hw->rx_imt = 200; 805 hw->rx_imt = 200;
770 hw->tx_imt = 1000; 806 hw->tx_imt = 1000;
771 807
@@ -773,9 +809,6 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
773 hw->rfd_burst = 8; 809 hw->rfd_burst = 8;
774 hw->dma_order = atl1c_dma_ord_out; 810 hw->dma_order = atl1c_dma_ord_out;
775 hw->dmar_block = atl1c_dma_req_1024; 811 hw->dmar_block = atl1c_dma_req_1024;
776 hw->dmaw_block = atl1c_dma_req_1024;
777 hw->dmar_dly_cnt = 15;
778 hw->dmaw_dly_cnt = 4;
779 812
780 if (atl1c_alloc_queues(adapter)) { 813 if (atl1c_alloc_queues(adapter)) {
781 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 814 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
@@ -851,24 +884,22 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
851 */ 884 */
852static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter) 885static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
853{ 886{
854 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; 887 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
855 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; 888 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
856 struct atl1c_buffer *buffer_info; 889 struct atl1c_buffer *buffer_info;
857 struct pci_dev *pdev = adapter->pdev; 890 struct pci_dev *pdev = adapter->pdev;
858 int i, j; 891 int j;
859 892
860 for (i = 0; i < adapter->num_rx_queues; i++) { 893 for (j = 0; j < rfd_ring->count; j++) {
861 for (j = 0; j < rfd_ring[i].count; j++) { 894 buffer_info = &rfd_ring->buffer_info[j];
862 buffer_info = &rfd_ring[i].buffer_info[j]; 895 atl1c_clean_buffer(pdev, buffer_info, 0);
863 atl1c_clean_buffer(pdev, buffer_info, 0);
864 }
865 /* zero out the descriptor ring */
866 memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
867 rfd_ring[i].next_to_clean = 0;
868 rfd_ring[i].next_to_use = 0;
869 rrd_ring[i].next_to_use = 0;
870 rrd_ring[i].next_to_clean = 0;
871 } 896 }
897 /* zero out the descriptor ring */
898 memset(rfd_ring->desc, 0, rfd_ring->size);
899 rfd_ring->next_to_clean = 0;
900 rfd_ring->next_to_use = 0;
901 rrd_ring->next_to_use = 0;
902 rrd_ring->next_to_clean = 0;
872} 903}
873 904
874/* 905/*
@@ -877,8 +908,8 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
877static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) 908static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
878{ 909{
879 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; 910 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
880 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; 911 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
881 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; 912 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
882 struct atl1c_buffer *buffer_info; 913 struct atl1c_buffer *buffer_info;
883 int i, j; 914 int i, j;
884 915
@@ -890,15 +921,13 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
890 ATL1C_SET_BUFFER_STATE(&buffer_info[i], 921 ATL1C_SET_BUFFER_STATE(&buffer_info[i],
891 ATL1C_BUFFER_FREE); 922 ATL1C_BUFFER_FREE);
892 } 923 }
893 for (i = 0; i < adapter->num_rx_queues; i++) { 924 rfd_ring->next_to_use = 0;
894 rfd_ring[i].next_to_use = 0; 925 rfd_ring->next_to_clean = 0;
895 rfd_ring[i].next_to_clean = 0; 926 rrd_ring->next_to_use = 0;
896 rrd_ring[i].next_to_use = 0; 927 rrd_ring->next_to_clean = 0;
897 rrd_ring[i].next_to_clean = 0; 928 for (j = 0; j < rfd_ring->count; j++) {
898 for (j = 0; j < rfd_ring[i].count; j++) { 929 buffer_info = &rfd_ring->buffer_info[j];
899 buffer_info = &rfd_ring[i].buffer_info[j]; 930 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
900 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
901 }
902 } 931 }
903} 932}
904 933
@@ -935,27 +964,23 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
935{ 964{
936 struct pci_dev *pdev = adapter->pdev; 965 struct pci_dev *pdev = adapter->pdev;
937 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; 966 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
938 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; 967 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
939 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; 968 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
940 struct atl1c_ring_header *ring_header = &adapter->ring_header; 969 struct atl1c_ring_header *ring_header = &adapter->ring_header;
941 int num_rx_queues = adapter->num_rx_queues;
942 int size; 970 int size;
943 int i; 971 int i;
944 int count = 0; 972 int count = 0;
945 int rx_desc_count = 0; 973 int rx_desc_count = 0;
946 u32 offset = 0; 974 u32 offset = 0;
947 975
948 rrd_ring[0].count = rfd_ring[0].count; 976 rrd_ring->count = rfd_ring->count;
949 for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++) 977 for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
950 tpd_ring[i].count = tpd_ring[0].count; 978 tpd_ring[i].count = tpd_ring[0].count;
951 979
952 for (i = 1; i < adapter->num_rx_queues; i++)
953 rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count;
954
955 /* 2 tpd queue, one high priority queue, 980 /* 2 tpd queue, one high priority queue,
956 * another normal priority queue */ 981 * another normal priority queue */
957 size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 + 982 size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
958 rfd_ring->count * num_rx_queues); 983 rfd_ring->count);
959 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); 984 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
960 if (unlikely(!tpd_ring->buffer_info)) { 985 if (unlikely(!tpd_ring->buffer_info)) {
961 dev_err(&pdev->dev, "kzalloc failed, size = %d\n", 986 dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
@@ -968,12 +993,11 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
968 count += tpd_ring[i].count; 993 count += tpd_ring[i].count;
969 } 994 }
970 995
971 for (i = 0; i < num_rx_queues; i++) { 996 rfd_ring->buffer_info =
972 rfd_ring[i].buffer_info = 997 (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
973 (struct atl1c_buffer *) (tpd_ring->buffer_info + count); 998 count += rfd_ring->count;
974 count += rfd_ring[i].count; 999 rx_desc_count += rfd_ring->count;
975 rx_desc_count += rfd_ring[i].count; 1000
976 }
977 /* 1001 /*
978 * real ring DMA buffer 1002 * real ring DMA buffer
979 * each ring/block may need up to 8 bytes for alignment, hence the 1003 * each ring/block may need up to 8 bytes for alignment, hence the
@@ -983,8 +1007,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
983 sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 + 1007 sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
984 sizeof(struct atl1c_rx_free_desc) * rx_desc_count + 1008 sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
985 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 1009 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
986 sizeof(struct atl1c_hw_stats) + 1010 8 * 4;
987 8 * 4 + 8 * 2 * num_rx_queues;
988 1011
989 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, 1012 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
990 &ring_header->dma); 1013 &ring_header->dma);
@@ -1005,25 +1028,18 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
1005 offset += roundup(tpd_ring[i].size, 8); 1028 offset += roundup(tpd_ring[i].size, 8);
1006 } 1029 }
1007 /* init RFD ring */ 1030 /* init RFD ring */
1008 for (i = 0; i < num_rx_queues; i++) { 1031 rfd_ring->dma = ring_header->dma + offset;
1009 rfd_ring[i].dma = ring_header->dma + offset; 1032 rfd_ring->desc = (u8 *) ring_header->desc + offset;
1010 rfd_ring[i].desc = (u8 *) ring_header->desc + offset; 1033 rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count;
1011 rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) * 1034 offset += roundup(rfd_ring->size, 8);
1012 rfd_ring[i].count;
1013 offset += roundup(rfd_ring[i].size, 8);
1014 }
1015 1035
1016 /* init RRD ring */ 1036 /* init RRD ring */
1017 for (i = 0; i < num_rx_queues; i++) { 1037 rrd_ring->dma = ring_header->dma + offset;
1018 rrd_ring[i].dma = ring_header->dma + offset; 1038 rrd_ring->desc = (u8 *) ring_header->desc + offset;
1019 rrd_ring[i].desc = (u8 *) ring_header->desc + offset; 1039 rrd_ring->size = sizeof(struct atl1c_recv_ret_status) *
1020 rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) * 1040 rrd_ring->count;
1021 rrd_ring[i].count; 1041 offset += roundup(rrd_ring->size, 8);
1022 offset += roundup(rrd_ring[i].size, 8);
1023 }
1024 1042
1025 adapter->smb.dma = ring_header->dma + offset;
1026 adapter->smb.smb = (u8 *)ring_header->desc + offset;
1027 return 0; 1043 return 0;
1028 1044
1029err_nomem: 1045err_nomem:
@@ -1034,26 +1050,20 @@ err_nomem:
1034static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) 1050static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1035{ 1051{
1036 struct atl1c_hw *hw = &adapter->hw; 1052 struct atl1c_hw *hw = &adapter->hw;
1037 struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *) 1053 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1038 adapter->rfd_ring; 1054 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
1039 struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *)
1040 adapter->rrd_ring;
1041 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) 1055 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
1042 adapter->tpd_ring; 1056 adapter->tpd_ring;
1043 struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
1044 struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
1045 int i;
1046 u32 data;
1047 1057
1048 /* TPD */ 1058 /* TPD */
1049 AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, 1059 AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
1050 (u32)((tpd_ring[atl1c_trans_normal].dma & 1060 (u32)((tpd_ring[atl1c_trans_normal].dma &
1051 AT_DMA_HI_ADDR_MASK) >> 32)); 1061 AT_DMA_HI_ADDR_MASK) >> 32));
1052 /* just enable normal priority TX queue */ 1062 /* just enable normal priority TX queue */
1053 AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO, 1063 AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO,
1054 (u32)(tpd_ring[atl1c_trans_normal].dma & 1064 (u32)(tpd_ring[atl1c_trans_normal].dma &
1055 AT_DMA_LO_ADDR_MASK)); 1065 AT_DMA_LO_ADDR_MASK));
1056 AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO, 1066 AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO,
1057 (u32)(tpd_ring[atl1c_trans_high].dma & 1067 (u32)(tpd_ring[atl1c_trans_high].dma &
1058 AT_DMA_LO_ADDR_MASK)); 1068 AT_DMA_LO_ADDR_MASK));
1059 AT_WRITE_REG(hw, REG_TPD_RING_SIZE, 1069 AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
@@ -1062,31 +1072,21 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1062 1072
1063 /* RFD */ 1073 /* RFD */
1064 AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI, 1074 AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
1065 (u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32)); 1075 (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1066 for (i = 0; i < adapter->num_rx_queues; i++) 1076 AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO,
1067 AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i], 1077 (u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK));
1068 (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1069 1078
1070 AT_WRITE_REG(hw, REG_RFD_RING_SIZE, 1079 AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
1071 rfd_ring[0].count & RFD_RING_SIZE_MASK); 1080 rfd_ring->count & RFD_RING_SIZE_MASK);
1072 AT_WRITE_REG(hw, REG_RX_BUF_SIZE, 1081 AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
1073 adapter->rx_buffer_len & RX_BUF_SIZE_MASK); 1082 adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
1074 1083
1075 /* RRD */ 1084 /* RRD */
1076 for (i = 0; i < adapter->num_rx_queues; i++) 1085 AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO,
1077 AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i], 1086 (u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK));
1078 (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1079 AT_WRITE_REG(hw, REG_RRD_RING_SIZE, 1087 AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
1080 (rrd_ring[0].count & RRD_RING_SIZE_MASK)); 1088 (rrd_ring->count & RRD_RING_SIZE_MASK));
1081 1089
1082 /* CMB */
1083 AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK);
1084
1085 /* SMB */
1086 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI,
1087 (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1088 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
1089 (u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
1090 if (hw->nic_type == athr_l2c_b) { 1090 if (hw->nic_type == athr_l2c_b) {
1091 AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L); 1091 AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
1092 AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L); 1092 AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
@@ -1097,13 +1097,6 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1097 AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/ 1097 AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/
1098 AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/ 1098 AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/
1099 } 1099 }
1100 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) {
1101 /* Power Saving for L2c_B */
1102 AT_READ_REG(hw, REG_SERDES_LOCK, &data);
1103 data |= SERDES_MAC_CLK_SLOWDOWN;
1104 data |= SERDES_PYH_CLK_SLOWDOWN;
1105 AT_WRITE_REG(hw, REG_SERDES_LOCK, data);
1106 }
1107 /* Load all of base address above */ 1100 /* Load all of base address above */
1108 AT_WRITE_REG(hw, REG_LOAD_PTR, 1); 1101 AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
1109} 1102}
@@ -1111,32 +1104,26 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1111static void atl1c_configure_tx(struct atl1c_adapter *adapter) 1104static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1112{ 1105{
1113 struct atl1c_hw *hw = &adapter->hw; 1106 struct atl1c_hw *hw = &adapter->hw;
1114 u32 dev_ctrl_data; 1107 int max_pay_load;
1115 u32 max_pay_load;
1116 u16 tx_offload_thresh; 1108 u16 tx_offload_thresh;
1117 u32 txq_ctrl_data; 1109 u32 txq_ctrl_data;
1118 u32 max_pay_load_data;
1119 1110
1120 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; 1111 tx_offload_thresh = MAX_TSO_FRAME_SIZE;
1121 AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH, 1112 AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
1122 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK); 1113 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
1123 AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data); 1114 max_pay_load = pcie_get_readrq(adapter->pdev) >> 8;
1124 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
1125 DEVICE_CTRL_MAX_PAYLOAD_MASK;
1126 hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
1127 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
1128 DEVICE_CTRL_MAX_RREQ_SZ_MASK;
1129 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); 1115 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
1130 1116 /*
1131 txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) << 1117 * if BIOS had changed the dam-read-max-length to an invalid value,
1132 TXQ_NUM_TPD_BURST_SHIFT; 1118 * restore it to default value
1133 if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE) 1119 */
1134 txq_ctrl_data |= TXQ_CTRL_ENH_MODE; 1120 if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) {
1135 max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] & 1121 pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN);
1136 TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT; 1122 hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN;
1137 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) 1123 }
1138 max_pay_load_data >>= 1; 1124 txq_ctrl_data =
1139 txq_ctrl_data |= max_pay_load_data; 1125 hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ?
1126 L2CB_TXQ_CFGV : L1C_TXQ_CFGV;
1140 1127
1141 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); 1128 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
1142} 1129}
@@ -1151,34 +1138,13 @@ static void atl1c_configure_rx(struct atl1c_adapter *adapter)
1151 1138
1152 if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM) 1139 if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
1153 rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN; 1140 rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
1154 if (hw->rss_type == atl1c_rss_ipv4)
1155 rxq_ctrl_data |= RSS_HASH_IPV4;
1156 if (hw->rss_type == atl1c_rss_ipv4_tcp)
1157 rxq_ctrl_data |= RSS_HASH_IPV4_TCP;
1158 if (hw->rss_type == atl1c_rss_ipv6)
1159 rxq_ctrl_data |= RSS_HASH_IPV6;
1160 if (hw->rss_type == atl1c_rss_ipv6_tcp)
1161 rxq_ctrl_data |= RSS_HASH_IPV6_TCP;
1162 if (hw->rss_type != atl1c_rss_disable)
1163 rxq_ctrl_data |= RRS_HASH_CTRL_EN;
1164
1165 rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) <<
1166 RSS_MODE_SHIFT;
1167 rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
1168 RSS_HASH_BITS_SHIFT;
1169 if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
1170 rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
1171 ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
1172 1141
1173 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); 1142 /* aspm for gigabit */
1174} 1143 if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0)
1175 1144 rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT,
1176static void atl1c_configure_rss(struct atl1c_adapter *adapter) 1145 ASPM_THRUPUT_LIMIT_100M);
1177{
1178 struct atl1c_hw *hw = &adapter->hw;
1179 1146
1180 AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); 1147 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1181 AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
1182} 1148}
1183 1149
1184static void atl1c_configure_dma(struct atl1c_adapter *adapter) 1150static void atl1c_configure_dma(struct atl1c_adapter *adapter)
@@ -1186,36 +1152,11 @@ static void atl1c_configure_dma(struct atl1c_adapter *adapter)
1186 struct atl1c_hw *hw = &adapter->hw; 1152 struct atl1c_hw *hw = &adapter->hw;
1187 u32 dma_ctrl_data; 1153 u32 dma_ctrl_data;
1188 1154
1189 dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI; 1155 dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) |
1190 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) 1156 DMA_CTRL_RREQ_PRI_DATA |
1191 dma_ctrl_data |= DMA_CTRL_CMB_EN; 1157 FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) |
1192 if (hw->ctrl_flags & ATL1C_SMB_ENABLE) 1158 FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) |
1193 dma_ctrl_data |= DMA_CTRL_SMB_EN; 1159 FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF);
1194 else
1195 dma_ctrl_data |= MAC_CTRL_SMB_DIS;
1196
1197 switch (hw->dma_order) {
1198 case atl1c_dma_ord_in:
1199 dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER;
1200 break;
1201 case atl1c_dma_ord_enh:
1202 dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER;
1203 break;
1204 case atl1c_dma_ord_out:
1205 dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER;
1206 break;
1207 default:
1208 break;
1209 }
1210
1211 dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
1212 << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
1213 dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
1214 << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
1215 dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
1216 << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
1217 dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
1218 << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
1219 1160
1220 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); 1161 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
1221} 1162}
@@ -1230,52 +1171,53 @@ static int atl1c_stop_mac(struct atl1c_hw *hw)
1230 u32 data; 1171 u32 data;
1231 1172
1232 AT_READ_REG(hw, REG_RXQ_CTRL, &data); 1173 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1233 data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN | 1174 data &= ~RXQ_CTRL_EN;
1234 RXQ3_CTRL_EN | RXQ_CTRL_EN);
1235 AT_WRITE_REG(hw, REG_RXQ_CTRL, data); 1175 AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1236 1176
1237 AT_READ_REG(hw, REG_TXQ_CTRL, &data); 1177 AT_READ_REG(hw, REG_TXQ_CTRL, &data);
1238 data &= ~TXQ_CTRL_EN; 1178 data &= ~TXQ_CTRL_EN;
1239 AT_WRITE_REG(hw, REG_TWSI_CTRL, data); 1179 AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
1240 1180
1241 atl1c_wait_until_idle(hw); 1181 atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY);
1242 1182
1243 AT_READ_REG(hw, REG_MAC_CTRL, &data); 1183 AT_READ_REG(hw, REG_MAC_CTRL, &data);
1244 data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN); 1184 data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
1245 AT_WRITE_REG(hw, REG_MAC_CTRL, data); 1185 AT_WRITE_REG(hw, REG_MAC_CTRL, data);
1246 1186
1247 return (int)atl1c_wait_until_idle(hw); 1187 return (int)atl1c_wait_until_idle(hw,
1248} 1188 IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY);
1249
1250static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
1251{
1252 u32 data;
1253
1254 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1255 switch (hw->adapter->num_rx_queues) {
1256 case 4:
1257 data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN);
1258 break;
1259 case 3:
1260 data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN);
1261 break;
1262 case 2:
1263 data |= RXQ1_CTRL_EN;
1264 break;
1265 default:
1266 break;
1267 }
1268 data |= RXQ_CTRL_EN;
1269 AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1270} 1189}
1271 1190
1272static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw) 1191static void atl1c_start_mac(struct atl1c_adapter *adapter)
1273{ 1192{
1274 u32 data; 1193 struct atl1c_hw *hw = &adapter->hw;
1194 u32 mac, txq, rxq;
1195
1196 hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false;
1197 hw->mac_speed = adapter->link_speed == SPEED_1000 ?
1198 atl1c_mac_speed_1000 : atl1c_mac_speed_10_100;
1199
1200 AT_READ_REG(hw, REG_TXQ_CTRL, &txq);
1201 AT_READ_REG(hw, REG_RXQ_CTRL, &rxq);
1202 AT_READ_REG(hw, REG_MAC_CTRL, &mac);
1203
1204 txq |= TXQ_CTRL_EN;
1205 rxq |= RXQ_CTRL_EN;
1206 mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW |
1207 MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW |
1208 MAC_CTRL_ADD_CRC | MAC_CTRL_PAD |
1209 MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN |
1210 MAC_CTRL_HASH_ALG_CRC32;
1211 if (hw->mac_duplex)
1212 mac |= MAC_CTRL_DUPLX;
1213 else
1214 mac &= ~MAC_CTRL_DUPLX;
1215 mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed);
1216 mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len);
1275 1217
1276 AT_READ_REG(hw, REG_TXQ_CTRL, &data); 1218 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq);
1277 data |= TXQ_CTRL_EN; 1219 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq);
1278 AT_WRITE_REG(hw, REG_TXQ_CTRL, data); 1220 AT_WRITE_REG(hw, REG_MAC_CTRL, mac);
1279} 1221}
1280 1222
1281/* 1223/*
@@ -1287,10 +1229,7 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
1287{ 1229{
1288 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 1230 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
1289 struct pci_dev *pdev = adapter->pdev; 1231 struct pci_dev *pdev = adapter->pdev;
1290 u32 master_ctrl_data = 0; 1232 u32 ctrl_data = 0;
1291
1292 AT_WRITE_REG(hw, REG_IMR, 0);
1293 AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
1294 1233
1295 atl1c_stop_mac(hw); 1234 atl1c_stop_mac(hw);
1296 /* 1235 /*
@@ -1299,194 +1238,148 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
1299 * the current PCI configuration. The global reset bit is self- 1238 * the current PCI configuration. The global reset bit is self-
1300 * clearing, and should clear within a microsecond. 1239 * clearing, and should clear within a microsecond.
1301 */ 1240 */
1302 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); 1241 AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data);
1303 master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF; 1242 ctrl_data |= MASTER_CTRL_OOB_DIS;
1304 AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST) 1243 AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST);
1305 & 0xFFFF));
1306 1244
1307 AT_WRITE_FLUSH(hw); 1245 AT_WRITE_FLUSH(hw);
1308 msleep(10); 1246 msleep(10);
1309 /* Wait at least 10ms for All module to be Idle */ 1247 /* Wait at least 10ms for All module to be Idle */
1310 1248
1311 if (atl1c_wait_until_idle(hw)) { 1249 if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) {
1312 dev_err(&pdev->dev, 1250 dev_err(&pdev->dev,
1313 "MAC state machine can't be idle since" 1251 "MAC state machine can't be idle since"
1314 " disabled for 10ms second\n"); 1252 " disabled for 10ms second\n");
1315 return -1; 1253 return -1;
1316 } 1254 }
1255 AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data);
1256
1257 /* driver control speed/duplex */
1258 AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data);
1259 AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW);
1260
1261 /* clk switch setting */
1262 AT_READ_REG(hw, REG_SERDES, &ctrl_data);
1263 switch (hw->nic_type) {
1264 case athr_l2c_b:
1265 ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN |
1266 SERDES_MAC_CLK_SLOWDOWN);
1267 AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
1268 break;
1269 case athr_l2c_b2:
1270 case athr_l1d_2:
1271 ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN;
1272 AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
1273 break;
1274 default:
1275 break;
1276 }
1277
1317 return 0; 1278 return 0;
1318} 1279}
1319 1280
1320static void atl1c_disable_l0s_l1(struct atl1c_hw *hw) 1281static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
1321{ 1282{
1322 u32 pm_ctrl_data; 1283 u16 ctrl_flags = hw->ctrl_flags;
1323 1284
1324 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); 1285 hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT);
1325 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1286 atl1c_set_aspm(hw, SPEED_0);
1326 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1287 hw->ctrl_flags = ctrl_flags;
1327 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1328 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1329 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1330 pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK;
1331 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1332
1333 pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
1334 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1335 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
1336 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1337} 1288}
1338 1289
1339/* 1290/*
1340 * Set ASPM state. 1291 * Set ASPM state.
1341 * Enable/disable L0s/L1 depend on link state. 1292 * Enable/disable L0s/L1 depend on link state.
1342 */ 1293 */
1343static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) 1294static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
1344{ 1295{
1345 u32 pm_ctrl_data; 1296 u32 pm_ctrl_data;
1346 u32 link_ctrl_data; 1297 u32 link_l1_timer;
1347 u32 link_l1_timer = 0xF;
1348 1298
1349 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); 1299 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1350 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); 1300 pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN |
1301 PM_CTRL_ASPM_L0S_EN |
1302 PM_CTRL_MAC_ASPM_CHK);
1303 /* L1 timer */
1304 if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1305 pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S;
1306 link_l1_timer =
1307 link_speed == SPEED_1000 || link_speed == SPEED_100 ?
1308 L1D_PMCTRL_L1_ENTRY_TM_16US : 1;
1309 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1310 L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer);
1311 } else {
1312 link_l1_timer = hw->nic_type == athr_l2c_b ?
1313 L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM;
1314 if (link_speed != SPEED_1000 && link_speed != SPEED_100)
1315 link_l1_timer = 1;
1316 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1317 PM_CTRL_L1_ENTRY_TIMER, link_l1_timer);
1318 }
1351 1319
1352 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; 1320 /* L0S/L1 enable */
1353 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1321 if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0)
1354 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1322 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK;
1355 pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK << 1323 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1356 PM_CTRL_LCKDET_TIMER_SHIFT); 1324 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK;
1357 pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT;
1358 1325
1326 /* l2cb & l1d & l2cb2 & l1d2 */
1359 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || 1327 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1360 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { 1328 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1361 link_ctrl_data &= ~LINK_CTRL_EXT_SYNC; 1329 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1362 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) { 1330 PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF);
1363 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) 1331 pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER |
1364 link_ctrl_data |= LINK_CTRL_EXT_SYNC; 1332 PM_CTRL_SERDES_PD_EX_L1 |
1365 } 1333 PM_CTRL_CLK_SWH_L1;
1366 1334 pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1367 AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data); 1335 PM_CTRL_SERDES_PLL_L1_EN |
1368 1336 PM_CTRL_SERDES_BUFS_RX_L1_EN |
1369 pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER; 1337 PM_CTRL_SA_DLY_EN |
1370 pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK << 1338 PM_CTRL_HOTRST);
1371 PM_CTRL_PM_REQ_TIMER_SHIFT); 1339 /* disable l0s if link down or l2cb */
1372 pm_ctrl_data |= AT_ASPM_L1_TIMER << 1340 if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b)
1373 PM_CTRL_PM_REQ_TIMER_SHIFT;
1374 pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
1375 pm_ctrl_data &= ~PM_CTRL_HOTRST;
1376 pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1377 pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
1378 }
1379 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
1380 if (linkup) {
1381 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1382 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1383 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1384 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1385 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
1386 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
1387
1388 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1389 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1390 if (hw->nic_type == athr_l2c_b)
1391 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
1392 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1393 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1394 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1395 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1396 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1397 if (hw->adapter->link_speed == SPEED_100 ||
1398 hw->adapter->link_speed == SPEED_1000) {
1399 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1400 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1401 if (hw->nic_type == athr_l2c_b)
1402 link_l1_timer = 7;
1403 else if (hw->nic_type == athr_l2c_b2 ||
1404 hw->nic_type == athr_l1d_2)
1405 link_l1_timer = 4;
1406 pm_ctrl_data |= link_l1_timer <<
1407 PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1408 }
1409 } else {
1410 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
1411 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1412 pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
1413 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1414 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; 1341 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1415 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; 1342 } else { /* l1c */
1416 1343 pm_ctrl_data =
1344 FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0);
1345 if (link_speed != SPEED_0) {
1346 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN |
1347 PM_CTRL_SERDES_PLL_L1_EN |
1348 PM_CTRL_SERDES_BUFS_RX_L1_EN;
1349 pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 |
1350 PM_CTRL_CLK_SWH_L1 |
1351 PM_CTRL_ASPM_L0S_EN |
1352 PM_CTRL_ASPM_L1_EN);
1353 } else { /* link down */
1354 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1355 pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1356 PM_CTRL_SERDES_PLL_L1_EN |
1357 PM_CTRL_SERDES_BUFS_RX_L1_EN |
1358 PM_CTRL_ASPM_L0S_EN);
1417 } 1359 }
1418 } else {
1419 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1420 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1421 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1422 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1423
1424 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1425 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1426 else
1427 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1428 } 1360 }
1429 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); 1361 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1430 1362
1431 return; 1363 return;
1432} 1364}
1433 1365
1434static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
1435{
1436 struct atl1c_hw *hw = &adapter->hw;
1437 struct net_device *netdev = adapter->netdev;
1438 u32 mac_ctrl_data;
1439
1440 mac_ctrl_data = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
1441 mac_ctrl_data |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
1442
1443 if (adapter->link_duplex == FULL_DUPLEX) {
1444 hw->mac_duplex = true;
1445 mac_ctrl_data |= MAC_CTRL_DUPLX;
1446 }
1447
1448 if (adapter->link_speed == SPEED_1000)
1449 hw->mac_speed = atl1c_mac_speed_1000;
1450 else
1451 hw->mac_speed = atl1c_mac_speed_10_100;
1452
1453 mac_ctrl_data |= (hw->mac_speed & MAC_CTRL_SPEED_MASK) <<
1454 MAC_CTRL_SPEED_SHIFT;
1455
1456 mac_ctrl_data |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1457 mac_ctrl_data |= ((hw->preamble_len & MAC_CTRL_PRMLEN_MASK) <<
1458 MAC_CTRL_PRMLEN_SHIFT);
1459
1460 __atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
1461
1462 mac_ctrl_data |= MAC_CTRL_BC_EN;
1463 if (netdev->flags & IFF_PROMISC)
1464 mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
1465 if (netdev->flags & IFF_ALLMULTI)
1466 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
1467
1468 mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
1469 if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 ||
1470 hw->nic_type == athr_l1d_2) {
1471 mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
1472 mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
1473 }
1474 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
1475}
1476
1477/* 1366/*
1478 * atl1c_configure - Configure Transmit&Receive Unit after Reset 1367 * atl1c_configure - Configure Transmit&Receive Unit after Reset
1479 * @adapter: board private structure 1368 * @adapter: board private structure
1480 * 1369 *
1481 * Configure the Tx /Rx unit of the MAC after a reset. 1370 * Configure the Tx /Rx unit of the MAC after a reset.
1482 */ 1371 */
1483static int atl1c_configure(struct atl1c_adapter *adapter) 1372static int atl1c_configure_mac(struct atl1c_adapter *adapter)
1484{ 1373{
1485 struct atl1c_hw *hw = &adapter->hw; 1374 struct atl1c_hw *hw = &adapter->hw;
1486 u32 master_ctrl_data = 0; 1375 u32 master_ctrl_data = 0;
1487 u32 intr_modrt_data; 1376 u32 intr_modrt_data;
1488 u32 data; 1377 u32 data;
1489 1378
1379 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
1380 master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN |
1381 MASTER_CTRL_RX_ITIMER_EN |
1382 MASTER_CTRL_INT_RDCLR);
1490 /* clear interrupt status */ 1383 /* clear interrupt status */
1491 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); 1384 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
1492 /* Clear any WOL status */ 1385 /* Clear any WOL status */
@@ -1525,30 +1418,39 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
1525 master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN; 1418 master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
1526 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); 1419 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1527 1420
1528 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) { 1421 AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1529 AT_WRITE_REG(hw, REG_CMB_TPD_THRESH, 1422 hw->smb_timer & SMB_STAT_TIMER_MASK);
1530 hw->cmb_tpd & CMB_TPD_THRESH_MASK);
1531 AT_WRITE_REG(hw, REG_CMB_TX_TIMER,
1532 hw->cmb_tx_timer & CMB_TX_TIMER_MASK);
1533 }
1534 1423
1535 if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
1536 AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1537 hw->smb_timer & SMB_STAT_TIMER_MASK);
1538 /* set MTU */ 1424 /* set MTU */
1539 AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + 1425 AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
1540 VLAN_HLEN + ETH_FCS_LEN); 1426 VLAN_HLEN + ETH_FCS_LEN);
1541 /* HDS, disable */
1542 AT_WRITE_REG(hw, REG_HDS_CTRL, 0);
1543 1427
1544 atl1c_configure_tx(adapter); 1428 atl1c_configure_tx(adapter);
1545 atl1c_configure_rx(adapter); 1429 atl1c_configure_rx(adapter);
1546 atl1c_configure_rss(adapter);
1547 atl1c_configure_dma(adapter); 1430 atl1c_configure_dma(adapter);
1548 1431
1549 return 0; 1432 return 0;
1550} 1433}
1551 1434
1435static int atl1c_configure(struct atl1c_adapter *adapter)
1436{
1437 struct net_device *netdev = adapter->netdev;
1438 int num;
1439
1440 atl1c_init_ring_ptrs(adapter);
1441 atl1c_set_multi(netdev);
1442 atl1c_restore_vlan(adapter);
1443
1444 num = atl1c_alloc_rx_buffer(adapter);
1445 if (unlikely(num == 0))
1446 return -ENOMEM;
1447
1448 if (atl1c_configure_mac(adapter))
1449 return -EIO;
1450
1451 return 0;
1452}
1453
1552static void atl1c_update_hw_stats(struct atl1c_adapter *adapter) 1454static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
1553{ 1455{
1554 u16 hw_reg_addr = 0; 1456 u16 hw_reg_addr = 0;
@@ -1635,16 +1537,11 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1635 struct pci_dev *pdev = adapter->pdev; 1537 struct pci_dev *pdev = adapter->pdev;
1636 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 1538 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1637 u16 hw_next_to_clean; 1539 u16 hw_next_to_clean;
1638 u16 shift; 1540 u16 reg;
1639 u32 data;
1640 1541
1641 if (type == atl1c_trans_high) 1542 reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
1642 shift = MB_HTPD_CONS_IDX_SHIFT;
1643 else
1644 shift = MB_NTPD_CONS_IDX_SHIFT;
1645 1543
1646 AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data); 1544 AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean);
1647 hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK;
1648 1545
1649 while (next_to_clean != hw_next_to_clean) { 1546 while (next_to_clean != hw_next_to_clean) {
1650 buffer_info = &tpd_ring->buffer_info[next_to_clean]; 1547 buffer_info = &tpd_ring->buffer_info[next_to_clean];
@@ -1746,9 +1643,9 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1746 skb_checksum_none_assert(skb); 1643 skb_checksum_none_assert(skb);
1747} 1644}
1748 1645
1749static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid) 1646static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
1750{ 1647{
1751 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid]; 1648 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1752 struct pci_dev *pdev = adapter->pdev; 1649 struct pci_dev *pdev = adapter->pdev;
1753 struct atl1c_buffer *buffer_info, *next_info; 1650 struct atl1c_buffer *buffer_info, *next_info;
1754 struct sk_buff *skb; 1651 struct sk_buff *skb;
@@ -1800,7 +1697,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
1800 /* TODO: update mailbox here */ 1697 /* TODO: update mailbox here */
1801 wmb(); 1698 wmb();
1802 rfd_ring->next_to_use = rfd_next_to_use; 1699 rfd_ring->next_to_use = rfd_next_to_use;
1803 AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid], 1700 AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX,
1804 rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK); 1701 rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
1805 } 1702 }
1806 1703
@@ -1839,7 +1736,7 @@ static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
1839 rfd_ring->next_to_clean = rfd_index; 1736 rfd_ring->next_to_clean = rfd_index;
1840} 1737}
1841 1738
1842static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, 1739static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
1843 int *work_done, int work_to_do) 1740 int *work_done, int work_to_do)
1844{ 1741{
1845 u16 rfd_num, rfd_index; 1742 u16 rfd_num, rfd_index;
@@ -1847,8 +1744,8 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
1847 u16 length; 1744 u16 length;
1848 struct pci_dev *pdev = adapter->pdev; 1745 struct pci_dev *pdev = adapter->pdev;
1849 struct net_device *netdev = adapter->netdev; 1746 struct net_device *netdev = adapter->netdev;
1850 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que]; 1747 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1851 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que]; 1748 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
1852 struct sk_buff *skb; 1749 struct sk_buff *skb;
1853 struct atl1c_recv_ret_status *rrs; 1750 struct atl1c_recv_ret_status *rrs;
1854 struct atl1c_buffer *buffer_info; 1751 struct atl1c_buffer *buffer_info;
@@ -1914,7 +1811,7 @@ rrs_checked:
1914 count++; 1811 count++;
1915 } 1812 }
1916 if (count) 1813 if (count)
1917 atl1c_alloc_rx_buffer(adapter, que); 1814 atl1c_alloc_rx_buffer(adapter);
1918} 1815}
1919 1816
1920/* 1817/*
@@ -1931,7 +1828,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget)
1931 if (!netif_carrier_ok(adapter->netdev)) 1828 if (!netif_carrier_ok(adapter->netdev))
1932 goto quit_polling; 1829 goto quit_polling;
1933 /* just enable one RXQ */ 1830 /* just enable one RXQ */
1934 atl1c_clean_rx_irq(adapter, 0, &work_done, budget); 1831 atl1c_clean_rx_irq(adapter, &work_done, budget);
1935 1832
1936 if (work_done < budget) { 1833 if (work_done < budget) {
1937quit_polling: 1834quit_polling:
@@ -2206,23 +2103,10 @@ static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
2206 struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type) 2103 struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
2207{ 2104{
2208 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; 2105 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
2209 u32 prod_data; 2106 u16 reg;
2210 2107
2211 AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data); 2108 reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX;
2212 switch (type) { 2109 AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use);
2213 case atl1c_trans_high:
2214 prod_data &= 0xFFFF0000;
2215 prod_data |= tpd_ring->next_to_use & 0xFFFF;
2216 break;
2217 case atl1c_trans_normal:
2218 prod_data &= 0x0000FFFF;
2219 prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16;
2220 break;
2221 default:
2222 break;
2223 }
2224 wmb();
2225 AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data);
2226} 2110}
2227 2111
2228static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, 2112static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
@@ -2307,8 +2191,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
2307 "Unable to allocate MSI interrupt Error: %d\n", 2191 "Unable to allocate MSI interrupt Error: %d\n",
2308 err); 2192 err);
2309 adapter->have_msi = false; 2193 adapter->have_msi = false;
2310 } else 2194 }
2311 netdev->irq = pdev->irq;
2312 2195
2313 if (!adapter->have_msi) 2196 if (!adapter->have_msi)
2314 flags |= IRQF_SHARED; 2197 flags |= IRQF_SHARED;
@@ -2328,44 +2211,38 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
2328 return err; 2211 return err;
2329} 2212}
2330 2213
2214
2215static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter)
2216{
2217 /* release tx-pending skbs and reset tx/rx ring index */
2218 atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
2219 atl1c_clean_tx_ring(adapter, atl1c_trans_high);
2220 atl1c_clean_rx_ring(adapter);
2221}
2222
2331static int atl1c_up(struct atl1c_adapter *adapter) 2223static int atl1c_up(struct atl1c_adapter *adapter)
2332{ 2224{
2333 struct net_device *netdev = adapter->netdev; 2225 struct net_device *netdev = adapter->netdev;
2334 int num;
2335 int err; 2226 int err;
2336 int i;
2337 2227
2338 netif_carrier_off(netdev); 2228 netif_carrier_off(netdev);
2339 atl1c_init_ring_ptrs(adapter);
2340 atl1c_set_multi(netdev);
2341 atl1c_restore_vlan(adapter);
2342 2229
2343 for (i = 0; i < adapter->num_rx_queues; i++) { 2230 err = atl1c_configure(adapter);
2344 num = atl1c_alloc_rx_buffer(adapter, i); 2231 if (unlikely(err))
2345 if (unlikely(num == 0)) {
2346 err = -ENOMEM;
2347 goto err_alloc_rx;
2348 }
2349 }
2350
2351 if (atl1c_configure(adapter)) {
2352 err = -EIO;
2353 goto err_up; 2232 goto err_up;
2354 }
2355 2233
2356 err = atl1c_request_irq(adapter); 2234 err = atl1c_request_irq(adapter);
2357 if (unlikely(err)) 2235 if (unlikely(err))
2358 goto err_up; 2236 goto err_up;
2359 2237
2238 atl1c_check_link_status(adapter);
2360 clear_bit(__AT_DOWN, &adapter->flags); 2239 clear_bit(__AT_DOWN, &adapter->flags);
2361 napi_enable(&adapter->napi); 2240 napi_enable(&adapter->napi);
2362 atl1c_irq_enable(adapter); 2241 atl1c_irq_enable(adapter);
2363 atl1c_check_link_status(adapter);
2364 netif_start_queue(netdev); 2242 netif_start_queue(netdev);
2365 return err; 2243 return err;
2366 2244
2367err_up: 2245err_up:
2368err_alloc_rx:
2369 atl1c_clean_rx_ring(adapter); 2246 atl1c_clean_rx_ring(adapter);
2370 return err; 2247 return err;
2371} 2248}
@@ -2383,15 +2260,15 @@ static void atl1c_down(struct atl1c_adapter *adapter)
2383 napi_disable(&adapter->napi); 2260 napi_disable(&adapter->napi);
2384 atl1c_irq_disable(adapter); 2261 atl1c_irq_disable(adapter);
2385 atl1c_free_irq(adapter); 2262 atl1c_free_irq(adapter);
2263 /* disable ASPM if device inactive */
2264 atl1c_disable_l0s_l1(&adapter->hw);
2386 /* reset MAC to disable all RX/TX */ 2265 /* reset MAC to disable all RX/TX */
2387 atl1c_reset_mac(&adapter->hw); 2266 atl1c_reset_mac(&adapter->hw);
2388 msleep(1); 2267 msleep(1);
2389 2268
2390 adapter->link_speed = SPEED_0; 2269 adapter->link_speed = SPEED_0;
2391 adapter->link_duplex = -1; 2270 adapter->link_duplex = -1;
2392 atl1c_clean_tx_ring(adapter, atl1c_trans_normal); 2271 atl1c_reset_dma_ring(adapter);
2393 atl1c_clean_tx_ring(adapter, atl1c_trans_high);
2394 atl1c_clean_rx_ring(adapter);
2395} 2272}
2396 2273
2397/* 2274/*
@@ -2424,13 +2301,6 @@ static int atl1c_open(struct net_device *netdev)
2424 if (unlikely(err)) 2301 if (unlikely(err))
2425 goto err_up; 2302 goto err_up;
2426 2303
2427 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
2428 u32 phy_data;
2429
2430 AT_READ_REG(&adapter->hw, REG_MDIO_CTRL, &phy_data);
2431 phy_data |= MDIO_AP_EN;
2432 AT_WRITE_REG(&adapter->hw, REG_MDIO_CTRL, phy_data);
2433 }
2434 return 0; 2304 return 0;
2435 2305
2436err_up: 2306err_up:
@@ -2456,6 +2326,8 @@ static int atl1c_close(struct net_device *netdev)
2456 struct atl1c_adapter *adapter = netdev_priv(netdev); 2326 struct atl1c_adapter *adapter = netdev_priv(netdev);
2457 2327
2458 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); 2328 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2329 set_bit(__AT_DOWN, &adapter->flags);
2330 cancel_work_sync(&adapter->common_task);
2459 atl1c_down(adapter); 2331 atl1c_down(adapter);
2460 atl1c_free_ring_resources(adapter); 2332 atl1c_free_ring_resources(adapter);
2461 return 0; 2333 return 0;
@@ -2467,10 +2339,6 @@ static int atl1c_suspend(struct device *dev)
2467 struct net_device *netdev = pci_get_drvdata(pdev); 2339 struct net_device *netdev = pci_get_drvdata(pdev);
2468 struct atl1c_adapter *adapter = netdev_priv(netdev); 2340 struct atl1c_adapter *adapter = netdev_priv(netdev);
2469 struct atl1c_hw *hw = &adapter->hw; 2341 struct atl1c_hw *hw = &adapter->hw;
2470 u32 mac_ctrl_data = 0;
2471 u32 master_ctrl_data = 0;
2472 u32 wol_ctrl_data = 0;
2473 u16 mii_intr_status_data = 0;
2474 u32 wufc = adapter->wol; 2342 u32 wufc = adapter->wol;
2475 2343
2476 atl1c_disable_l0s_l1(hw); 2344 atl1c_disable_l0s_l1(hw);
@@ -2481,75 +2349,10 @@ static int atl1c_suspend(struct device *dev)
2481 netif_device_detach(netdev); 2349 netif_device_detach(netdev);
2482 2350
2483 if (wufc) 2351 if (wufc)
2484 if (atl1c_phy_power_saving(hw) != 0) 2352 if (atl1c_phy_to_ps_link(hw) != 0)
2485 dev_dbg(&pdev->dev, "phy power saving failed"); 2353 dev_dbg(&pdev->dev, "phy power saving failed");
2486 2354
2487 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); 2355 atl1c_power_saving(hw, wufc);
2488 AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
2489
2490 master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
2491 mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
2492 mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2493 MAC_CTRL_PRMLEN_MASK) <<
2494 MAC_CTRL_PRMLEN_SHIFT);
2495 mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
2496 mac_ctrl_data &= ~MAC_CTRL_DUPLX;
2497
2498 if (wufc) {
2499 mac_ctrl_data |= MAC_CTRL_RX_EN;
2500 if (adapter->link_speed == SPEED_1000 ||
2501 adapter->link_speed == SPEED_0) {
2502 mac_ctrl_data |= atl1c_mac_speed_1000 <<
2503 MAC_CTRL_SPEED_SHIFT;
2504 mac_ctrl_data |= MAC_CTRL_DUPLX;
2505 } else
2506 mac_ctrl_data |= atl1c_mac_speed_10_100 <<
2507 MAC_CTRL_SPEED_SHIFT;
2508
2509 if (adapter->link_duplex == DUPLEX_FULL)
2510 mac_ctrl_data |= MAC_CTRL_DUPLX;
2511
2512 /* turn on magic packet wol */
2513 if (wufc & AT_WUFC_MAG)
2514 wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2515
2516 if (wufc & AT_WUFC_LNKC) {
2517 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
2518 /* only link up can wake up */
2519 if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
2520 dev_dbg(&pdev->dev, "%s: read write phy "
2521 "register failed.\n",
2522 atl1c_driver_name);
2523 }
2524 }
2525 /* clear phy interrupt */
2526 atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
2527 /* Config MAC Ctrl register */
2528 __atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
2529
2530 /* magic packet maybe Broadcast&multicast&Unicast frame */
2531 if (wufc & AT_WUFC_MAG)
2532 mac_ctrl_data |= MAC_CTRL_BC_EN;
2533
2534 dev_dbg(&pdev->dev,
2535 "%s: suspend MAC=0x%x\n",
2536 atl1c_driver_name, mac_ctrl_data);
2537 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
2538 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
2539 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2540
2541 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
2542 GPHY_CTRL_EXT_RESET);
2543 } else {
2544 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
2545 master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
2546 mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
2547 mac_ctrl_data |= MAC_CTRL_DUPLX;
2548 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
2549 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2550 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
2551 hw->phy_configured = false; /* re-init PHY when resume */
2552 }
2553 2356
2554 return 0; 2357 return 0;
2555} 2358}
@@ -2562,8 +2365,7 @@ static int atl1c_resume(struct device *dev)
2562 struct atl1c_adapter *adapter = netdev_priv(netdev); 2365 struct atl1c_adapter *adapter = netdev_priv(netdev);
2563 2366
2564 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 2367 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2565 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | 2368 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
2566 ATL1C_PCIE_PHY_RESET);
2567 2369
2568 atl1c_phy_reset(&adapter->hw); 2370 atl1c_phy_reset(&adapter->hw);
2569 atl1c_reset_mac(&adapter->hw); 2371 atl1c_reset_mac(&adapter->hw);
@@ -2616,7 +2418,6 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2616 SET_NETDEV_DEV(netdev, &pdev->dev); 2418 SET_NETDEV_DEV(netdev, &pdev->dev);
2617 pci_set_drvdata(pdev, netdev); 2419 pci_set_drvdata(pdev, netdev);
2618 2420
2619 netdev->irq = pdev->irq;
2620 netdev->netdev_ops = &atl1c_netdev_ops; 2421 netdev->netdev_ops = &atl1c_netdev_ops;
2621 netdev->watchdog_timeo = AT_TX_WATCHDOG; 2422 netdev->watchdog_timeo = AT_TX_WATCHDOG;
2622 atl1c_set_ethtool_ops(netdev); 2423 atl1c_set_ethtool_ops(netdev);
@@ -2706,14 +2507,13 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2706 dev_err(&pdev->dev, "cannot map device registers\n"); 2507 dev_err(&pdev->dev, "cannot map device registers\n");
2707 goto err_ioremap; 2508 goto err_ioremap;
2708 } 2509 }
2709 netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
2710 2510
2711 /* init mii data */ 2511 /* init mii data */
2712 adapter->mii.dev = netdev; 2512 adapter->mii.dev = netdev;
2713 adapter->mii.mdio_read = atl1c_mdio_read; 2513 adapter->mii.mdio_read = atl1c_mdio_read;
2714 adapter->mii.mdio_write = atl1c_mdio_write; 2514 adapter->mii.mdio_write = atl1c_mdio_write;
2715 adapter->mii.phy_id_mask = 0x1f; 2515 adapter->mii.phy_id_mask = 0x1f;
2716 adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK; 2516 adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
2717 netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64); 2517 netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
2718 setup_timer(&adapter->phy_config_timer, atl1c_phy_config, 2518 setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
2719 (unsigned long)adapter); 2519 (unsigned long)adapter);
@@ -2723,8 +2523,7 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2723 dev_err(&pdev->dev, "net device private data init failed\n"); 2523 dev_err(&pdev->dev, "net device private data init failed\n");
2724 goto err_sw_init; 2524 goto err_sw_init;
2725 } 2525 }
2726 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | 2526 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
2727 ATL1C_PCIE_PHY_RESET);
2728 2527
2729 /* Init GPHY as early as possible due to power saving issue */ 2528 /* Init GPHY as early as possible due to power saving issue */
2730 atl1c_phy_reset(&adapter->hw); 2529 atl1c_phy_reset(&adapter->hw);
@@ -2752,7 +2551,7 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2752 dev_dbg(&pdev->dev, "mac address : %pM\n", 2551 dev_dbg(&pdev->dev, "mac address : %pM\n",
2753 adapter->hw.mac_addr); 2552 adapter->hw.mac_addr);
2754 2553
2755 atl1c_hw_set_mac_addr(&adapter->hw); 2554 atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
2756 INIT_WORK(&adapter->common_task, atl1c_common_task); 2555 INIT_WORK(&adapter->common_task, atl1c_common_task);
2757 adapter->work_event = 0; 2556 adapter->work_event = 0;
2758 err = register_netdev(netdev); 2557 err = register_netdev(netdev);
@@ -2796,6 +2595,8 @@ static void __devexit atl1c_remove(struct pci_dev *pdev)
2796 struct atl1c_adapter *adapter = netdev_priv(netdev); 2595 struct atl1c_adapter *adapter = netdev_priv(netdev);
2797 2596
2798 unregister_netdev(netdev); 2597 unregister_netdev(netdev);
2598 /* restore permanent address */
2599 atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr);
2799 atl1c_phy_disable(&adapter->hw); 2600 atl1c_phy_disable(&adapter->hw);
2800 2601
2801 iounmap(adapter->hw.hw_addr); 2602 iounmap(adapter->hw.hw_addr);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 93ff2b231284..1220e511ced6 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1883,27 +1883,24 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
1883 int err = 0; 1883 int err = 0;
1884 1884
1885 adapter->have_msi = true; 1885 adapter->have_msi = true;
1886 err = pci_enable_msi(adapter->pdev); 1886 err = pci_enable_msi(pdev);
1887 if (err) { 1887 if (err) {
1888 netdev_dbg(adapter->netdev, 1888 netdev_dbg(netdev,
1889 "Unable to allocate MSI interrupt Error: %d\n", err); 1889 "Unable to allocate MSI interrupt Error: %d\n", err);
1890 adapter->have_msi = false; 1890 adapter->have_msi = false;
1891 } else 1891 }
1892 netdev->irq = pdev->irq;
1893
1894 1892
1895 if (!adapter->have_msi) 1893 if (!adapter->have_msi)
1896 flags |= IRQF_SHARED; 1894 flags |= IRQF_SHARED;
1897 err = request_irq(adapter->pdev->irq, atl1e_intr, flags, 1895 err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
1898 netdev->name, netdev);
1899 if (err) { 1896 if (err) {
1900 netdev_dbg(adapter->netdev, 1897 netdev_dbg(adapter->netdev,
1901 "Unable to allocate interrupt Error: %d\n", err); 1898 "Unable to allocate interrupt Error: %d\n", err);
1902 if (adapter->have_msi) 1899 if (adapter->have_msi)
1903 pci_disable_msi(adapter->pdev); 1900 pci_disable_msi(pdev);
1904 return err; 1901 return err;
1905 } 1902 }
1906 netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n"); 1903 netdev_dbg(netdev, "atl1e_request_irq OK\n");
1907 return err; 1904 return err;
1908} 1905}
1909 1906
@@ -2233,7 +2230,6 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2233 SET_NETDEV_DEV(netdev, &pdev->dev); 2230 SET_NETDEV_DEV(netdev, &pdev->dev);
2234 pci_set_drvdata(pdev, netdev); 2231 pci_set_drvdata(pdev, netdev);
2235 2232
2236 netdev->irq = pdev->irq;
2237 netdev->netdev_ops = &atl1e_netdev_ops; 2233 netdev->netdev_ops = &atl1e_netdev_ops;
2238 2234
2239 netdev->watchdog_timeo = AT_TX_WATCHDOG; 2235 netdev->watchdog_timeo = AT_TX_WATCHDOG;
@@ -2319,7 +2315,6 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2319 netdev_err(netdev, "cannot map device registers\n"); 2315 netdev_err(netdev, "cannot map device registers\n");
2320 goto err_ioremap; 2316 goto err_ioremap;
2321 } 2317 }
2322 netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
2323 2318
2324 /* init mii data */ 2319 /* init mii data */
2325 adapter->mii.dev = netdev; 2320 adapter->mii.dev = netdev;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 40ac41436549..5d10884e5080 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -266,7 +266,7 @@ static s32 atl1_reset_hw(struct atl1_hw *hw)
266 * interrupts & Clear any pending interrupt events 266 * interrupts & Clear any pending interrupt events
267 */ 267 */
268 /* 268 /*
269 * iowrite32(0, hw->hw_addr + REG_IMR); 269 * atlx_irq_disable(adapter);
270 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); 270 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
271 */ 271 */
272 272
@@ -1917,7 +1917,7 @@ next:
1917 return num_alloc; 1917 return num_alloc;
1918} 1918}
1919 1919
1920static void atl1_intr_rx(struct atl1_adapter *adapter) 1920static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
1921{ 1921{
1922 int i, count; 1922 int i, count;
1923 u16 length; 1923 u16 length;
@@ -1933,7 +1933,7 @@ static void atl1_intr_rx(struct atl1_adapter *adapter)
1933 1933
1934 rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); 1934 rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
1935 1935
1936 while (1) { 1936 while (count < budget) {
1937 rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); 1937 rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
1938 i = 1; 1938 i = 1;
1939 if (likely(rrd->xsz.valid)) { /* packet valid */ 1939 if (likely(rrd->xsz.valid)) { /* packet valid */
@@ -2032,7 +2032,7 @@ rrd_ok:
2032 2032
2033 __vlan_hwaccel_put_tag(skb, vlan_tag); 2033 __vlan_hwaccel_put_tag(skb, vlan_tag);
2034 } 2034 }
2035 netif_rx(skb); 2035 netif_receive_skb(skb);
2036 2036
2037 /* let protocol layer free skb */ 2037 /* let protocol layer free skb */
2038 buffer_info->skb = NULL; 2038 buffer_info->skb = NULL;
@@ -2065,14 +2065,17 @@ rrd_ok:
2065 iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); 2065 iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
2066 spin_unlock(&adapter->mb_lock); 2066 spin_unlock(&adapter->mb_lock);
2067 } 2067 }
2068
2069 return count;
2068} 2070}
2069 2071
2070static void atl1_intr_tx(struct atl1_adapter *adapter) 2072static int atl1_intr_tx(struct atl1_adapter *adapter)
2071{ 2073{
2072 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 2074 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
2073 struct atl1_buffer *buffer_info; 2075 struct atl1_buffer *buffer_info;
2074 u16 sw_tpd_next_to_clean; 2076 u16 sw_tpd_next_to_clean;
2075 u16 cmb_tpd_next_to_clean; 2077 u16 cmb_tpd_next_to_clean;
2078 int count = 0;
2076 2079
2077 sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); 2080 sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
2078 cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); 2081 cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
@@ -2092,12 +2095,16 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
2092 2095
2093 if (++sw_tpd_next_to_clean == tpd_ring->count) 2096 if (++sw_tpd_next_to_clean == tpd_ring->count)
2094 sw_tpd_next_to_clean = 0; 2097 sw_tpd_next_to_clean = 0;
2098
2099 count++;
2095 } 2100 }
2096 atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); 2101 atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
2097 2102
2098 if (netif_queue_stopped(adapter->netdev) && 2103 if (netif_queue_stopped(adapter->netdev) &&
2099 netif_carrier_ok(adapter->netdev)) 2104 netif_carrier_ok(adapter->netdev))
2100 netif_wake_queue(adapter->netdev); 2105 netif_wake_queue(adapter->netdev);
2106
2107 return count;
2101} 2108}
2102 2109
2103static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) 2110static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
@@ -2439,6 +2446,49 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
2439 return NETDEV_TX_OK; 2446 return NETDEV_TX_OK;
2440} 2447}
2441 2448
2449static int atl1_rings_clean(struct napi_struct *napi, int budget)
2450{
2451 struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi);
2452 int work_done = atl1_intr_rx(adapter, budget);
2453
2454 if (atl1_intr_tx(adapter))
2455 work_done = budget;
2456
2457 /* Let's come again to process some more packets */
2458 if (work_done >= budget)
2459 return work_done;
2460
2461 napi_complete(napi);
2462 /* re-enable Interrupt */
2463 if (likely(adapter->int_enabled))
2464 atlx_imr_set(adapter, IMR_NORMAL_MASK);
2465 return work_done;
2466}
2467
2468static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
2469{
2470 if (!napi_schedule_prep(&adapter->napi))
2471 /* It is possible in case even the RX/TX ints are disabled via IMR
2472 * register the ISR bits are set anyway (but do not produce IRQ).
2473 * To handle such situation the napi functions used to check is
2474 * something scheduled or not.
2475 */
2476 return 0;
2477
2478 __napi_schedule(&adapter->napi);
2479
2480 /*
2481 * Disable RX/TX ints via IMR register if it is
2482 * allowed. NAPI handler must reenable them in same
2483 * way.
2484 */
2485 if (!adapter->int_enabled)
2486 return 1;
2487
2488 atlx_imr_set(adapter, IMR_NORXTX_MASK);
2489 return 1;
2490}
2491
2442/* 2492/*
2443 * atl1_intr - Interrupt Handler 2493 * atl1_intr - Interrupt Handler
2444 * @irq: interrupt number 2494 * @irq: interrupt number
@@ -2449,78 +2499,74 @@ static irqreturn_t atl1_intr(int irq, void *data)
2449{ 2499{
2450 struct atl1_adapter *adapter = netdev_priv(data); 2500 struct atl1_adapter *adapter = netdev_priv(data);
2451 u32 status; 2501 u32 status;
2452 int max_ints = 10;
2453 2502
2454 status = adapter->cmb.cmb->int_stats; 2503 status = adapter->cmb.cmb->int_stats;
2455 if (!status) 2504 if (!status)
2456 return IRQ_NONE; 2505 return IRQ_NONE;
2457 2506
2458 do { 2507 /* clear CMB interrupt status at once,
2459 /* clear CMB interrupt status at once */ 2508 * but leave rx/tx interrupt status in case it should be dropped
2460 adapter->cmb.cmb->int_stats = 0; 2509 * only if rx/tx processing queued. In other case interrupt
2461 2510 * can be lost.
2462 if (status & ISR_GPHY) /* clear phy status */ 2511 */
2463 atlx_clear_phy_int(adapter); 2512 adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX);
2464 2513
2465 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ 2514 if (status & ISR_GPHY) /* clear phy status */
2466 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); 2515 atlx_clear_phy_int(adapter);
2467 2516
2468 /* check if SMB intr */ 2517 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
2469 if (status & ISR_SMB) 2518 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
2470 atl1_inc_smb(adapter);
2471 2519
2472 /* check if PCIE PHY Link down */ 2520 /* check if SMB intr */
2473 if (status & ISR_PHY_LINKDOWN) { 2521 if (status & ISR_SMB)
2474 if (netif_msg_intr(adapter)) 2522 atl1_inc_smb(adapter);
2475 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2476 "pcie phy link down %x\n", status);
2477 if (netif_running(adapter->netdev)) { /* reset MAC */
2478 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
2479 schedule_work(&adapter->pcie_dma_to_rst_task);
2480 return IRQ_HANDLED;
2481 }
2482 }
2483 2523
2484 /* check if DMA read/write error ? */ 2524 /* check if PCIE PHY Link down */
2485 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { 2525 if (status & ISR_PHY_LINKDOWN) {
2486 if (netif_msg_intr(adapter)) 2526 if (netif_msg_intr(adapter))
2487 dev_printk(KERN_DEBUG, &adapter->pdev->dev, 2527 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2488 "pcie DMA r/w error (status = 0x%x)\n", 2528 "pcie phy link down %x\n", status);
2489 status); 2529 if (netif_running(adapter->netdev)) { /* reset MAC */
2490 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2530 atlx_irq_disable(adapter);
2491 schedule_work(&adapter->pcie_dma_to_rst_task); 2531 schedule_work(&adapter->reset_dev_task);
2492 return IRQ_HANDLED; 2532 return IRQ_HANDLED;
2493 } 2533 }
2534 }
2494 2535
2495 /* link event */ 2536 /* check if DMA read/write error ? */
2496 if (status & ISR_GPHY) { 2537 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
2497 adapter->soft_stats.tx_carrier_errors++; 2538 if (netif_msg_intr(adapter))
2498 atl1_check_for_link(adapter); 2539 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2499 } 2540 "pcie DMA r/w error (status = 0x%x)\n",
2541 status);
2542 atlx_irq_disable(adapter);
2543 schedule_work(&adapter->reset_dev_task);
2544 return IRQ_HANDLED;
2545 }
2500 2546
2501 /* transmit event */ 2547 /* link event */
2502 if (status & ISR_CMB_TX) 2548 if (status & ISR_GPHY) {
2503 atl1_intr_tx(adapter); 2549 adapter->soft_stats.tx_carrier_errors++;
2504 2550 atl1_check_for_link(adapter);
2505 /* rx exception */ 2551 }
2506 if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
2507 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
2508 ISR_HOST_RRD_OV | ISR_CMB_RX))) {
2509 if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
2510 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
2511 ISR_HOST_RRD_OV))
2512 if (netif_msg_intr(adapter))
2513 dev_printk(KERN_DEBUG,
2514 &adapter->pdev->dev,
2515 "rx exception, ISR = 0x%x\n",
2516 status);
2517 atl1_intr_rx(adapter);
2518 }
2519 2552
2520 if (--max_ints < 0) 2553 /* transmit or receive event */
2521 break; 2554 if (status & (ISR_CMB_TX | ISR_CMB_RX) &&
2555 atl1_sched_rings_clean(adapter))
2556 adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats &
2557 ~(ISR_CMB_TX | ISR_CMB_RX);
2522 2558
2523 } while ((status = adapter->cmb.cmb->int_stats)); 2559 /* rx exception */
2560 if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
2561 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
2562 ISR_HOST_RRD_OV))) {
2563 if (netif_msg_intr(adapter))
2564 dev_printk(KERN_DEBUG,
2565 &adapter->pdev->dev,
2566 "rx exception, ISR = 0x%x\n",
2567 status);
2568 atl1_sched_rings_clean(adapter);
2569 }
2524 2570
2525 /* re-enable Interrupt */ 2571 /* re-enable Interrupt */
2526 iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); 2572 iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
@@ -2599,6 +2645,7 @@ static s32 atl1_up(struct atl1_adapter *adapter)
2599 if (unlikely(err)) 2645 if (unlikely(err))
2600 goto err_up; 2646 goto err_up;
2601 2647
2648 napi_enable(&adapter->napi);
2602 atlx_irq_enable(adapter); 2649 atlx_irq_enable(adapter);
2603 atl1_check_link(adapter); 2650 atl1_check_link(adapter);
2604 netif_start_queue(netdev); 2651 netif_start_queue(netdev);
@@ -2615,6 +2662,7 @@ static void atl1_down(struct atl1_adapter *adapter)
2615{ 2662{
2616 struct net_device *netdev = adapter->netdev; 2663 struct net_device *netdev = adapter->netdev;
2617 2664
2665 napi_disable(&adapter->napi);
2618 netif_stop_queue(netdev); 2666 netif_stop_queue(netdev);
2619 del_timer_sync(&adapter->phy_config_timer); 2667 del_timer_sync(&adapter->phy_config_timer);
2620 adapter->phy_timer_pending = false; 2668 adapter->phy_timer_pending = false;
@@ -2633,10 +2681,10 @@ static void atl1_down(struct atl1_adapter *adapter)
2633 atl1_clean_rx_ring(adapter); 2681 atl1_clean_rx_ring(adapter);
2634} 2682}
2635 2683
2636static void atl1_tx_timeout_task(struct work_struct *work) 2684static void atl1_reset_dev_task(struct work_struct *work)
2637{ 2685{
2638 struct atl1_adapter *adapter = 2686 struct atl1_adapter *adapter =
2639 container_of(work, struct atl1_adapter, tx_timeout_task); 2687 container_of(work, struct atl1_adapter, reset_dev_task);
2640 struct net_device *netdev = adapter->netdev; 2688 struct net_device *netdev = adapter->netdev;
2641 2689
2642 netif_device_detach(netdev); 2690 netif_device_detach(netdev);
@@ -2971,6 +3019,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2971 3019
2972 netdev->netdev_ops = &atl1_netdev_ops; 3020 netdev->netdev_ops = &atl1_netdev_ops;
2973 netdev->watchdog_timeo = 5 * HZ; 3021 netdev->watchdog_timeo = 5 * HZ;
3022 netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
2974 3023
2975 netdev->ethtool_ops = &atl1_ethtool_ops; 3024 netdev->ethtool_ops = &atl1_ethtool_ops;
2976 adapter->bd_number = cards_found; 3025 adapter->bd_number = cards_found;
@@ -3038,12 +3087,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
3038 (unsigned long)adapter); 3087 (unsigned long)adapter);
3039 adapter->phy_timer_pending = false; 3088 adapter->phy_timer_pending = false;
3040 3089
3041 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); 3090 INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
3042 3091
3043 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); 3092 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
3044 3093
3045 INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
3046
3047 err = register_netdev(netdev); 3094 err = register_netdev(netdev);
3048 if (err) 3095 if (err)
3049 goto err_common; 3096 goto err_common;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 109d6da8be97..3bf79a56220d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -275,13 +275,17 @@ static u32 atl1_check_link(struct atl1_adapter *adapter);
275#define ISR_DIS_SMB 0x20000000 275#define ISR_DIS_SMB 0x20000000
276#define ISR_DIS_DMA 0x40000000 276#define ISR_DIS_DMA 0x40000000
277 277
278/* Normal Interrupt mask */ 278/* Normal Interrupt mask without RX/TX enabled */
279#define IMR_NORMAL_MASK (\ 279#define IMR_NORXTX_MASK (\
280 ISR_SMB |\ 280 ISR_SMB |\
281 ISR_GPHY |\ 281 ISR_GPHY |\
282 ISR_PHY_LINKDOWN|\ 282 ISR_PHY_LINKDOWN|\
283 ISR_DMAR_TO_RST |\ 283 ISR_DMAR_TO_RST |\
284 ISR_DMAW_TO_RST |\ 284 ISR_DMAW_TO_RST)
285
286/* Normal Interrupt mask */
287#define IMR_NORMAL_MASK (\
288 IMR_NORXTX_MASK |\
285 ISR_CMB_TX |\ 289 ISR_CMB_TX |\
286 ISR_CMB_RX) 290 ISR_CMB_RX)
287 291
@@ -758,9 +762,9 @@ struct atl1_adapter {
758 u16 link_speed; 762 u16 link_speed;
759 u16 link_duplex; 763 u16 link_duplex;
760 spinlock_t lock; 764 spinlock_t lock;
761 struct work_struct tx_timeout_task; 765 struct napi_struct napi;
766 struct work_struct reset_dev_task;
762 struct work_struct link_chg_task; 767 struct work_struct link_chg_task;
763 struct work_struct pcie_dma_to_rst_task;
764 768
765 struct timer_list phy_config_timer; 769 struct timer_list phy_config_timer;
766 bool phy_timer_pending; 770 bool phy_timer_pending;
@@ -782,6 +786,12 @@ struct atl1_adapter {
782 u16 ict; /* interrupt clear timer (2us resolution */ 786 u16 ict; /* interrupt clear timer (2us resolution */
783 struct mii_if_info mii; /* MII interface info */ 787 struct mii_if_info mii; /* MII interface info */
784 788
789 /*
790 * Use this value to check is napi handler allowed to
791 * enable ints or not
792 */
793 bool int_enabled;
794
785 u32 bd_number; /* board number */ 795 u32 bd_number; /* board number */
786 bool pci_using_64; 796 bool pci_using_64;
787 struct atl1_hw hw; 797 struct atl1_hw hw;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 3cd8837236dc..b4f3aa49a7fc 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -155,14 +155,21 @@ static void atlx_set_multi(struct net_device *netdev)
155 } 155 }
156} 156}
157 157
158static inline void atlx_imr_set(struct atlx_adapter *adapter,
159 unsigned int imr)
160{
161 iowrite32(imr, adapter->hw.hw_addr + REG_IMR);
162 ioread32(adapter->hw.hw_addr + REG_IMR);
163}
164
158/* 165/*
159 * atlx_irq_enable - Enable default interrupt generation settings 166 * atlx_irq_enable - Enable default interrupt generation settings
160 * @adapter: board private structure 167 * @adapter: board private structure
161 */ 168 */
162static void atlx_irq_enable(struct atlx_adapter *adapter) 169static void atlx_irq_enable(struct atlx_adapter *adapter)
163{ 170{
164 iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); 171 atlx_imr_set(adapter, IMR_NORMAL_MASK);
165 ioread32(adapter->hw.hw_addr + REG_IMR); 172 adapter->int_enabled = true;
166} 173}
167 174
168/* 175/*
@@ -171,8 +178,8 @@ static void atlx_irq_enable(struct atlx_adapter *adapter)
171 */ 178 */
172static void atlx_irq_disable(struct atlx_adapter *adapter) 179static void atlx_irq_disable(struct atlx_adapter *adapter)
173{ 180{
174 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 181 adapter->int_enabled = false;
175 ioread32(adapter->hw.hw_addr + REG_IMR); 182 atlx_imr_set(adapter, 0);
176 synchronize_irq(adapter->pdev->irq); 183 synchronize_irq(adapter->pdev->irq);
177} 184}
178 185
@@ -194,7 +201,7 @@ static void atlx_tx_timeout(struct net_device *netdev)
194{ 201{
195 struct atlx_adapter *adapter = netdev_priv(netdev); 202 struct atlx_adapter *adapter = netdev_priv(netdev);
196 /* Do the reset outside of interrupt context */ 203 /* Do the reset outside of interrupt context */
197 schedule_work(&adapter->tx_timeout_task); 204 schedule_work(&adapter->reset_dev_task);
198} 205}
199 206
200/* 207/*
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8297e2868736..ac7b74488531 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3006,7 +3006,7 @@ error:
3006 3006
3007 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 3007 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3008 PCI_DMA_FROMDEVICE); 3008 PCI_DMA_FROMDEVICE);
3009 skb = build_skb(data); 3009 skb = build_skb(data, 0);
3010 if (!skb) { 3010 if (!skb) {
3011 kfree(data); 3011 kfree(data);
3012 goto error; 3012 goto error;
@@ -7343,8 +7343,7 @@ static struct {
7343 { "rx_fw_discards" }, 7343 { "rx_fw_discards" },
7344}; 7344};
7345 7345
7346#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\ 7346#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7347 sizeof(bnx2_stats_str_arr[0]))
7348 7347
7349#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) 7348#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7350 7349
@@ -7976,7 +7975,6 @@ static int __devinit
7976bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) 7975bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7977{ 7976{
7978 struct bnx2 *bp; 7977 struct bnx2 *bp;
7979 unsigned long mem_len;
7980 int rc, i, j; 7978 int rc, i, j;
7981 u32 reg; 7979 u32 reg;
7982 u64 dma_mask, persist_dma_mask; 7980 u64 dma_mask, persist_dma_mask;
@@ -8036,13 +8034,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8036#endif 8034#endif
8037 INIT_WORK(&bp->reset_task, bnx2_reset_task); 8035 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8038 8036
8039 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 8037 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8040 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1); 8038 TX_MAX_TSS_RINGS + 1));
8041 dev->mem_end = dev->mem_start + mem_len;
8042 dev->irq = pdev->irq;
8043
8044 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
8045
8046 if (!bp->regview) { 8039 if (!bp->regview) {
8047 dev_err(&pdev->dev, "Cannot map register space, aborting\n"); 8040 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8048 rc = -ENOMEM; 8041 rc = -ENOMEM;
@@ -8346,10 +8339,8 @@ err_out_unmap:
8346 bp->flags &= ~BNX2_FLAG_AER_ENABLED; 8339 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8347 } 8340 }
8348 8341
8349 if (bp->regview) { 8342 pci_iounmap(pdev, bp->regview);
8350 iounmap(bp->regview); 8343 bp->regview = NULL;
8351 bp->regview = NULL;
8352 }
8353 8344
8354err_out_release: 8345err_out_release:
8355 pci_release_regions(pdev); 8346 pci_release_regions(pdev);
@@ -8432,7 +8423,7 @@ static int __devinit
8432bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8423bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8433{ 8424{
8434 static int version_printed = 0; 8425 static int version_printed = 0;
8435 struct net_device *dev = NULL; 8426 struct net_device *dev;
8436 struct bnx2 *bp; 8427 struct bnx2 *bp;
8437 int rc; 8428 int rc;
8438 char str[40]; 8429 char str[40];
@@ -8442,15 +8433,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8442 8433
8443 /* dev zeroed in init_etherdev */ 8434 /* dev zeroed in init_etherdev */
8444 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS); 8435 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8445
8446 if (!dev) 8436 if (!dev)
8447 return -ENOMEM; 8437 return -ENOMEM;
8448 8438
8449 rc = bnx2_init_board(pdev, dev); 8439 rc = bnx2_init_board(pdev, dev);
8450 if (rc < 0) { 8440 if (rc < 0)
8451 free_netdev(dev); 8441 goto err_free;
8452 return rc;
8453 }
8454 8442
8455 dev->netdev_ops = &bnx2_netdev_ops; 8443 dev->netdev_ops = &bnx2_netdev_ops;
8456 dev->watchdog_timeo = TX_TIMEOUT; 8444 dev->watchdog_timeo = TX_TIMEOUT;
@@ -8480,22 +8468,21 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8480 goto error; 8468 goto error;
8481 } 8469 }
8482 8470
8483 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n", 8471 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8484 board_info[ent->driver_data].name, 8472 "node addr %pM\n", board_info[ent->driver_data].name,
8485 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', 8473 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8486 ((CHIP_ID(bp) & 0x0ff0) >> 4), 8474 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8487 bnx2_bus_string(bp, str), 8475 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8488 dev->base_addr, 8476 pdev->irq, dev->dev_addr);
8489 bp->pdev->irq, dev->dev_addr);
8490 8477
8491 return 0; 8478 return 0;
8492 8479
8493error: 8480error:
8494 if (bp->regview) 8481 iounmap(bp->regview);
8495 iounmap(bp->regview);
8496 pci_release_regions(pdev); 8482 pci_release_regions(pdev);
8497 pci_disable_device(pdev); 8483 pci_disable_device(pdev);
8498 pci_set_drvdata(pdev, NULL); 8484 pci_set_drvdata(pdev, NULL);
8485err_free:
8499 free_netdev(dev); 8486 free_netdev(dev);
8500 return rc; 8487 return rc;
8501} 8488}
@@ -8511,8 +8498,7 @@ bnx2_remove_one(struct pci_dev *pdev)
8511 del_timer_sync(&bp->timer); 8498 del_timer_sync(&bp->timer);
8512 cancel_work_sync(&bp->reset_task); 8499 cancel_work_sync(&bp->reset_task);
8513 8500
8514 if (bp->regview) 8501 pci_iounmap(bp->pdev, bp->regview);
8515 iounmap(bp->regview);
8516 8502
8517 kfree(bp->temp_stats_blk); 8503 kfree(bp->temp_stats_blk);
8518 8504
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2c9ee552dffc..e30e2a2f354c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,13 +23,17 @@
23 * (you will need to reboot afterwards) */ 23 * (you will need to reboot afterwards) */
24/* #define BNX2X_STOP_ON_ERROR */ 24/* #define BNX2X_STOP_ON_ERROR */
25 25
26#define DRV_MODULE_VERSION "1.72.10-0" 26#define DRV_MODULE_VERSION "1.72.50-0"
27#define DRV_MODULE_RELDATE "2012/02/20" 27#define DRV_MODULE_RELDATE "2012/04/23"
28#define BNX2X_BC_VER 0x040200 28#define BNX2X_BC_VER 0x040200
29 29
30#if defined(CONFIG_DCB) 30#if defined(CONFIG_DCB)
31#define BCM_DCBNL 31#define BCM_DCBNL
32#endif 32#endif
33
34
35#include "bnx2x_hsi.h"
36
33#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 37#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
34#define BCM_CNIC 1 38#define BCM_CNIC 1
35#include "../cnic_if.h" 39#include "../cnic_if.h"
@@ -345,7 +349,6 @@ union db_prod {
345#define SGE_PAGE_SIZE PAGE_SIZE 349#define SGE_PAGE_SIZE PAGE_SIZE
346#define SGE_PAGE_SHIFT PAGE_SHIFT 350#define SGE_PAGE_SHIFT PAGE_SHIFT
347#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) 351#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
348#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
349 352
350/* SGE ring related macros */ 353/* SGE ring related macros */
351#define NUM_RX_SGE_PAGES 2 354#define NUM_RX_SGE_PAGES 2
@@ -815,6 +818,8 @@ struct bnx2x_common {
815#define CHIP_NUM_57800_MF 0x16a5 818#define CHIP_NUM_57800_MF 0x16a5
816#define CHIP_NUM_57810 0x168e 819#define CHIP_NUM_57810 0x168e
817#define CHIP_NUM_57810_MF 0x16ae 820#define CHIP_NUM_57810_MF 0x16ae
821#define CHIP_NUM_57811 0x163d
822#define CHIP_NUM_57811_MF 0x163e
818#define CHIP_NUM_57840 0x168d 823#define CHIP_NUM_57840 0x168d
819#define CHIP_NUM_57840_MF 0x16ab 824#define CHIP_NUM_57840_MF 0x16ab
820#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) 825#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
@@ -826,6 +831,8 @@ struct bnx2x_common {
826#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) 831#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
827#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) 832#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
828#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) 833#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
834#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
835#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
829#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) 836#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
830#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) 837#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
831#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ 838#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
@@ -836,6 +843,8 @@ struct bnx2x_common {
836 CHIP_IS_57800_MF(bp) || \ 843 CHIP_IS_57800_MF(bp) || \
837 CHIP_IS_57810(bp) || \ 844 CHIP_IS_57810(bp) || \
838 CHIP_IS_57810_MF(bp) || \ 845 CHIP_IS_57810_MF(bp) || \
846 CHIP_IS_57811(bp) || \
847 CHIP_IS_57811_MF(bp) || \
839 CHIP_IS_57840(bp) || \ 848 CHIP_IS_57840(bp) || \
840 CHIP_IS_57840_MF(bp)) 849 CHIP_IS_57840_MF(bp))
841#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) 850#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
@@ -1053,6 +1062,13 @@ struct bnx2x_slowpath {
1053 struct flow_control_configuration pfc_config; 1062 struct flow_control_configuration pfc_config;
1054 } func_rdata; 1063 } func_rdata;
1055 1064
1065 /* afex ramrod can not be a part of func_rdata union because these
1066 * events might arrive in parallel to other events from func_rdata.
1067 * Therefore, if they would have been defined in the same union,
1068 * data can get corrupted.
1069 */
1070 struct afex_vif_list_ramrod_data func_afex_rdata;
1071
1056 /* used by dmae command executer */ 1072 /* used by dmae command executer */
1057 struct dmae_command dmae[MAX_DMAE_C]; 1073 struct dmae_command dmae[MAX_DMAE_C];
1058 1074
@@ -1169,6 +1185,7 @@ struct bnx2x_fw_stats_data {
1169enum { 1185enum {
1170 BNX2X_SP_RTNL_SETUP_TC, 1186 BNX2X_SP_RTNL_SETUP_TC,
1171 BNX2X_SP_RTNL_TX_TIMEOUT, 1187 BNX2X_SP_RTNL_TX_TIMEOUT,
1188 BNX2X_SP_RTNL_AFEX_F_UPDATE,
1172 BNX2X_SP_RTNL_FAN_FAILURE, 1189 BNX2X_SP_RTNL_FAN_FAILURE,
1173}; 1190};
1174 1191
@@ -1222,7 +1239,6 @@ struct bnx2x {
1222#define ETH_MAX_JUMBO_PACKET_SIZE 9600 1239#define ETH_MAX_JUMBO_PACKET_SIZE 9600
1223/* TCP with Timestamp Option (32) + IPv6 (40) */ 1240/* TCP with Timestamp Option (32) + IPv6 (40) */
1224#define ETH_MAX_TPA_HEADER_SIZE 72 1241#define ETH_MAX_TPA_HEADER_SIZE 72
1225#define ETH_MIN_TPA_HEADER_SIZE 40
1226 1242
1227 /* Max supported alignment is 256 (8 shift) */ 1243 /* Max supported alignment is 256 (8 shift) */
1228#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT) 1244#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
@@ -1300,6 +1316,7 @@ struct bnx2x {
1300#define NO_ISCSI_FLAG (1 << 14) 1316#define NO_ISCSI_FLAG (1 << 14)
1301#define NO_FCOE_FLAG (1 << 15) 1317#define NO_FCOE_FLAG (1 << 15)
1302#define BC_SUPPORTS_PFC_STATS (1 << 17) 1318#define BC_SUPPORTS_PFC_STATS (1 << 17)
1319#define USING_SINGLE_MSIX_FLAG (1 << 20)
1303 1320
1304#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) 1321#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
1305#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) 1322#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1329,21 +1346,20 @@ struct bnx2x {
1329 struct bnx2x_common common; 1346 struct bnx2x_common common;
1330 struct bnx2x_port port; 1347 struct bnx2x_port port;
1331 1348
1332 struct cmng_struct_per_port cmng; 1349 struct cmng_init cmng;
1333 u32 vn_weight_sum; 1350
1334 u32 mf_config[E1HVN_MAX]; 1351 u32 mf_config[E1HVN_MAX];
1335 u32 mf2_config[E2_FUNC_MAX]; 1352 u32 mf_ext_config;
1336 u32 path_has_ovlan; /* E3 */ 1353 u32 path_has_ovlan; /* E3 */
1337 u16 mf_ov; 1354 u16 mf_ov;
1338 u8 mf_mode; 1355 u8 mf_mode;
1339#define IS_MF(bp) (bp->mf_mode != 0) 1356#define IS_MF(bp) (bp->mf_mode != 0)
1340#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) 1357#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
1341#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) 1358#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
1359#define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX)
1342 1360
1343 u8 wol; 1361 u8 wol;
1344 1362
1345 bool gro_check;
1346
1347 int rx_ring_size; 1363 int rx_ring_size;
1348 1364
1349 u16 tx_quick_cons_trip_int; 1365 u16 tx_quick_cons_trip_int;
@@ -1371,7 +1387,6 @@ struct bnx2x {
1371#define BNX2X_STATE_DIAG 0xe000 1387#define BNX2X_STATE_DIAG 0xe000
1372#define BNX2X_STATE_ERROR 0xf000 1388#define BNX2X_STATE_ERROR 0xf000
1373 1389
1374 int multi_mode;
1375#define BNX2X_MAX_PRIORITY 8 1390#define BNX2X_MAX_PRIORITY 8
1376#define BNX2X_MAX_ENTRIES_PER_PRI 16 1391#define BNX2X_MAX_ENTRIES_PER_PRI 16
1377#define BNX2X_MAX_COS 3 1392#define BNX2X_MAX_COS 3
@@ -1582,6 +1597,9 @@ struct bnx2x {
1582 struct dcbx_features dcbx_remote_feat; 1597 struct dcbx_features dcbx_remote_feat;
1583 u32 dcbx_remote_flags; 1598 u32 dcbx_remote_flags;
1584#endif 1599#endif
1600 /* AFEX: store default vlan used */
1601 int afex_def_vlan_tag;
1602 enum mf_cfg_afex_vlan_mode afex_vlan_mode;
1585 u32 pending_max; 1603 u32 pending_max;
1586 1604
1587 /* multiple tx classes of service */ 1605 /* multiple tx classes of service */
@@ -2138,9 +2156,16 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
2138#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) 2156#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
2139#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) 2157#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
2140 2158
2159#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp) ((bp)->mf_ext_config & \
2160 MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
2161
2162#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp))
2141#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ 2163#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
2142 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ 2164 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
2143 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) 2165 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
2166#else
2167#define IS_MF_FCOE_AFEX(bp) false
2144#endif 2168#endif
2145 2169
2170
2146#endif /* bnx2x.h */ 2171#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4b054812713a..ad0743bf4bde 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -23,7 +23,6 @@
23#include <linux/ip.h> 23#include <linux/ip.h>
24#include <net/ipv6.h> 24#include <net/ipv6.h>
25#include <net/ip6_checksum.h> 25#include <net/ip6_checksum.h>
26#include <linux/firmware.h>
27#include <linux/prefetch.h> 26#include <linux/prefetch.h>
28#include "bnx2x_cmn.h" 27#include "bnx2x_cmn.h"
29#include "bnx2x_init.h" 28#include "bnx2x_init.h"
@@ -329,16 +328,6 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
329 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); 328 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
330 tpa_info->full_page = 329 tpa_info->full_page =
331 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size; 330 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
332 /*
333 * FW 7.2.16 BUG workaround:
334 * if SGE size is (exactly) multiple gro_size
335 * fw will place one less frag on SGE.
336 * the calculation is done only for potentially
337 * dangerous MTUs.
338 */
339 if (unlikely(bp->gro_check))
340 if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
341 tpa_info->full_page -= gro_size;
342 tpa_info->gro_size = gro_size; 331 tpa_info->gro_size = gro_size;
343 } 332 }
344 333
@@ -369,8 +358,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
369 * Approximate value of the MSS for this aggregation calculated using 358 * Approximate value of the MSS for this aggregation calculated using
370 * the first packet of it. 359 * the first packet of it.
371 */ 360 */
372static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, 361static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
373 u16 len_on_bd) 362 u16 len_on_bd)
374{ 363{
375 /* 364 /*
376 * TPA arrgregation won't have either IP options or TCP options 365 * TPA arrgregation won't have either IP options or TCP options
@@ -396,6 +385,36 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
396 return len_on_bd - hdrs_len; 385 return len_on_bd - hdrs_len;
397} 386}
398 387
388static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
389 struct bnx2x_fastpath *fp, u16 index)
390{
391 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
392 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
393 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
394 dma_addr_t mapping;
395
396 if (unlikely(page == NULL)) {
397 BNX2X_ERR("Can't alloc sge\n");
398 return -ENOMEM;
399 }
400
401 mapping = dma_map_page(&bp->pdev->dev, page, 0,
402 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
403 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
404 __free_pages(page, PAGES_PER_SGE_SHIFT);
405 BNX2X_ERR("Can't map sge\n");
406 return -ENOMEM;
407 }
408
409 sw_buf->page = page;
410 dma_unmap_addr_set(sw_buf, mapping, mapping);
411
412 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
413 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
414
415 return 0;
416}
417
399static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, 418static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
400 struct bnx2x_agg_info *tpa_info, 419 struct bnx2x_agg_info *tpa_info,
401 u16 pages, 420 u16 pages,
@@ -494,11 +513,11 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
494 return 0; 513 return 0;
495} 514}
496 515
497static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, 516static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
498 struct bnx2x_agg_info *tpa_info, 517 struct bnx2x_agg_info *tpa_info,
499 u16 pages, 518 u16 pages,
500 struct eth_end_agg_rx_cqe *cqe, 519 struct eth_end_agg_rx_cqe *cqe,
501 u16 cqe_idx) 520 u16 cqe_idx)
502{ 521{
503 struct sw_rx_bd *rx_buf = &tpa_info->first_buf; 522 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
504 u8 pad = tpa_info->placement_offset; 523 u8 pad = tpa_info->placement_offset;
@@ -524,7 +543,7 @@ static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
524 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), 543 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
525 fp->rx_buf_size, DMA_FROM_DEVICE); 544 fp->rx_buf_size, DMA_FROM_DEVICE);
526 if (likely(new_data)) 545 if (likely(new_data))
527 skb = build_skb(data); 546 skb = build_skb(data, 0);
528 547
529 if (likely(skb)) { 548 if (likely(skb)) {
530#ifdef BNX2X_STOP_ON_ERROR 549#ifdef BNX2X_STOP_ON_ERROR
@@ -568,6 +587,36 @@ drop:
568 fp->eth_q_stats.rx_skb_alloc_failed++; 587 fp->eth_q_stats.rx_skb_alloc_failed++;
569} 588}
570 589
590static int bnx2x_alloc_rx_data(struct bnx2x *bp,
591 struct bnx2x_fastpath *fp, u16 index)
592{
593 u8 *data;
594 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
595 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
596 dma_addr_t mapping;
597
598 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
599 if (unlikely(data == NULL))
600 return -ENOMEM;
601
602 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
603 fp->rx_buf_size,
604 DMA_FROM_DEVICE);
605 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
606 kfree(data);
607 BNX2X_ERR("Can't map rx data\n");
608 return -ENOMEM;
609 }
610
611 rx_buf->data = data;
612 dma_unmap_addr_set(rx_buf, mapping, mapping);
613
614 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
615 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
616
617 return 0;
618}
619
571 620
572int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) 621int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
573{ 622{
@@ -732,7 +781,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
732 dma_unmap_addr(rx_buf, mapping), 781 dma_unmap_addr(rx_buf, mapping),
733 fp->rx_buf_size, 782 fp->rx_buf_size,
734 DMA_FROM_DEVICE); 783 DMA_FROM_DEVICE);
735 skb = build_skb(data); 784 skb = build_skb(data, 0);
736 if (unlikely(!skb)) { 785 if (unlikely(!skb)) {
737 kfree(data); 786 kfree(data);
738 fp->eth_q_stats.rx_skb_alloc_failed++; 787 fp->eth_q_stats.rx_skb_alloc_failed++;
@@ -881,8 +930,8 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
881 * 930 *
882 * It uses a none-atomic bit operations because is called under the mutex. 931 * It uses a none-atomic bit operations because is called under the mutex.
883 */ 932 */
884static inline void bnx2x_fill_report_data(struct bnx2x *bp, 933static void bnx2x_fill_report_data(struct bnx2x *bp,
885 struct bnx2x_link_report_data *data) 934 struct bnx2x_link_report_data *data)
886{ 935{
887 u16 line_speed = bnx2x_get_mf_speed(bp); 936 u16 line_speed = bnx2x_get_mf_speed(bp);
888 937
@@ -1000,6 +1049,47 @@ void __bnx2x_link_report(struct bnx2x *bp)
1000 } 1049 }
1001} 1050}
1002 1051
1052static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1053{
1054 int i;
1055
1056 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1057 struct eth_rx_sge *sge;
1058
1059 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1060 sge->addr_hi =
1061 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1062 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1063
1064 sge->addr_lo =
1065 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1066 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1067 }
1068}
1069
1070static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1071 struct bnx2x_fastpath *fp, int last)
1072{
1073 int i;
1074
1075 for (i = 0; i < last; i++) {
1076 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1077 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1078 u8 *data = first_buf->data;
1079
1080 if (data == NULL) {
1081 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1082 continue;
1083 }
1084 if (tpa_info->tpa_state == BNX2X_TPA_START)
1085 dma_unmap_single(&bp->pdev->dev,
1086 dma_unmap_addr(first_buf, mapping),
1087 fp->rx_buf_size, DMA_FROM_DEVICE);
1088 kfree(data);
1089 first_buf->data = NULL;
1090 }
1091}
1092
1003void bnx2x_init_rx_rings(struct bnx2x *bp) 1093void bnx2x_init_rx_rings(struct bnx2x *bp)
1004{ 1094{
1005 int func = BP_FUNC(bp); 1095 int func = BP_FUNC(bp);
@@ -1212,16 +1302,15 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1212 1302
1213void bnx2x_free_irq(struct bnx2x *bp) 1303void bnx2x_free_irq(struct bnx2x *bp)
1214{ 1304{
1215 if (bp->flags & USING_MSIX_FLAG) 1305 if (bp->flags & USING_MSIX_FLAG &&
1306 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1216 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + 1307 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1217 CNIC_PRESENT + 1); 1308 CNIC_PRESENT + 1);
1218 else if (bp->flags & USING_MSI_FLAG)
1219 free_irq(bp->pdev->irq, bp->dev);
1220 else 1309 else
1221 free_irq(bp->pdev->irq, bp->dev); 1310 free_irq(bp->dev->irq, bp->dev);
1222} 1311}
1223 1312
1224int bnx2x_enable_msix(struct bnx2x *bp) 1313int __devinit bnx2x_enable_msix(struct bnx2x *bp)
1225{ 1314{
1226 int msix_vec = 0, i, rc, req_cnt; 1315 int msix_vec = 0, i, rc, req_cnt;
1227 1316
@@ -1261,8 +1350,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1261 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); 1350 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1262 1351
1263 if (rc) { 1352 if (rc) {
1264 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1353 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1265 return rc; 1354 goto no_msix;
1266 } 1355 }
1267 /* 1356 /*
1268 * decrease number of queues by number of unallocated entries 1357 * decrease number of queues by number of unallocated entries
@@ -1270,18 +1359,34 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1270 bp->num_queues -= diff; 1359 bp->num_queues -= diff;
1271 1360
1272 BNX2X_DEV_INFO("New queue configuration set: %d\n", 1361 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1273 bp->num_queues); 1362 bp->num_queues);
1274 } else if (rc) { 1363 } else if (rc > 0) {
1275 /* fall to INTx if not enough memory */ 1364 /* Get by with single vector */
1276 if (rc == -ENOMEM) 1365 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1277 bp->flags |= DISABLE_MSI_FLAG; 1366 if (rc) {
1367 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1368 rc);
1369 goto no_msix;
1370 }
1371
1372 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1373 bp->flags |= USING_SINGLE_MSIX_FLAG;
1374
1375 } else if (rc < 0) {
1278 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1376 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1279 return rc; 1377 goto no_msix;
1280 } 1378 }
1281 1379
1282 bp->flags |= USING_MSIX_FLAG; 1380 bp->flags |= USING_MSIX_FLAG;
1283 1381
1284 return 0; 1382 return 0;
1383
1384no_msix:
1385 /* fall to INTx if not enough memory */
1386 if (rc == -ENOMEM)
1387 bp->flags |= DISABLE_MSI_FLAG;
1388
1389 return rc;
1285} 1390}
1286 1391
1287static int bnx2x_req_msix_irqs(struct bnx2x *bp) 1392static int bnx2x_req_msix_irqs(struct bnx2x *bp)
@@ -1343,22 +1448,26 @@ int bnx2x_enable_msi(struct bnx2x *bp)
1343static int bnx2x_req_irq(struct bnx2x *bp) 1448static int bnx2x_req_irq(struct bnx2x *bp)
1344{ 1449{
1345 unsigned long flags; 1450 unsigned long flags;
1346 int rc; 1451 unsigned int irq;
1347 1452
1348 if (bp->flags & USING_MSI_FLAG) 1453 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1349 flags = 0; 1454 flags = 0;
1350 else 1455 else
1351 flags = IRQF_SHARED; 1456 flags = IRQF_SHARED;
1352 1457
1353 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, 1458 if (bp->flags & USING_MSIX_FLAG)
1354 bp->dev->name, bp->dev); 1459 irq = bp->msix_table[0].vector;
1355 return rc; 1460 else
1461 irq = bp->pdev->irq;
1462
1463 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1356} 1464}
1357 1465
1358static inline int bnx2x_setup_irqs(struct bnx2x *bp) 1466static int bnx2x_setup_irqs(struct bnx2x *bp)
1359{ 1467{
1360 int rc = 0; 1468 int rc = 0;
1361 if (bp->flags & USING_MSIX_FLAG) { 1469 if (bp->flags & USING_MSIX_FLAG &&
1470 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1362 rc = bnx2x_req_msix_irqs(bp); 1471 rc = bnx2x_req_msix_irqs(bp);
1363 if (rc) 1472 if (rc)
1364 return rc; 1473 return rc;
@@ -1371,15 +1480,20 @@ static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1371 } 1480 }
1372 if (bp->flags & USING_MSI_FLAG) { 1481 if (bp->flags & USING_MSI_FLAG) {
1373 bp->dev->irq = bp->pdev->irq; 1482 bp->dev->irq = bp->pdev->irq;
1374 netdev_info(bp->dev, "using MSI IRQ %d\n", 1483 netdev_info(bp->dev, "using MSI IRQ %d\n",
1375 bp->pdev->irq); 1484 bp->dev->irq);
1485 }
1486 if (bp->flags & USING_MSIX_FLAG) {
1487 bp->dev->irq = bp->msix_table[0].vector;
1488 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1489 bp->dev->irq);
1376 } 1490 }
1377 } 1491 }
1378 1492
1379 return 0; 1493 return 0;
1380} 1494}
1381 1495
1382static inline void bnx2x_napi_enable(struct bnx2x *bp) 1496static void bnx2x_napi_enable(struct bnx2x *bp)
1383{ 1497{
1384 int i; 1498 int i;
1385 1499
@@ -1387,7 +1501,7 @@ static inline void bnx2x_napi_enable(struct bnx2x *bp)
1387 napi_enable(&bnx2x_fp(bp, i, napi)); 1501 napi_enable(&bnx2x_fp(bp, i, napi));
1388} 1502}
1389 1503
1390static inline void bnx2x_napi_disable(struct bnx2x *bp) 1504static void bnx2x_napi_disable(struct bnx2x *bp)
1391{ 1505{
1392 int i; 1506 int i;
1393 1507
@@ -1437,24 +1551,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1437 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1551 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1438} 1552}
1439 1553
1554
1440void bnx2x_set_num_queues(struct bnx2x *bp) 1555void bnx2x_set_num_queues(struct bnx2x *bp)
1441{ 1556{
1442 switch (bp->multi_mode) { 1557 /* RSS queues */
1443 case ETH_RSS_MODE_DISABLED: 1558 bp->num_queues = bnx2x_calc_num_queues(bp);
1444 bp->num_queues = 1;
1445 break;
1446 case ETH_RSS_MODE_REGULAR:
1447 bp->num_queues = bnx2x_calc_num_queues(bp);
1448 break;
1449
1450 default:
1451 bp->num_queues = 1;
1452 break;
1453 }
1454 1559
1455#ifdef BCM_CNIC 1560#ifdef BCM_CNIC
1456 /* override in STORAGE SD mode */ 1561 /* override in STORAGE SD modes */
1457 if (IS_MF_STORAGE_SD(bp)) 1562 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1458 bp->num_queues = 1; 1563 bp->num_queues = 1;
1459#endif 1564#endif
1460 /* Add special queues */ 1565 /* Add special queues */
@@ -1483,7 +1588,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1483 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() 1588 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1484 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). 1589 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1485 */ 1590 */
1486static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) 1591static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1487{ 1592{
1488 int rc, tx, rx; 1593 int rc, tx, rx;
1489 1594
@@ -1515,7 +1620,7 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1515 return rc; 1620 return rc;
1516} 1621}
1517 1622
1518static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) 1623static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1519{ 1624{
1520 int i; 1625 int i;
1521 1626
@@ -1543,22 +1648,19 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1543 } 1648 }
1544} 1649}
1545 1650
1546static inline int bnx2x_init_rss_pf(struct bnx2x *bp) 1651static int bnx2x_init_rss_pf(struct bnx2x *bp)
1547{ 1652{
1548 int i; 1653 int i;
1549 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; 1654 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1550 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); 1655 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1551 1656
1552 /* 1657 /* Prepare the initial contents fo the indirection table if RSS is
1553 * Prepare the inital contents fo the indirection table if RSS is
1554 * enabled 1658 * enabled
1555 */ 1659 */
1556 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { 1660 for (i = 0; i < sizeof(ind_table); i++)
1557 for (i = 0; i < sizeof(ind_table); i++) 1661 ind_table[i] =
1558 ind_table[i] = 1662 bp->fp->cl_id +
1559 bp->fp->cl_id + 1663 ethtool_rxfh_indir_default(i, num_eth_queues);
1560 ethtool_rxfh_indir_default(i, num_eth_queues);
1561 }
1562 1664
1563 /* 1665 /*
1564 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 1666 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
@@ -1568,11 +1670,12 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1568 * For 57712 and newer on the other hand it's a per-function 1670 * For 57712 and newer on the other hand it's a per-function
1569 * configuration. 1671 * configuration.
1570 */ 1672 */
1571 return bnx2x_config_rss_pf(bp, ind_table, 1673 return bnx2x_config_rss_eth(bp, ind_table,
1572 bp->port.pmf || !CHIP_IS_E1x(bp)); 1674 bp->port.pmf || !CHIP_IS_E1x(bp));
1573} 1675}
1574 1676
1575int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) 1677int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1678 u8 *ind_table, bool config_hash)
1576{ 1679{
1577 struct bnx2x_config_rss_params params = {NULL}; 1680 struct bnx2x_config_rss_params params = {NULL};
1578 int i; 1681 int i;
@@ -1584,58 +1687,35 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1584 * bp->multi_mode = ETH_RSS_MODE_DISABLED; 1687 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1585 */ 1688 */
1586 1689
1587 params.rss_obj = &bp->rss_conf_obj; 1690 params.rss_obj = rss_obj;
1588 1691
1589 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 1692 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1590 1693
1591 /* RSS mode */ 1694 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1592 switch (bp->multi_mode) {
1593 case ETH_RSS_MODE_DISABLED:
1594 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1595 break;
1596 case ETH_RSS_MODE_REGULAR:
1597 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1598 break;
1599 case ETH_RSS_MODE_VLAN_PRI:
1600 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1601 break;
1602 case ETH_RSS_MODE_E1HOV_PRI:
1603 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1604 break;
1605 case ETH_RSS_MODE_IP_DSCP:
1606 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1607 break;
1608 default:
1609 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1610 return -EINVAL;
1611 }
1612 1695
1613 /* If RSS is enabled */ 1696 /* RSS configuration */
1614 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { 1697 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1615 /* RSS configuration */ 1698 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1616 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags); 1699 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1617 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags); 1700 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1618 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1619 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1620 1701
1621 /* Hash bits */ 1702 /* Hash bits */
1622 params.rss_result_mask = MULTI_MASK; 1703 params.rss_result_mask = MULTI_MASK;
1623 1704
1624 memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); 1705 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1625 1706
1626 if (config_hash) { 1707 if (config_hash) {
1627 /* RSS keys */ 1708 /* RSS keys */
1628 for (i = 0; i < sizeof(params.rss_key) / 4; i++) 1709 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1629 params.rss_key[i] = random32(); 1710 params.rss_key[i] = random32();
1630 1711
1631 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags); 1712 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1632 }
1633 } 1713 }
1634 1714
1635 return bnx2x_config_rss(bp, &params); 1715 return bnx2x_config_rss(bp, &params);
1636} 1716}
1637 1717
1638static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 1718static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1639{ 1719{
1640 struct bnx2x_func_state_params func_params = {NULL}; 1720 struct bnx2x_func_state_params func_params = {NULL};
1641 1721
@@ -1744,6 +1824,87 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1744 return true; 1824 return true;
1745} 1825}
1746 1826
1827/**
1828 * bnx2x_bz_fp - zero content of the fastpath structure.
1829 *
1830 * @bp: driver handle
1831 * @index: fastpath index to be zeroed
1832 *
1833 * Makes sure the contents of the bp->fp[index].napi is kept
1834 * intact.
1835 */
1836static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1837{
1838 struct bnx2x_fastpath *fp = &bp->fp[index];
1839 struct napi_struct orig_napi = fp->napi;
1840 /* bzero bnx2x_fastpath contents */
1841 if (bp->stats_init)
1842 memset(fp, 0, sizeof(*fp));
1843 else {
1844 /* Keep Queue statistics */
1845 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1846 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1847
1848 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1849 GFP_KERNEL);
1850 if (tmp_eth_q_stats)
1851 memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
1852 sizeof(struct bnx2x_eth_q_stats));
1853
1854 tmp_eth_q_stats_old =
1855 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1856 GFP_KERNEL);
1857 if (tmp_eth_q_stats_old)
1858 memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
1859 sizeof(struct bnx2x_eth_q_stats_old));
1860
1861 memset(fp, 0, sizeof(*fp));
1862
1863 if (tmp_eth_q_stats) {
1864 memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
1865 sizeof(struct bnx2x_eth_q_stats));
1866 kfree(tmp_eth_q_stats);
1867 }
1868
1869 if (tmp_eth_q_stats_old) {
1870 memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
1871 sizeof(struct bnx2x_eth_q_stats_old));
1872 kfree(tmp_eth_q_stats_old);
1873 }
1874
1875 }
1876
1877 /* Restore the NAPI object as it has been already initialized */
1878 fp->napi = orig_napi;
1879
1880 fp->bp = bp;
1881 fp->index = index;
1882 if (IS_ETH_FP(fp))
1883 fp->max_cos = bp->max_cos;
1884 else
1885 /* Special queues support only one CoS */
1886 fp->max_cos = 1;
1887
1888 /*
1889 * set the tpa flag for each queue. The tpa flag determines the queue
1890 * minimal size so it must be set prior to queue memory allocation
1891 */
1892 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1893 (bp->flags & GRO_ENABLE_FLAG &&
1894 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1895 if (bp->flags & TPA_ENABLE_FLAG)
1896 fp->mode = TPA_MODE_LRO;
1897 else if (bp->flags & GRO_ENABLE_FLAG)
1898 fp->mode = TPA_MODE_GRO;
1899
1900#ifdef BCM_CNIC
1901 /* We don't want TPA on an FCoE L2 ring */
1902 if (IS_FCOE_FP(fp))
1903 fp->disable_tpa = 1;
1904#endif
1905}
1906
1907
1747/* must be called with rtnl_lock */ 1908/* must be called with rtnl_lock */
1748int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 1909int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1749{ 1910{
@@ -1911,8 +2072,14 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1911 SHMEM2_WR(bp, dcc_support, 2072 SHMEM2_WR(bp, dcc_support,
1912 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | 2073 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1913 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); 2074 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2075 if (SHMEM2_HAS(bp, afex_driver_support))
2076 SHMEM2_WR(bp, afex_driver_support,
2077 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
1914 } 2078 }
1915 2079
2080 /* Set AFEX default VLAN tag to an invalid value */
2081 bp->afex_def_vlan_tag = -1;
2082
1916 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 2083 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1917 rc = bnx2x_func_start(bp); 2084 rc = bnx2x_func_start(bp);
1918 if (rc) { 2085 if (rc) {
@@ -2968,6 +3135,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2968 3135
2969 netdev_tx_sent_queue(txq, skb->len); 3136 netdev_tx_sent_queue(txq, skb->len);
2970 3137
3138 skb_tx_timestamp(skb);
3139
2971 txdata->tx_pkt_prod++; 3140 txdata->tx_pkt_prod++;
2972 /* 3141 /*
2973 * Make sure that the BD data is updated before updating the producer 3142 * Make sure that the BD data is updated before updating the producer
@@ -3084,7 +3253,8 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3084 } 3253 }
3085 3254
3086#ifdef BCM_CNIC 3255#ifdef BCM_CNIC
3087 if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) { 3256 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3257 !is_zero_ether_addr(addr->sa_data)) {
3088 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); 3258 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3089 return -EINVAL; 3259 return -EINVAL;
3090 } 3260 }
@@ -3181,7 +3351,7 @@ void bnx2x_free_fp_mem(struct bnx2x *bp)
3181 bnx2x_free_fp_mem_at(bp, i); 3351 bnx2x_free_fp_mem_at(bp, i);
3182} 3352}
3183 3353
3184static inline void set_sb_shortcuts(struct bnx2x *bp, int index) 3354static void set_sb_shortcuts(struct bnx2x *bp, int index)
3185{ 3355{
3186 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); 3356 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3187 if (!CHIP_IS_E1x(bp)) { 3357 if (!CHIP_IS_E1x(bp)) {
@@ -3197,6 +3367,63 @@ static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3197 } 3367 }
3198} 3368}
3199 3369
3370/* Returns the number of actually allocated BDs */
3371static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3372 int rx_ring_size)
3373{
3374 struct bnx2x *bp = fp->bp;
3375 u16 ring_prod, cqe_ring_prod;
3376 int i, failure_cnt = 0;
3377
3378 fp->rx_comp_cons = 0;
3379 cqe_ring_prod = ring_prod = 0;
3380
3381 /* This routine is called only during fo init so
3382 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3383 */
3384 for (i = 0; i < rx_ring_size; i++) {
3385 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3386 failure_cnt++;
3387 continue;
3388 }
3389 ring_prod = NEXT_RX_IDX(ring_prod);
3390 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3391 WARN_ON(ring_prod <= (i - failure_cnt));
3392 }
3393
3394 if (failure_cnt)
3395 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3396 i - failure_cnt, fp->index);
3397
3398 fp->rx_bd_prod = ring_prod;
3399 /* Limit the CQE producer by the CQE ring size */
3400 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3401 cqe_ring_prod);
3402 fp->rx_pkt = fp->rx_calls = 0;
3403
3404 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3405
3406 return i - failure_cnt;
3407}
3408
3409static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3410{
3411 int i;
3412
3413 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3414 struct eth_rx_cqe_next_page *nextpg;
3415
3416 nextpg = (struct eth_rx_cqe_next_page *)
3417 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3418 nextpg->addr_hi =
3419 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3420 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3421 nextpg->addr_lo =
3422 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3423 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3424 }
3425}
3426
3200static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) 3427static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3201{ 3428{
3202 union host_hc_status_block *sb; 3429 union host_hc_status_block *sb;
@@ -3206,7 +3433,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3206 int rx_ring_size = 0; 3433 int rx_ring_size = 0;
3207 3434
3208#ifdef BCM_CNIC 3435#ifdef BCM_CNIC
3209 if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) { 3436 if (!bp->rx_ring_size &&
3437 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3210 rx_ring_size = MIN_RX_SIZE_NONTPA; 3438 rx_ring_size = MIN_RX_SIZE_NONTPA;
3211 bp->rx_ring_size = rx_ring_size; 3439 bp->rx_ring_size = rx_ring_size;
3212 } else 3440 } else
@@ -3528,8 +3756,6 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3528 */ 3756 */
3529 dev->mtu = new_mtu; 3757 dev->mtu = new_mtu;
3530 3758
3531 bp->gro_check = bnx2x_need_gro_check(new_mtu);
3532
3533 return bnx2x_reload_if_running(dev); 3759 return bnx2x_reload_if_running(dev);
3534} 3760}
3535 3761
@@ -3687,9 +3913,9 @@ void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3687 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); 3913 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3688} 3914}
3689 3915
3690static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, 3916static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3691 u8 fw_sb_id, u8 sb_index, 3917 u8 fw_sb_id, u8 sb_index,
3692 u8 ticks) 3918 u8 ticks)
3693{ 3919{
3694 3920
3695 u32 addr = BAR_CSTRORM_INTMEM + 3921 u32 addr = BAR_CSTRORM_INTMEM +
@@ -3700,9 +3926,9 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3700 port, fw_sb_id, sb_index, ticks); 3926 port, fw_sb_id, sb_index, ticks);
3701} 3927}
3702 3928
3703static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, 3929static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3704 u16 fw_sb_id, u8 sb_index, 3930 u16 fw_sb_id, u8 sb_index,
3705 u8 disable) 3931 u8 disable)
3706{ 3932{
3707 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 3933 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3708 u32 addr = BAR_CSTRORM_INTMEM + 3934 u32 addr = BAR_CSTRORM_INTMEM +
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 5c27454d2ec2..7cd99b75347a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -86,13 +86,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
86void bnx2x_send_unload_done(struct bnx2x *bp); 86void bnx2x_send_unload_done(struct bnx2x *bp);
87 87
88/** 88/**
89 * bnx2x_config_rss_pf - configure RSS parameters. 89 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
90 * 90 *
91 * @bp: driver handle 91 * @bp: driver handle
92 * @rss_obj RSS object to use
92 * @ind_table: indirection table to configure 93 * @ind_table: indirection table to configure
93 * @config_hash: re-configure RSS hash keys configuration 94 * @config_hash: re-configure RSS hash keys configuration
94 */ 95 */
95int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); 96int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
97 u8 *ind_table, bool config_hash);
96 98
97/** 99/**
98 * bnx2x__init_func_obj - init function object 100 * bnx2x__init_func_obj - init function object
@@ -485,7 +487,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
485 * fills msix_table, requests vectors, updates num_queues 487 * fills msix_table, requests vectors, updates num_queues
486 * according to number of available vectors. 488 * according to number of available vectors.
487 */ 489 */
488int bnx2x_enable_msix(struct bnx2x *bp); 490int __devinit bnx2x_enable_msix(struct bnx2x *bp);
489 491
490/** 492/**
491 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly 493 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -610,53 +612,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
610 barrier(); 612 barrier();
611} 613}
612 614
613static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
614 u8 idu_sb_id, bool is_Pf)
615{
616 u32 data, ctl, cnt = 100;
617 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
618 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
619 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
620 u32 sb_bit = 1 << (idu_sb_id%32);
621 u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
622 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
623
624 /* Not supported in BC mode */
625 if (CHIP_INT_MODE_IS_BC(bp))
626 return;
627
628 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
629 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
630 IGU_REGULAR_CLEANUP_SET |
631 IGU_REGULAR_BCLEANUP;
632
633 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
634 func_encode << IGU_CTRL_REG_FID_SHIFT |
635 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
636
637 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
638 data, igu_addr_data);
639 REG_WR(bp, igu_addr_data, data);
640 mmiowb();
641 barrier();
642 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
643 ctl, igu_addr_ctl);
644 REG_WR(bp, igu_addr_ctl, ctl);
645 mmiowb();
646 barrier();
647
648 /* wait for clean up to finish */
649 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
650 msleep(20);
651
652
653 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
654 DP(NETIF_MSG_HW,
655 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
656 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
657 }
658}
659
660static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, 615static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
661 u8 storm, u16 index, u8 op, u8 update) 616 u8 storm, u16 index, u8 op, u8 update)
662{ 617{
@@ -843,7 +798,7 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)
843{ 798{
844 if (bp->flags & USING_MSIX_FLAG) { 799 if (bp->flags & USING_MSIX_FLAG) {
845 pci_disable_msix(bp->pdev); 800 pci_disable_msix(bp->pdev);
846 bp->flags &= ~USING_MSIX_FLAG; 801 bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
847 } else if (bp->flags & USING_MSI_FLAG) { 802 } else if (bp->flags & USING_MSI_FLAG) {
848 pci_disable_msi(bp->pdev); 803 pci_disable_msi(bp->pdev);
849 bp->flags &= ~USING_MSI_FLAG; 804 bp->flags &= ~USING_MSI_FLAG;
@@ -883,66 +838,6 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
883 bnx2x_clear_sge_mask_next_elems(fp); 838 bnx2x_clear_sge_mask_next_elems(fp);
884} 839}
885 840
886static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
887 struct bnx2x_fastpath *fp, u16 index)
888{
889 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
890 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
891 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
892 dma_addr_t mapping;
893
894 if (unlikely(page == NULL)) {
895 BNX2X_ERR("Can't alloc sge\n");
896 return -ENOMEM;
897 }
898
899 mapping = dma_map_page(&bp->pdev->dev, page, 0,
900 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
901 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
902 __free_pages(page, PAGES_PER_SGE_SHIFT);
903 BNX2X_ERR("Can't map sge\n");
904 return -ENOMEM;
905 }
906
907 sw_buf->page = page;
908 dma_unmap_addr_set(sw_buf, mapping, mapping);
909
910 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
911 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
912
913 return 0;
914}
915
916static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
917 struct bnx2x_fastpath *fp, u16 index)
918{
919 u8 *data;
920 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
921 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
922 dma_addr_t mapping;
923
924 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
925 if (unlikely(data == NULL))
926 return -ENOMEM;
927
928 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
929 fp->rx_buf_size,
930 DMA_FROM_DEVICE);
931 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
932 kfree(data);
933 BNX2X_ERR("Can't map rx data\n");
934 return -ENOMEM;
935 }
936
937 rx_buf->data = data;
938 dma_unmap_addr_set(rx_buf, mapping, mapping);
939
940 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
941 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
942
943 return 0;
944}
945
946/* note that we are not allocating a new buffer, 841/* note that we are not allocating a new buffer,
947 * we are just moving one from cons to prod 842 * we are just moving one from cons to prod
948 * we are not creating a new mapping, 843 * we are not creating a new mapping,
@@ -964,6 +859,19 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
964 859
965/************************* Init ******************************************/ 860/************************* Init ******************************************/
966 861
862/* returns func by VN for current port */
863static inline int func_by_vn(struct bnx2x *bp, int vn)
864{
865 return 2 * vn + BP_PORT(bp);
866}
867
868static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
869 bool config_hash)
870{
871 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
872 config_hash);
873}
874
967/** 875/**
968 * bnx2x_func_start - init function 876 * bnx2x_func_start - init function
969 * 877 *
@@ -1027,66 +935,6 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1027 bnx2x_free_rx_sge(bp, fp, i); 935 bnx2x_free_rx_sge(bp, fp, i);
1028} 936}
1029 937
1030static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
1031 struct bnx2x_fastpath *fp, int last)
1032{
1033 int i;
1034
1035 for (i = 0; i < last; i++) {
1036 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1037 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1038 u8 *data = first_buf->data;
1039
1040 if (data == NULL) {
1041 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1042 continue;
1043 }
1044 if (tpa_info->tpa_state == BNX2X_TPA_START)
1045 dma_unmap_single(&bp->pdev->dev,
1046 dma_unmap_addr(first_buf, mapping),
1047 fp->rx_buf_size, DMA_FROM_DEVICE);
1048 kfree(data);
1049 first_buf->data = NULL;
1050 }
1051}
1052
1053static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
1054{
1055 int i;
1056
1057 for (i = 1; i <= NUM_TX_RINGS; i++) {
1058 struct eth_tx_next_bd *tx_next_bd =
1059 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
1060
1061 tx_next_bd->addr_hi =
1062 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
1063 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
1064 tx_next_bd->addr_lo =
1065 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
1066 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
1067 }
1068
1069 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
1070 txdata->tx_db.data.zero_fill1 = 0;
1071 txdata->tx_db.data.prod = 0;
1072
1073 txdata->tx_pkt_prod = 0;
1074 txdata->tx_pkt_cons = 0;
1075 txdata->tx_bd_prod = 0;
1076 txdata->tx_bd_cons = 0;
1077 txdata->tx_pkt = 0;
1078}
1079
1080static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
1081{
1082 int i;
1083 u8 cos;
1084
1085 for_each_tx_queue(bp, i)
1086 for_each_cos_in_tx_queue(&bp->fp[i], cos)
1087 bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
1088}
1089
1090static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) 938static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
1091{ 939{
1092 int i; 940 int i;
@@ -1104,80 +952,6 @@ static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
1104 } 952 }
1105} 953}
1106 954
1107static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1108{
1109 int i;
1110
1111 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1112 struct eth_rx_sge *sge;
1113
1114 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1115 sge->addr_hi =
1116 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1117 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1118
1119 sge->addr_lo =
1120 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1121 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1122 }
1123}
1124
1125static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
1126{
1127 int i;
1128 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
1129 struct eth_rx_cqe_next_page *nextpg;
1130
1131 nextpg = (struct eth_rx_cqe_next_page *)
1132 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
1133 nextpg->addr_hi =
1134 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
1135 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
1136 nextpg->addr_lo =
1137 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
1138 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
1139 }
1140}
1141
1142/* Returns the number of actually allocated BDs */
1143static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
1144 int rx_ring_size)
1145{
1146 struct bnx2x *bp = fp->bp;
1147 u16 ring_prod, cqe_ring_prod;
1148 int i, failure_cnt = 0;
1149
1150 fp->rx_comp_cons = 0;
1151 cqe_ring_prod = ring_prod = 0;
1152
1153 /* This routine is called only during fo init so
1154 * fp->eth_q_stats.rx_skb_alloc_failed = 0
1155 */
1156 for (i = 0; i < rx_ring_size; i++) {
1157 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
1158 failure_cnt++;
1159 continue;
1160 }
1161 ring_prod = NEXT_RX_IDX(ring_prod);
1162 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
1163 WARN_ON(ring_prod <= (i - failure_cnt));
1164 }
1165
1166 if (failure_cnt)
1167 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
1168 i - failure_cnt, fp->index);
1169
1170 fp->rx_bd_prod = ring_prod;
1171 /* Limit the CQE producer by the CQE ring size */
1172 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
1173 cqe_ring_prod);
1174 fp->rx_pkt = fp->rx_calls = 0;
1175
1176 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1177
1178 return i - failure_cnt;
1179}
1180
1181/* Statistics ID are global per chip/path, while Client IDs for E1x are per 955/* Statistics ID are global per chip/path, while Client IDs for E1x are per
1182 * port. 956 * port.
1183 */ 957 */
@@ -1406,30 +1180,6 @@ static inline void __storm_memset_struct(struct bnx2x *bp,
1406 REG_WR(bp, addr + (i * 4), data[i]); 1180 REG_WR(bp, addr + (i * 4), data[i]);
1407} 1181}
1408 1182
1409static inline void storm_memset_func_cfg(struct bnx2x *bp,
1410 struct tstorm_eth_function_common_config *tcfg,
1411 u16 abs_fid)
1412{
1413 size_t size = sizeof(struct tstorm_eth_function_common_config);
1414
1415 u32 addr = BAR_TSTRORM_INTMEM +
1416 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
1417
1418 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
1419}
1420
1421static inline void storm_memset_cmng(struct bnx2x *bp,
1422 struct cmng_struct_per_port *cmng,
1423 u8 port)
1424{
1425 size_t size = sizeof(struct cmng_struct_per_port);
1426
1427 u32 addr = BAR_XSTRORM_INTMEM +
1428 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
1429
1430 __storm_memset_struct(bp, addr, size, (u32 *)cmng);
1431}
1432
1433/** 1183/**
1434 * bnx2x_wait_sp_comp - wait for the outstanding SP commands. 1184 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
1435 * 1185 *
@@ -1512,93 +1262,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
1512 */ 1262 */
1513 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; 1263 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
1514} 1264}
1515
1516static inline bool bnx2x_need_gro_check(int mtu)
1517{
1518 return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
1519 (SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
1520}
1521
1522/**
1523 * bnx2x_bz_fp - zero content of the fastpath structure.
1524 *
1525 * @bp: driver handle
1526 * @index: fastpath index to be zeroed
1527 *
1528 * Makes sure the contents of the bp->fp[index].napi is kept
1529 * intact.
1530 */
1531static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
1532{
1533 struct bnx2x_fastpath *fp = &bp->fp[index];
1534 struct napi_struct orig_napi = fp->napi;
1535 /* bzero bnx2x_fastpath contents */
1536 if (bp->stats_init)
1537 memset(fp, 0, sizeof(*fp));
1538 else {
1539 /* Keep Queue statistics */
1540 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1541 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1542
1543 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1544 GFP_KERNEL);
1545 if (tmp_eth_q_stats)
1546 memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
1547 sizeof(struct bnx2x_eth_q_stats));
1548
1549 tmp_eth_q_stats_old =
1550 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1551 GFP_KERNEL);
1552 if (tmp_eth_q_stats_old)
1553 memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
1554 sizeof(struct bnx2x_eth_q_stats_old));
1555
1556 memset(fp, 0, sizeof(*fp));
1557
1558 if (tmp_eth_q_stats) {
1559 memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
1560 sizeof(struct bnx2x_eth_q_stats));
1561 kfree(tmp_eth_q_stats);
1562 }
1563
1564 if (tmp_eth_q_stats_old) {
1565 memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
1566 sizeof(struct bnx2x_eth_q_stats_old));
1567 kfree(tmp_eth_q_stats_old);
1568 }
1569
1570 }
1571
1572 /* Restore the NAPI object as it has been already initialized */
1573 fp->napi = orig_napi;
1574
1575 fp->bp = bp;
1576 fp->index = index;
1577 if (IS_ETH_FP(fp))
1578 fp->max_cos = bp->max_cos;
1579 else
1580 /* Special queues support only one CoS */
1581 fp->max_cos = 1;
1582
1583 /*
1584 * set the tpa flag for each queue. The tpa flag determines the queue
1585 * minimal size so it must be set prior to queue memory allocation
1586 */
1587 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1588 (bp->flags & GRO_ENABLE_FLAG &&
1589 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1590 if (bp->flags & TPA_ENABLE_FLAG)
1591 fp->mode = TPA_MODE_LRO;
1592 else if (bp->flags & GRO_ENABLE_FLAG)
1593 fp->mode = TPA_MODE_GRO;
1594
1595#ifdef BCM_CNIC
1596 /* We don't want TPA on an FCoE L2 ring */
1597 if (IS_FCOE_FP(fp))
1598 fp->disable_tpa = 1;
1599#endif
1600}
1601
1602#ifdef BCM_CNIC 1265#ifdef BCM_CNIC
1603/** 1266/**
1604 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. 1267 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
@@ -1608,11 +1271,6 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
1608 */ 1271 */
1609void bnx2x_get_iscsi_info(struct bnx2x *bp); 1272void bnx2x_get_iscsi_info(struct bnx2x *bp);
1610#endif 1273#endif
1611/* returns func by VN for current port */
1612static inline int func_by_vn(struct bnx2x *bp, int vn)
1613{
1614 return 2 * vn + BP_PORT(bp);
1615}
1616 1274
1617/** 1275/**
1618 * bnx2x_link_sync_notify - send notification to other functions. 1276 * bnx2x_link_sync_notify - send notification to other functions.
@@ -1667,7 +1325,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
1667 if (is_valid_ether_addr(addr)) 1325 if (is_valid_ether_addr(addr))
1668 return true; 1326 return true;
1669#ifdef BCM_CNIC 1327#ifdef BCM_CNIC
1670 if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp)) 1328 if (is_zero_ether_addr(addr) &&
1329 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
1671 return true; 1330 return true;
1672#endif 1331#endif
1673 return false; 1332 return false;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 2cc0a1703970..ddc18ee5c5ae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -22,13 +22,10 @@
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/crc32.h> 24#include <linux/crc32.h>
25
26
27#include "bnx2x.h" 25#include "bnx2x.h"
28#include "bnx2x_cmn.h" 26#include "bnx2x_cmn.h"
29#include "bnx2x_dump.h" 27#include "bnx2x_dump.h"
30#include "bnx2x_init.h" 28#include "bnx2x_init.h"
31#include "bnx2x_sp.h"
32 29
33/* Note: in the format strings below %s is replaced by the queue-name which is 30/* Note: in the format strings below %s is replaced by the queue-name which is
34 * either its index or 'fcoe' for the fcoe queue. Make sure the format string 31 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -595,8 +592,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
595#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE) 592#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
596#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE) 593#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
597 594
598static inline bool bnx2x_is_reg_online(struct bnx2x *bp, 595static bool bnx2x_is_reg_online(struct bnx2x *bp,
599 const struct reg_addr *reg_info) 596 const struct reg_addr *reg_info)
600{ 597{
601 if (CHIP_IS_E1(bp)) 598 if (CHIP_IS_E1(bp))
602 return IS_E1_ONLINE(reg_info->info); 599 return IS_E1_ONLINE(reg_info->info);
@@ -613,7 +610,7 @@ static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
613} 610}
614 611
615/******* Paged registers info selectors ********/ 612/******* Paged registers info selectors ********/
616static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) 613static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
617{ 614{
618 if (CHIP_IS_E2(bp)) 615 if (CHIP_IS_E2(bp))
619 return page_vals_e2; 616 return page_vals_e2;
@@ -623,7 +620,7 @@ static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
623 return NULL; 620 return NULL;
624} 621}
625 622
626static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) 623static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
627{ 624{
628 if (CHIP_IS_E2(bp)) 625 if (CHIP_IS_E2(bp))
629 return PAGE_MODE_VALUES_E2; 626 return PAGE_MODE_VALUES_E2;
@@ -633,7 +630,7 @@ static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
633 return 0; 630 return 0;
634} 631}
635 632
636static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) 633static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
637{ 634{
638 if (CHIP_IS_E2(bp)) 635 if (CHIP_IS_E2(bp))
639 return page_write_regs_e2; 636 return page_write_regs_e2;
@@ -643,7 +640,7 @@ static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
643 return NULL; 640 return NULL;
644} 641}
645 642
646static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp) 643static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
647{ 644{
648 if (CHIP_IS_E2(bp)) 645 if (CHIP_IS_E2(bp))
649 return PAGE_WRITE_REGS_E2; 646 return PAGE_WRITE_REGS_E2;
@@ -653,7 +650,7 @@ static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
653 return 0; 650 return 0;
654} 651}
655 652
656static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) 653static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
657{ 654{
658 if (CHIP_IS_E2(bp)) 655 if (CHIP_IS_E2(bp))
659 return page_read_regs_e2; 656 return page_read_regs_e2;
@@ -663,7 +660,7 @@ static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
663 return NULL; 660 return NULL;
664} 661}
665 662
666static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp) 663static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
667{ 664{
668 if (CHIP_IS_E2(bp)) 665 if (CHIP_IS_E2(bp))
669 return PAGE_READ_REGS_E2; 666 return PAGE_READ_REGS_E2;
@@ -673,7 +670,7 @@ static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
673 return 0; 670 return 0;
674} 671}
675 672
676static inline int __bnx2x_get_regs_len(struct bnx2x *bp) 673static int __bnx2x_get_regs_len(struct bnx2x *bp)
677{ 674{
678 int num_pages = __bnx2x_get_page_reg_num(bp); 675 int num_pages = __bnx2x_get_page_reg_num(bp);
679 int page_write_num = __bnx2x_get_page_write_num(bp); 676 int page_write_num = __bnx2x_get_page_write_num(bp);
@@ -718,7 +715,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
718 * ("read address"). There may be more than one write address per "page" and 715 * ("read address"). There may be more than one write address per "page" and
719 * more than one read address per write address. 716 * more than one read address per write address.
720 */ 717 */
721static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) 718static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
722{ 719{
723 u32 i, j, k, n; 720 u32 i, j, k, n;
724 /* addresses of the paged registers */ 721 /* addresses of the paged registers */
@@ -747,7 +744,7 @@ static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
747 } 744 }
748} 745}
749 746
750static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) 747static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
751{ 748{
752 u32 i, j; 749 u32 i, j;
753 750
@@ -1433,7 +1430,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
1433 else 1430 else
1434 ering->rx_pending = MAX_RX_AVAIL; 1431 ering->rx_pending = MAX_RX_AVAIL;
1435 1432
1436 ering->tx_max_pending = MAX_TX_AVAIL; 1433 ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
1437 ering->tx_pending = bp->tx_ring_size; 1434 ering->tx_pending = bp->tx_ring_size;
1438} 1435}
1439 1436
@@ -1451,7 +1448,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
1451 if ((ering->rx_pending > MAX_RX_AVAIL) || 1448 if ((ering->rx_pending > MAX_RX_AVAIL) ||
1452 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 1449 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
1453 MIN_RX_SIZE_TPA)) || 1450 MIN_RX_SIZE_TPA)) ||
1454 (ering->tx_pending > MAX_TX_AVAIL) || 1451 (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
1455 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) { 1452 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
1456 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); 1453 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
1457 return -EINVAL; 1454 return -EINVAL;
@@ -2212,7 +2209,7 @@ static void bnx2x_self_test(struct net_device *dev,
2212/* ethtool statistics are displayed for all regular ethernet queues and the 2209/* ethtool statistics are displayed for all regular ethernet queues and the
2213 * fcoe L2 queue if not disabled 2210 * fcoe L2 queue if not disabled
2214 */ 2211 */
2215static inline int bnx2x_num_stat_queues(struct bnx2x *bp) 2212static int bnx2x_num_stat_queues(struct bnx2x *bp)
2216{ 2213{
2217 return BNX2X_NUM_ETH_QUEUES(bp); 2214 return BNX2X_NUM_ETH_QUEUES(bp);
2218} 2215}
@@ -2396,10 +2393,7 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2396 2393
2397static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) 2394static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
2398{ 2395{
2399 struct bnx2x *bp = netdev_priv(dev); 2396 return T_ETH_INDIRECTION_TABLE_SIZE;
2400
2401 return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
2402 0 : T_ETH_INDIRECTION_TABLE_SIZE);
2403} 2397}
2404 2398
2405static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) 2399static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
@@ -2445,7 +2439,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
2445 ind_table[i] = indir[i] + bp->fp->cl_id; 2439 ind_table[i] = indir[i] + bp->fp->cl_id;
2446 } 2440 }
2447 2441
2448 return bnx2x_config_rss_pf(bp, ind_table, false); 2442 return bnx2x_config_rss_eth(bp, ind_table, false);
2449} 2443}
2450 2444
2451static const struct ethtool_ops bnx2x_ethtool_ops = { 2445static const struct ethtool_ops bnx2x_ethtool_ops = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index b9b263323436..426f77aa721a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -387,7 +387,7 @@
387 387
388#define STATS_QUERY_CMD_COUNT 16 388#define STATS_QUERY_CMD_COUNT 16
389 389
390#define NIV_LIST_TABLE_SIZE 4096 390#define AFEX_LIST_TABLE_SIZE 4096
391 391
392#define INVALID_VNIC_ID 0xFF 392#define INVALID_VNIC_ID 0xFF
393 393
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index dbff5915b81a..a440a8ba85f2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -833,6 +833,7 @@ struct shared_feat_cfg { /* NVRAM Offset */
833 #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 833 #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
834 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 834 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
835 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 835 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
836 #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
836 837
837 /* The interval in seconds between sending LLDP packets. Set to zero 838 /* The interval in seconds between sending LLDP packets. Set to zero
838 to disable the feature */ 839 to disable the feature */
@@ -1235,6 +1236,8 @@ struct drv_func_mb {
1235 #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 1236 #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
1236 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 1237 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
1237 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 1238 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
1239 #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000
1240 #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
1238 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 1241 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
1239 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 1242 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
1240 1243
@@ -1242,6 +1245,13 @@ struct drv_func_mb {
1242 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 1245 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
1243 1246
1244 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 1247 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
1248
1249 #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000
1250 #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000
1251 #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000
1252 #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000
1253 #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000
1254
1245 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 1255 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
1246 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 1256 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
1247 1257
@@ -1299,6 +1309,14 @@ struct drv_func_mb {
1299 #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 1309 #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
1300 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 1310 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
1301 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 1311 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
1312 #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000
1313
1314 #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000
1315 #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000
1316 #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000
1317 #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000
1318 #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000
1319
1302 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 1320 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
1303 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 1321 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
1304 1322
@@ -1357,6 +1375,12 @@ struct drv_func_mb {
1357 1375
1358 #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 1376 #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
1359 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 1377 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
1378 #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000
1379 #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000
1380 #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000
1381 #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000
1382 #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000
1383
1360 #define DRV_STATUS_DRV_INFO_REQ 0x04000000 1384 #define DRV_STATUS_DRV_INFO_REQ 0x04000000
1361 1385
1362 u32 virt_mac_upper; 1386 u32 virt_mac_upper;
@@ -1448,7 +1472,26 @@ struct func_mf_cfg {
1448 #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 1472 #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
1449 #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK 1473 #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
1450 1474
1451 u32 reserved[2]; 1475 /* afex default VLAN ID - 12 bits */
1476 #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000
1477 #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16
1478
1479 u32 afex_config;
1480 #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff
1481 #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0
1482 #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00
1483 #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8
1484 #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100
1485 #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000
1486 #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16
1487
1488 u32 reserved;
1489};
1490
1491enum mf_cfg_afex_vlan_mode {
1492 FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
1493 FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
1494 FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
1452}; 1495};
1453 1496
1454/* This structure is not applicable and should not be accessed on 57711 */ 1497/* This structure is not applicable and should not be accessed on 57711 */
@@ -1945,18 +1988,29 @@ struct shmem2_region {
1945 1988
1946 u32 nvm_retain_bitmap_addr; /* 0x0070 */ 1989 u32 nvm_retain_bitmap_addr; /* 0x0070 */
1947 1990
1948 u32 reserved1; /* 0x0074 */ 1991 /* afex support of that driver */
1992 u32 afex_driver_support; /* 0x0074 */
1993 #define SHMEM_AFEX_VERSION_MASK 0x100f
1994 #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001
1995 #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000
1949 1996
1950 u32 reserved2[E2_FUNC_MAX]; 1997 /* driver receives addr in scratchpad to which it should respond */
1998 u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX];
1951 1999
1952 u32 reserved3[E2_FUNC_MAX];/* 0x0088 */ 2000 /* generic params from MCP to driver (value depends on the msg sent
1953 u32 reserved4[E2_FUNC_MAX];/* 0x0098 */ 2001 * to driver
2002 */
2003 u32 afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */
2004 u32 afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */
1954 2005
1955 u32 swim_base_addr; /* 0x0108 */ 2006 u32 swim_base_addr; /* 0x0108 */
1956 u32 swim_funcs; 2007 u32 swim_funcs;
1957 u32 swim_main_cb; 2008 u32 swim_main_cb;
1958 2009
1959 u32 reserved5[2]; 2010 /* bitmap notifying which VIF profiles stored in nvram are enabled by
2011 * switch
2012 */
2013 u32 afex_profiles_enabled[2];
1960 2014
1961 /* generic flags controlled by the driver */ 2015 /* generic flags controlled by the driver */
1962 u32 drv_flags; 2016 u32 drv_flags;
@@ -2696,10 +2750,51 @@ union drv_info_to_mcp {
2696 struct fcoe_stats_info fcoe_stat; 2750 struct fcoe_stats_info fcoe_stat;
2697 struct iscsi_stats_info iscsi_stat; 2751 struct iscsi_stats_info iscsi_stat;
2698}; 2752};
2753
2754/* stats collected for afex.
2755 * NOTE: structure is exactly as expected to be received by the switch.
2756 * order must remain exactly as is unless protocol changes !
2757 */
2758struct afex_stats {
2759 u32 tx_unicast_frames_hi;
2760 u32 tx_unicast_frames_lo;
2761 u32 tx_unicast_bytes_hi;
2762 u32 tx_unicast_bytes_lo;
2763 u32 tx_multicast_frames_hi;
2764 u32 tx_multicast_frames_lo;
2765 u32 tx_multicast_bytes_hi;
2766 u32 tx_multicast_bytes_lo;
2767 u32 tx_broadcast_frames_hi;
2768 u32 tx_broadcast_frames_lo;
2769 u32 tx_broadcast_bytes_hi;
2770 u32 tx_broadcast_bytes_lo;
2771 u32 tx_frames_discarded_hi;
2772 u32 tx_frames_discarded_lo;
2773 u32 tx_frames_dropped_hi;
2774 u32 tx_frames_dropped_lo;
2775
2776 u32 rx_unicast_frames_hi;
2777 u32 rx_unicast_frames_lo;
2778 u32 rx_unicast_bytes_hi;
2779 u32 rx_unicast_bytes_lo;
2780 u32 rx_multicast_frames_hi;
2781 u32 rx_multicast_frames_lo;
2782 u32 rx_multicast_bytes_hi;
2783 u32 rx_multicast_bytes_lo;
2784 u32 rx_broadcast_frames_hi;
2785 u32 rx_broadcast_frames_lo;
2786 u32 rx_broadcast_bytes_hi;
2787 u32 rx_broadcast_bytes_lo;
2788 u32 rx_frames_discarded_hi;
2789 u32 rx_frames_discarded_lo;
2790 u32 rx_frames_dropped_hi;
2791 u32 rx_frames_dropped_lo;
2792};
2793
2699#define BCM_5710_FW_MAJOR_VERSION 7 2794#define BCM_5710_FW_MAJOR_VERSION 7
2700#define BCM_5710_FW_MINOR_VERSION 2 2795#define BCM_5710_FW_MINOR_VERSION 2
2701#define BCM_5710_FW_REVISION_VERSION 16 2796#define BCM_5710_FW_REVISION_VERSION 51
2702#define BCM_5710_FW_ENGINEERING_VERSION 0 2797#define BCM_5710_FW_ENGINEERING_VERSION 0
2703#define BCM_5710_FW_COMPILE_FLAGS 1 2798#define BCM_5710_FW_COMPILE_FLAGS 1
2704 2799
2705 2800
@@ -3389,7 +3484,7 @@ struct client_init_tx_data {
3389#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) 3484#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
3390#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 3485#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
3391 u8 default_vlan_flg; 3486 u8 default_vlan_flg;
3392 u8 reserved2; 3487 u8 force_default_pri_flg;
3393 __le32 reserved3; 3488 __le32 reserved3;
3394}; 3489};
3395 3490
@@ -4375,8 +4470,21 @@ struct fcoe_statistics_params {
4375 4470
4376 4471
4377/* 4472/*
4473 * The data afex vif list ramrod need
4474 */
4475struct afex_vif_list_ramrod_data {
4476 u8 afex_vif_list_command;
4477 u8 func_bit_map;
4478 __le16 vif_list_index;
4479 u8 func_to_clear;
4480 u8 echo;
4481 __le16 reserved1;
4482};
4483
4484
4485/*
4378 * cfc delete event data 4486 * cfc delete event data
4379*/ 4487 */
4380struct cfc_del_event_data { 4488struct cfc_del_event_data {
4381 u32 cid; 4489 u32 cid;
4382 u32 reserved0; 4490 u32 reserved0;
@@ -4448,6 +4556,65 @@ struct cmng_struct_per_port {
4448 struct cmng_flags_per_port flags; 4556 struct cmng_flags_per_port flags;
4449}; 4557};
4450 4558
4559/*
4560 * a single rate shaping counter. can be used as protocol or vnic counter
4561 */
4562struct rate_shaping_counter {
4563 u32 quota;
4564#if defined(__BIG_ENDIAN)
4565 u16 __reserved0;
4566 u16 rate;
4567#elif defined(__LITTLE_ENDIAN)
4568 u16 rate;
4569 u16 __reserved0;
4570#endif
4571};
4572
4573/*
4574 * per-vnic rate shaping variables
4575 */
4576struct rate_shaping_vars_per_vn {
4577 struct rate_shaping_counter vn_counter;
4578};
4579
4580/*
4581 * per-vnic fairness variables
4582 */
4583struct fairness_vars_per_vn {
4584 u32 cos_credit_delta[MAX_COS_NUMBER];
4585 u32 vn_credit_delta;
4586 u32 __reserved0;
4587};
4588
4589/*
4590 * cmng port init state
4591 */
4592struct cmng_vnic {
4593 struct rate_shaping_vars_per_vn vnic_max_rate[4];
4594 struct fairness_vars_per_vn vnic_min_rate[4];
4595};
4596
4597/*
4598 * cmng port init state
4599 */
4600struct cmng_init {
4601 struct cmng_struct_per_port port;
4602 struct cmng_vnic vnic;
4603};
4604
4605
4606/*
4607 * driver parameters for congestion management init, all rates are in Mbps
4608 */
4609struct cmng_init_input {
4610 u32 port_rate;
4611 u16 vnic_min_rate[4];
4612 u16 vnic_max_rate[4];
4613 u16 cos_min_rate[MAX_COS_NUMBER];
4614 u16 cos_to_pause_mask[MAX_COS_NUMBER];
4615 struct cmng_flags_per_port flags;
4616};
4617
4451 4618
4452/* 4619/*
4453 * Protocol-common command ID for slow path elements 4620 * Protocol-common command ID for slow path elements
@@ -4462,7 +4629,7 @@ enum common_spqe_cmd_id {
4462 RAMROD_CMD_ID_COMMON_STAT_QUERY, 4629 RAMROD_CMD_ID_COMMON_STAT_QUERY,
4463 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 4630 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
4464 RAMROD_CMD_ID_COMMON_START_TRAFFIC, 4631 RAMROD_CMD_ID_COMMON_START_TRAFFIC,
4465 RAMROD_CMD_ID_COMMON_RESERVED1, 4632 RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
4466 MAX_COMMON_SPQE_CMD_ID 4633 MAX_COMMON_SPQE_CMD_ID
4467}; 4634};
4468 4635
@@ -4670,6 +4837,17 @@ struct malicious_vf_event_data {
4670}; 4837};
4671 4838
4672/* 4839/*
4840 * vif list event data
4841 */
4842struct vif_list_event_data {
4843 u8 func_bit_map;
4844 u8 echo;
4845 __le16 reserved0;
4846 __le32 reserved1;
4847 __le32 reserved2;
4848};
4849
4850/*
4673 * union for all event ring message types 4851 * union for all event ring message types
4674 */ 4852 */
4675union event_data { 4853union event_data {
@@ -4678,6 +4856,7 @@ union event_data {
4678 struct cfc_del_event_data cfc_del_event; 4856 struct cfc_del_event_data cfc_del_event;
4679 struct vf_flr_event_data vf_flr_event; 4857 struct vf_flr_event_data vf_flr_event;
4680 struct malicious_vf_event_data malicious_vf_event; 4858 struct malicious_vf_event_data malicious_vf_event;
4859 struct vif_list_event_data vif_list_event;
4681}; 4860};
4682 4861
4683 4862
@@ -4743,7 +4922,7 @@ enum event_ring_opcode {
4743 EVENT_RING_OPCODE_FORWARD_SETUP, 4922 EVENT_RING_OPCODE_FORWARD_SETUP,
4744 EVENT_RING_OPCODE_RSS_UPDATE_RULES, 4923 EVENT_RING_OPCODE_RSS_UPDATE_RULES,
4745 EVENT_RING_OPCODE_FUNCTION_UPDATE, 4924 EVENT_RING_OPCODE_FUNCTION_UPDATE,
4746 EVENT_RING_OPCODE_RESERVED1, 4925 EVENT_RING_OPCODE_AFEX_VIF_LISTS,
4747 EVENT_RING_OPCODE_SET_MAC, 4926 EVENT_RING_OPCODE_SET_MAC,
4748 EVENT_RING_OPCODE_CLASSIFICATION_RULES, 4927 EVENT_RING_OPCODE_CLASSIFICATION_RULES,
4749 EVENT_RING_OPCODE_FILTERS_RULES, 4928 EVENT_RING_OPCODE_FILTERS_RULES,
@@ -4763,16 +4942,6 @@ enum fairness_mode {
4763 4942
4764 4943
4765/* 4944/*
4766 * per-vnic fairness variables
4767 */
4768struct fairness_vars_per_vn {
4769 u32 cos_credit_delta[MAX_COS_NUMBER];
4770 u32 vn_credit_delta;
4771 u32 __reserved0;
4772};
4773
4774
4775/*
4776 * Priority and cos 4945 * Priority and cos
4777 */ 4946 */
4778struct priority_cos { 4947struct priority_cos {
@@ -4800,12 +4969,27 @@ struct flow_control_configuration {
4800struct function_start_data { 4969struct function_start_data {
4801 __le16 function_mode; 4970 __le16 function_mode;
4802 __le16 sd_vlan_tag; 4971 __le16 sd_vlan_tag;
4803 u16 reserved; 4972 __le16 vif_id;
4804 u8 path_id; 4973 u8 path_id;
4805 u8 network_cos_mode; 4974 u8 network_cos_mode;
4806}; 4975};
4807 4976
4808 4977
4978struct function_update_data {
4979 u8 vif_id_change_flg;
4980 u8 afex_default_vlan_change_flg;
4981 u8 allowed_priorities_change_flg;
4982 u8 network_cos_mode_change_flg;
4983 __le16 vif_id;
4984 __le16 afex_default_vlan;
4985 u8 allowed_priorities;
4986 u8 network_cos_mode;
4987 u8 lb_mode_en;
4988 u8 reserved0;
4989 __le32 reserved1;
4990};
4991
4992
4809/* 4993/*
4810 * FW version stored in the Xstorm RAM 4994 * FW version stored in the Xstorm RAM
4811 */ 4995 */
@@ -5003,7 +5187,7 @@ enum mf_mode {
5003 SINGLE_FUNCTION, 5187 SINGLE_FUNCTION,
5004 MULTI_FUNCTION_SD, 5188 MULTI_FUNCTION_SD,
5005 MULTI_FUNCTION_SI, 5189 MULTI_FUNCTION_SI,
5006 MULTI_FUNCTION_RESERVED, 5190 MULTI_FUNCTION_AFEX,
5007 MAX_MF_MODE 5191 MAX_MF_MODE
5008}; 5192};
5009 5193
@@ -5128,6 +5312,7 @@ union protocol_common_specific_data {
5128 u8 protocol_data[8]; 5312 u8 protocol_data[8];
5129 struct regpair phy_address; 5313 struct regpair phy_address;
5130 struct regpair mac_config_addr; 5314 struct regpair mac_config_addr;
5315 struct afex_vif_list_ramrod_data afex_vif_list_data;
5131}; 5316};
5132 5317
5133/* 5318/*
@@ -5140,29 +5325,6 @@ struct protocol_common_spe {
5140 5325
5141 5326
5142/* 5327/*
5143 * a single rate shaping counter. can be used as protocol or vnic counter
5144 */
5145struct rate_shaping_counter {
5146 u32 quota;
5147#if defined(__BIG_ENDIAN)
5148 u16 __reserved0;
5149 u16 rate;
5150#elif defined(__LITTLE_ENDIAN)
5151 u16 rate;
5152 u16 __reserved0;
5153#endif
5154};
5155
5156
5157/*
5158 * per-vnic rate shaping variables
5159 */
5160struct rate_shaping_vars_per_vn {
5161 struct rate_shaping_counter vn_counter;
5162};
5163
5164
5165/*
5166 * The send queue element 5328 * The send queue element
5167 */ 5329 */
5168struct slow_path_element { 5330struct slow_path_element {
@@ -5330,6 +5492,18 @@ enum vf_pf_channel_state {
5330 5492
5331 5493
5332/* 5494/*
5495 * vif_list_rule_kind
5496 */
5497enum vif_list_rule_kind {
5498 VIF_LIST_RULE_SET,
5499 VIF_LIST_RULE_GET,
5500 VIF_LIST_RULE_CLEAR_ALL,
5501 VIF_LIST_RULE_CLEAR_FUNC,
5502 MAX_VIF_LIST_RULE_KIND
5503};
5504
5505
5506/*
5333 * zone A per-queue data 5507 * zone A per-queue data
5334 */ 5508 */
5335struct xstorm_queue_zone_data { 5509struct xstorm_queue_zone_data {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 29f5c3cca31a..559c396d45cc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -125,7 +125,7 @@ enum {
125 MODE_MF = 0x00000100, 125 MODE_MF = 0x00000100,
126 MODE_MF_SD = 0x00000200, 126 MODE_MF_SD = 0x00000200,
127 MODE_MF_SI = 0x00000400, 127 MODE_MF_SI = 0x00000400,
128 MODE_MF_NIV = 0x00000800, 128 MODE_MF_AFEX = 0x00000800,
129 MODE_E3_A0 = 0x00001000, 129 MODE_E3_A0 = 0x00001000,
130 MODE_E3_B0 = 0x00002000, 130 MODE_E3_B0 = 0x00002000,
131 MODE_COS3 = 0x00004000, 131 MODE_COS3 = 0x00004000,
@@ -241,7 +241,8 @@ static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
241 REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); 241 REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
242 242
243 /* set/clear queue bit in command-queue bit map 243 /* set/clear queue bit in command-queue bit map
244 (E2/E3A0 only, valid COS values are 0/1) */ 244 * (E2/E3A0 only, valid COS values are 0/1)
245 */
245 if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { 246 if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
246 reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); 247 reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
247 reg_bit_map = REG_RD(bp, reg_addr); 248 reg_bit_map = REG_RD(bp, reg_addr);
@@ -277,7 +278,215 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
277} 278}
278 279
279 280
280/* Returns the index of start or end of a specific block stage in ops array*/ 281/* congestion managment port init api description
282 * the api works as follows:
283 * the driver should pass the cmng_init_input struct, the port_init function
284 * will prepare the required internal ram structure which will be passed back
285 * to the driver (cmng_init) that will write it into the internal ram.
286 *
287 * IMPORTANT REMARKS:
288 * 1. the cmng_init struct does not represent the contiguous internal ram
289 * structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
290 * offset in order to write the port sub struct and the
291 * PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
292 * words - don't use memcpy!).
293 * 2. although the cmng_init struct is filled for the maximal vnic number
294 * possible, the driver should only write the valid vnics into the internal
295 * ram according to the appropriate port mode.
296 */
297#define BITS_TO_BYTES(x) ((x)/8)
298
299/* CMNG constants, as derived from system spec calculations */
300
301/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */
302#define DEF_MIN_RATE 100
303
304/* resolution of the rate shaping timer - 400 usec */
305#define RS_PERIODIC_TIMEOUT_USEC 400
306
307/* number of bytes in single QM arbitration cycle -
308 * coefficient for calculating the fairness timer
309 */
310#define QM_ARB_BYTES 160000
311
312/* resolution of Min algorithm 1:100 */
313#define MIN_RES 100
314
315/* how many bytes above threshold for
316 * the minimal credit of Min algorithm
317 */
318#define MIN_ABOVE_THRESH 32768
319
320/* Fairness algorithm integration time coefficient -
321 * for calculating the actual Tfair
322 */
323#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
324
325/* Memory of fairness algorithm - 2 cycles */
326#define FAIR_MEM 2
327#define SAFC_TIMEOUT_USEC 52
328
329#define SDM_TICKS 4
330
331
332static inline void bnx2x_init_max(const struct cmng_init_input *input_data,
333 u32 r_param, struct cmng_init *ram_data)
334{
335 u32 vnic;
336 struct cmng_vnic *vdata = &ram_data->vnic;
337 struct cmng_struct_per_port *pdata = &ram_data->port;
338 /* rate shaping per-port variables
339 * 100 micro seconds in SDM ticks = 25
340 * since each tick is 4 microSeconds
341 */
342
343 pdata->rs_vars.rs_periodic_timeout =
344 RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
345
346 /* this is the threshold below which no timer arming will occur.
347 * 1.25 coefficient is for the threshold to be a little bigger
348 * then the real time to compensate for timer in-accuracy
349 */
350 pdata->rs_vars.rs_threshold =
351 (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
352
353 /* rate shaping per-vnic variables */
354 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
355 /* global vnic counter */
356 vdata->vnic_max_rate[vnic].vn_counter.rate =
357 input_data->vnic_max_rate[vnic];
358 /* maximal Mbps for this vnic
359 * the quota in each timer period - number of bytes
360 * transmitted in this period
361 */
362 vdata->vnic_max_rate[vnic].vn_counter.quota =
363 RS_PERIODIC_TIMEOUT_USEC *
364 (u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
365 }
366
367}
368
369static inline void bnx2x_init_min(const struct cmng_init_input *input_data,
370 u32 r_param, struct cmng_init *ram_data)
371{
372 u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
373 struct cmng_vnic *vdata = &ram_data->vnic;
374 struct cmng_struct_per_port *pdata = &ram_data->port;
375
376 /* this is the resolution of the fairness timer */
377 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
378
379 /* fairness per-port variables
380 * for 10G it is 1000usec. for 1G it is 10000usec.
381 */
382 tFair = T_FAIR_COEF / input_data->port_rate;
383
384 /* this is the threshold below which we won't arm the timer anymore */
385 pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
386
387 /* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
388 * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
389 */
390 pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
391
392 /* since each tick is 4 microSeconds */
393 pdata->fair_vars.fairness_timeout =
394 fair_periodic_timeout_usec / SDM_TICKS;
395
396 /* calculate sum of weights */
397 vnicWeightSum = 0;
398
399 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++)
400 vnicWeightSum += input_data->vnic_min_rate[vnic];
401
402 /* global vnic counter */
403 if (vnicWeightSum > 0) {
404 /* fairness per-vnic variables */
405 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
406 /* this is the credit for each period of the fairness
407 * algorithm - number of bytes in T_FAIR (this vnic
408 * share of the port rate)
409 */
410 vdata->vnic_min_rate[vnic].vn_credit_delta =
411 (u32)input_data->vnic_min_rate[vnic] * 100 *
412 (T_FAIR_COEF / (8 * 100 * vnicWeightSum));
413 if (vdata->vnic_min_rate[vnic].vn_credit_delta <
414 pdata->fair_vars.fair_threshold +
415 MIN_ABOVE_THRESH) {
416 vdata->vnic_min_rate[vnic].vn_credit_delta =
417 pdata->fair_vars.fair_threshold +
418 MIN_ABOVE_THRESH;
419 }
420 }
421 }
422}
423
424static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
425 u32 r_param, struct cmng_init *ram_data)
426{
427 u32 vnic, cos;
428 u32 cosWeightSum = 0;
429 struct cmng_vnic *vdata = &ram_data->vnic;
430 struct cmng_struct_per_port *pdata = &ram_data->port;
431
432 for (cos = 0; cos < MAX_COS_NUMBER; cos++)
433 cosWeightSum += input_data->cos_min_rate[cos];
434
435 if (cosWeightSum > 0) {
436
437 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
438 /* Since cos and vnic shouldn't work together the rate
439 * to divide between the coses is the port rate.
440 */
441 u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
442 for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
443 /* this is the credit for each period of
444 * the fairness algorithm - number of bytes
445 * in T_FAIR (this cos share of the vnic rate)
446 */
447 ccd[cos] =
448 (u32)input_data->cos_min_rate[cos] * 100 *
449 (T_FAIR_COEF / (8 * 100 * cosWeightSum));
450 if (ccd[cos] < pdata->fair_vars.fair_threshold
451 + MIN_ABOVE_THRESH) {
452 ccd[cos] =
453 pdata->fair_vars.fair_threshold +
454 MIN_ABOVE_THRESH;
455 }
456 }
457 }
458 }
459}
460
461static inline void bnx2x_init_safc(const struct cmng_init_input *input_data,
462 struct cmng_init *ram_data)
463{
464 /* in microSeconds */
465 ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
466}
467
468/* Congestion management port init */
469static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data,
470 struct cmng_init *ram_data)
471{
472 u32 r_param;
473 memset(ram_data, 0, sizeof(struct cmng_init));
474
475 ram_data->port.flags = input_data->flags;
476
477 /* number of bytes transmitted in a rate of 10Gbps
478 * in one usec = 1.25KB.
479 */
480 r_param = BITS_TO_BYTES(input_data->port_rate);
481 bnx2x_init_max(input_data, r_param, ram_data);
482 bnx2x_init_min(input_data, r_param, ram_data);
483 bnx2x_init_fw_wrr(input_data, r_param, ram_data);
484 bnx2x_init_safc(input_data, ram_data);
485}
486
487
488
489/* Returns the index of start or end of a specific block stage in ops array */
281#define BLOCK_OPS_IDX(block, stage, end) \ 490#define BLOCK_OPS_IDX(block, stage, end) \
282 (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) 491 (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
283 492
@@ -499,9 +708,7 @@ static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
499 bnx2x_set_mcp_parity(bp, false); 708 bnx2x_set_mcp_parity(bp, false);
500} 709}
501 710
502/** 711/* Clear the parity error status registers. */
503 * Clear the parity error status registers.
504 */
505static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) 712static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
506{ 713{
507 int i; 714 int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index ad95324dc042..a3fb7215cd89 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -138,7 +138,6 @@
138 138
139 139
140 140
141/* */
142#define SFP_EEPROM_CON_TYPE_ADDR 0x2 141#define SFP_EEPROM_CON_TYPE_ADDR 0x2
143 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 142 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
144 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 143 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
@@ -404,8 +403,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
404 403
405 DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n"); 404 DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
406 405
407 /* 406 /* mapping between entry priority to client number (0,1,2 -debug and
408 * mapping between entry priority to client number (0,1,2 -debug and
409 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) 407 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
410 * 3bits client num. 408 * 3bits client num.
411 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 409 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
@@ -413,8 +411,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
413 */ 411 */
414 412
415 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); 413 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
416 /* 414 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
417 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
418 * as strict. Bits 0,1,2 - debug and management entries, 3 - 415 * as strict. Bits 0,1,2 - debug and management entries, 3 -
419 * COS0 entry, 4 - COS1 entry. 416 * COS0 entry, 4 - COS1 entry.
420 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT 417 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -425,13 +422,11 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
425 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); 422 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
426 /* defines which entries (clients) are subjected to WFQ arbitration */ 423 /* defines which entries (clients) are subjected to WFQ arbitration */
427 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); 424 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
428 /* 425 /* For strict priority entries defines the number of consecutive
429 * For strict priority entries defines the number of consecutive
430 * slots for the highest priority. 426 * slots for the highest priority.
431 */ 427 */
432 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 428 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
433 /* 429 /* mapping between the CREDIT_WEIGHT registers and actual client
434 * mapping between the CREDIT_WEIGHT registers and actual client
435 * numbers 430 * numbers
436 */ 431 */
437 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); 432 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
@@ -443,8 +438,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
443 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); 438 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
444 /* ETS mode disable */ 439 /* ETS mode disable */
445 REG_WR(bp, PBF_REG_ETS_ENABLED, 0); 440 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
446 /* 441 /* If ETS mode is enabled (there is no strict priority) defines a WFQ
447 * If ETS mode is enabled (there is no strict priority) defines a WFQ
448 * weight for COS0/COS1. 442 * weight for COS0/COS1.
449 */ 443 */
450 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710); 444 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
@@ -471,10 +465,9 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
471 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; 465 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
472 } else 466 } else
473 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; 467 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
474 /** 468 /* If the link isn't up (static configuration for example ) The
475 * If the link isn't up (static configuration for example ) The 469 * link will be according to 20GBPS.
476 * link will be according to 20GBPS. 470 */
477 */
478 return min_w_val; 471 return min_w_val;
479} 472}
480/****************************************************************************** 473/******************************************************************************
@@ -538,8 +531,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
538 struct bnx2x *bp = params->bp; 531 struct bnx2x *bp = params->bp;
539 const u8 port = params->port; 532 const u8 port = params->port;
540 const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars); 533 const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
541 /** 534 /* Mapping between entry priority to client number (0,1,2 -debug and
542 * mapping between entry priority to client number (0,1,2 -debug and
543 * management clients, 3 - COS0 client, 4 - COS1, ... 8 - 535 * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
544 * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by 536 * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by
545 * reset value or init tool 537 * reset value or init tool
@@ -551,18 +543,14 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
551 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); 543 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
552 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); 544 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
553 } 545 }
554 /** 546 /* For strict priority entries defines the number of consecutive
555 * For strict priority entries defines the number of consecutive 547 * slots for the highest priority.
556 * slots for the highest priority. 548 */
557 */
558 /* TODO_ETS - Should be done by reset value or init tool */
559 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : 549 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
560 NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 550 NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
561 /** 551 /* Mapping between the CREDIT_WEIGHT registers and actual client
562 * mapping between the CREDIT_WEIGHT registers and actual client
563 * numbers 552 * numbers
564 */ 553 */
565 /* TODO_ETS - Should be done by reset value or init tool */
566 if (port) { 554 if (port) {
567 /*Port 1 has 6 COS*/ 555 /*Port 1 has 6 COS*/
568 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); 556 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
@@ -574,8 +562,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
574 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); 562 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
575 } 563 }
576 564
577 /** 565 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
578 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
579 * as strict. Bits 0,1,2 - debug and management entries, 3 - 566 * as strict. Bits 0,1,2 - debug and management entries, 3 -
580 * COS0 entry, 4 - COS1 entry. 567 * COS0 entry, 4 - COS1 entry.
581 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT 568 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -590,13 +577,12 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
590 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : 577 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
591 NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); 578 NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
592 579
593 /** 580 /* Please notice the register address are note continuous and a
594 * Please notice the register address are note continuous and a 581 * for here is note appropriate.In 2 port mode port0 only COS0-5
595 * for here is note appropriate.In 2 port mode port0 only COS0-5 582 * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4
596 * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4 583 * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT
597 * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT 584 * are never used for WFQ
598 * are never used for WFQ 585 */
599 */
600 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : 586 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
601 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0); 587 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
602 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : 588 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
@@ -633,10 +619,9 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
633 u32 base_upper_bound = 0; 619 u32 base_upper_bound = 0;
634 u8 max_cos = 0; 620 u8 max_cos = 0;
635 u8 i = 0; 621 u8 i = 0;
636 /** 622 /* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
637 * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 623 * port mode port1 has COS0-2 that can be used for WFQ.
638 * port mode port1 has COS0-2 that can be used for WFQ. 624 */
639 */
640 if (!port) { 625 if (!port) {
641 base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; 626 base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
642 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; 627 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -666,8 +651,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
666 u32 base_weight = 0; 651 u32 base_weight = 0;
667 u8 max_cos = 0; 652 u8 max_cos = 0;
668 653
669 /** 654 /* Mapping between entry priority to client number 0 - COS0
670 * mapping between entry priority to client number 0 - COS0
671 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num. 655 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
672 * TODO_ETS - Should be done by reset value or init tool 656 * TODO_ETS - Should be done by reset value or init tool
673 */ 657 */
@@ -695,10 +679,9 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
695 679
696 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : 680 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
697 PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0); 681 PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
698 /** 682 /* In 2 port mode port0 has COS0-5 that can be used for WFQ.
699 * In 2 port mode port0 has COS0-5 that can be used for WFQ. 683 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
700 * In 4 port mode port1 has COS0-2 that can be used for WFQ. 684 */
701 */
702 if (!port) { 685 if (!port) {
703 base_weight = PBF_REG_COS0_WEIGHT_P0; 686 base_weight = PBF_REG_COS0_WEIGHT_P0;
704 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; 687 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -738,7 +721,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
738/****************************************************************************** 721/******************************************************************************
739* Description: 722* Description:
740* Disable will return basicly the values to init values. 723* Disable will return basicly the values to init values.
741*. 724*
742******************************************************************************/ 725******************************************************************************/
743int bnx2x_ets_disabled(struct link_params *params, 726int bnx2x_ets_disabled(struct link_params *params,
744 struct link_vars *vars) 727 struct link_vars *vars)
@@ -867,7 +850,7 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
867/****************************************************************************** 850/******************************************************************************
868* Description: 851* Description:
869* Calculate the total BW.A value of 0 isn't legal. 852* Calculate the total BW.A value of 0 isn't legal.
870*. 853*
871******************************************************************************/ 854******************************************************************************/
872static int bnx2x_ets_e3b0_get_total_bw( 855static int bnx2x_ets_e3b0_get_total_bw(
873 const struct link_params *params, 856 const struct link_params *params,
@@ -879,7 +862,6 @@ static int bnx2x_ets_e3b0_get_total_bw(
879 u8 is_bw_cos_exist = 0; 862 u8 is_bw_cos_exist = 0;
880 863
881 *total_bw = 0 ; 864 *total_bw = 0 ;
882
883 /* Calculate total BW requested */ 865 /* Calculate total BW requested */
884 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { 866 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
885 if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) { 867 if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
@@ -887,10 +869,9 @@ static int bnx2x_ets_e3b0_get_total_bw(
887 if (!ets_params->cos[cos_idx].params.bw_params.bw) { 869 if (!ets_params->cos[cos_idx].params.bw_params.bw) {
888 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" 870 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
889 "was set to 0\n"); 871 "was set to 0\n");
890 /* 872 /* This is to prevent a state when ramrods
891 * This is to prevent a state when ramrods
892 * can't be sent 873 * can't be sent
893 */ 874 */
894 ets_params->cos[cos_idx].params.bw_params.bw 875 ets_params->cos[cos_idx].params.bw_params.bw
895 = 1; 876 = 1;
896 } 877 }
@@ -908,8 +889,7 @@ static int bnx2x_ets_e3b0_get_total_bw(
908 } 889 }
909 DP(NETIF_MSG_LINK, 890 DP(NETIF_MSG_LINK,
910 "bnx2x_ets_E3B0_config total BW should be 100\n"); 891 "bnx2x_ets_E3B0_config total BW should be 100\n");
911 /* 892 /* We can handle a case whre the BW isn't 100 this can happen
912 * We can handle a case whre the BW isn't 100 this can happen
913 * if the TC are joined. 893 * if the TC are joined.
914 */ 894 */
915 } 895 }
@@ -919,7 +899,7 @@ static int bnx2x_ets_e3b0_get_total_bw(
919/****************************************************************************** 899/******************************************************************************
920* Description: 900* Description:
921* Invalidate all the sp_pri_to_cos. 901* Invalidate all the sp_pri_to_cos.
922*. 902*
923******************************************************************************/ 903******************************************************************************/
924static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos) 904static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
925{ 905{
@@ -931,7 +911,7 @@ static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
931* Description: 911* Description:
932* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers 912* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
933* according to sp_pri_to_cos. 913* according to sp_pri_to_cos.
934*. 914*
935******************************************************************************/ 915******************************************************************************/
936static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, 916static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
937 u8 *sp_pri_to_cos, const u8 pri, 917 u8 *sp_pri_to_cos, const u8 pri,
@@ -942,6 +922,12 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
942 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : 922 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
943 DCBX_E3B0_MAX_NUM_COS_PORT0; 923 DCBX_E3B0_MAX_NUM_COS_PORT0;
944 924
925 if (pri >= max_num_of_cos) {
926 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
927 "parameter Illegal strict priority\n");
928 return -EINVAL;
929 }
930
945 if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { 931 if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
946 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " 932 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
947 "parameter There can't be two COS's with " 933 "parameter There can't be two COS's with "
@@ -949,12 +935,6 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
949 return -EINVAL; 935 return -EINVAL;
950 } 936 }
951 937
952 if (pri > max_num_of_cos) {
953 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
954 "parameter Illegal strict priority\n");
955 return -EINVAL;
956 }
957
958 sp_pri_to_cos[pri] = cos_entry; 938 sp_pri_to_cos[pri] = cos_entry;
959 return 0; 939 return 0;
960 940
@@ -964,7 +944,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
964* Description: 944* Description:
965* Returns the correct value according to COS and priority in 945* Returns the correct value according to COS and priority in
966* the sp_pri_cli register. 946* the sp_pri_cli register.
967*. 947*
968******************************************************************************/ 948******************************************************************************/
969static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset, 949static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
970 const u8 pri_set, 950 const u8 pri_set,
@@ -981,7 +961,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
981* Description: 961* Description:
982* Returns the correct value according to COS and priority in the 962* Returns the correct value according to COS and priority in the
983* sp_pri_cli register for NIG. 963* sp_pri_cli register for NIG.
984*. 964*
985******************************************************************************/ 965******************************************************************************/
986static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set) 966static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
987{ 967{
@@ -997,7 +977,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
997* Description: 977* Description:
998* Returns the correct value according to COS and priority in the 978* Returns the correct value according to COS and priority in the
999* sp_pri_cli register for PBF. 979* sp_pri_cli register for PBF.
1000*. 980*
1001******************************************************************************/ 981******************************************************************************/
1002static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set) 982static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
1003{ 983{
@@ -1013,7 +993,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
1013* Description: 993* Description:
1014* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers 994* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
1015* according to sp_pri_to_cos.(which COS has higher priority) 995* according to sp_pri_to_cos.(which COS has higher priority)
1016*. 996*
1017******************************************************************************/ 997******************************************************************************/
1018static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, 998static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
1019 u8 *sp_pri_to_cos) 999 u8 *sp_pri_to_cos)
@@ -1149,8 +1129,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
1149 return -EINVAL; 1129 return -EINVAL;
1150 } 1130 }
1151 1131
1152 /* 1132 /* Upper bound is set according to current link speed (min_w_val
1153 * Upper bound is set according to current link speed (min_w_val
1154 * should be the same for upper bound and COS credit val). 1133 * should be the same for upper bound and COS credit val).
1155 */ 1134 */
1156 bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); 1135 bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
@@ -1160,8 +1139,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
1160 for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { 1139 for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
1161 if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) { 1140 if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
1162 cos_bw_bitmap |= (1 << cos_entry); 1141 cos_bw_bitmap |= (1 << cos_entry);
1163 /* 1142 /* The function also sets the BW in HW(not the mappin
1164 * The function also sets the BW in HW(not the mappin
1165 * yet) 1143 * yet)
1166 */ 1144 */
1167 bnx2x_status = bnx2x_ets_e3b0_set_cos_bw( 1145 bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
@@ -1217,14 +1195,12 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params)
1217 /* ETS disabled configuration */ 1195 /* ETS disabled configuration */
1218 struct bnx2x *bp = params->bp; 1196 struct bnx2x *bp = params->bp;
1219 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); 1197 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
1220 /* 1198 /* Defines which entries (clients) are subjected to WFQ arbitration
1221 * defines which entries (clients) are subjected to WFQ arbitration
1222 * COS0 0x8 1199 * COS0 0x8
1223 * COS1 0x10 1200 * COS1 0x10
1224 */ 1201 */
1225 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); 1202 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
1226 /* 1203 /* Mapping between the ARB_CREDIT_WEIGHT registers and actual
1227 * mapping between the ARB_CREDIT_WEIGHT registers and actual
1228 * client numbers (WEIGHT_0 does not actually have to represent 1204 * client numbers (WEIGHT_0 does not actually have to represent
1229 * client 0) 1205 * client 0)
1230 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 1206 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
@@ -1242,8 +1218,7 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params)
1242 1218
1243 /* Defines the number of consecutive slots for the strict priority */ 1219 /* Defines the number of consecutive slots for the strict priority */
1244 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); 1220 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
1245 /* 1221 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
1246 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
1247 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 1222 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
1248 * entry, 4 - COS1 entry. 1223 * entry, 4 - COS1 entry.
1249 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT 1224 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1298,8 +1273,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1298 u32 val = 0; 1273 u32 val = 0;
1299 1274
1300 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); 1275 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
1301 /* 1276 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
1302 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
1303 * as strict. Bits 0,1,2 - debug and management entries, 1277 * as strict. Bits 0,1,2 - debug and management entries,
1304 * 3 - COS0 entry, 4 - COS1 entry. 1278 * 3 - COS0 entry, 4 - COS1 entry.
1305 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT 1279 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1307,8 +1281,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1307 * MCP and debug are strict 1281 * MCP and debug are strict
1308 */ 1282 */
1309 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); 1283 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
1310 /* 1284 /* For strict priority entries defines the number of consecutive slots
1311 * For strict priority entries defines the number of consecutive slots
1312 * for the highest priority. 1285 * for the highest priority.
1313 */ 1286 */
1314 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 1287 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
@@ -1320,8 +1293,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1320 /* Defines the number of consecutive slots for the strict priority */ 1293 /* Defines the number of consecutive slots for the strict priority */
1321 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); 1294 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
1322 1295
1323 /* 1296 /* Mapping between entry priority to client number (0,1,2 -debug and
1324 * mapping between entry priority to client number (0,1,2 -debug and
1325 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) 1297 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
1326 * 3bits client num. 1298 * 3bits client num.
1327 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 1299 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
@@ -1356,15 +1328,12 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
1356 if (!(params->feature_config_flags & 1328 if (!(params->feature_config_flags &
1357 FEATURE_CONFIG_PFC_ENABLED)) { 1329 FEATURE_CONFIG_PFC_ENABLED)) {
1358 1330
1359 /* 1331 /* RX flow control - Process pause frame in receive direction
1360 * RX flow control - Process pause frame in receive direction
1361 */ 1332 */
1362 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) 1333 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1363 pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; 1334 pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
1364 1335
1365 /* 1336 /* TX flow control - Send pause packet when buffer is full */
1366 * TX flow control - Send pause packet when buffer is full
1367 */
1368 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 1337 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1369 pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; 1338 pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
1370 } else {/* PFC support */ 1339 } else {/* PFC support */
@@ -1457,8 +1426,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
1457static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) 1426static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
1458{ 1427{
1459 u32 mode, emac_base; 1428 u32 mode, emac_base;
1460 /** 1429 /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1461 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1462 * (a value of 49==0x31) and make sure that the AUTO poll is off 1430 * (a value of 49==0x31) and make sure that the AUTO poll is off
1463 */ 1431 */
1464 1432
@@ -1578,15 +1546,6 @@ static void bnx2x_umac_enable(struct link_params *params,
1578 1546
1579 DP(NETIF_MSG_LINK, "enabling UMAC\n"); 1547 DP(NETIF_MSG_LINK, "enabling UMAC\n");
1580 1548
1581 /**
1582 * This register determines on which events the MAC will assert
1583 * error on the i/f to the NIG along w/ EOP.
1584 */
1585
1586 /**
1587 * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
1588 * params->port*0x14, 0xfffff.
1589 */
1590 /* This register opens the gate for the UMAC despite its name */ 1549 /* This register opens the gate for the UMAC despite its name */
1591 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); 1550 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
1592 1551
@@ -1649,8 +1608,7 @@ static void bnx2x_umac_enable(struct link_params *params,
1649 val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; 1608 val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
1650 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); 1609 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1651 1610
1652 /* 1611 /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
1653 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
1654 * length used by the MAC receive logic to check frames. 1612 * length used by the MAC receive logic to check frames.
1655 */ 1613 */
1656 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); 1614 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -1666,8 +1624,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1666 struct bnx2x *bp = params->bp; 1624 struct bnx2x *bp = params->bp;
1667 u32 is_port4mode = bnx2x_is_4_port_mode(bp); 1625 u32 is_port4mode = bnx2x_is_4_port_mode(bp);
1668 1626
1669 /* 1627 /* In 4-port mode, need to set the mode only once, so if XMAC is
1670 * In 4-port mode, need to set the mode only once, so if XMAC is
1671 * already out of reset, it means the mode has already been set, 1628 * already out of reset, it means the mode has already been set,
1672 * and it must not* reset the XMAC again, since it controls both 1629 * and it must not* reset the XMAC again, since it controls both
1673 * ports of the path 1630 * ports of the path
@@ -1691,13 +1648,13 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1691 if (is_port4mode) { 1648 if (is_port4mode) {
1692 DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n"); 1649 DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
1693 1650
1694 /* Set the number of ports on the system side to up to 2 */ 1651 /* Set the number of ports on the system side to up to 2 */
1695 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); 1652 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
1696 1653
1697 /* Set the number of ports on the Warp Core to 10G */ 1654 /* Set the number of ports on the Warp Core to 10G */
1698 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); 1655 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
1699 } else { 1656 } else {
1700 /* Set the number of ports on the system side to 1 */ 1657 /* Set the number of ports on the system side to 1 */
1701 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0); 1658 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
1702 if (max_speed == SPEED_10000) { 1659 if (max_speed == SPEED_10000) {
1703 DP(NETIF_MSG_LINK, 1660 DP(NETIF_MSG_LINK,
@@ -1729,8 +1686,7 @@ static void bnx2x_xmac_disable(struct link_params *params)
1729 1686
1730 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 1687 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
1731 MISC_REGISTERS_RESET_REG_2_XMAC) { 1688 MISC_REGISTERS_RESET_REG_2_XMAC) {
1732 /* 1689 /* Send an indication to change the state in the NIG back to XON
1733 * Send an indication to change the state in the NIG back to XON
1734 * Clearing this bit enables the next set of this bit to get 1690 * Clearing this bit enables the next set of this bit to get
1735 * rising edge 1691 * rising edge
1736 */ 1692 */
@@ -1755,13 +1711,11 @@ static int bnx2x_xmac_enable(struct link_params *params,
1755 1711
1756 bnx2x_xmac_init(params, vars->line_speed); 1712 bnx2x_xmac_init(params, vars->line_speed);
1757 1713
1758 /* 1714 /* This register determines on which events the MAC will assert
1759 * This register determines on which events the MAC will assert
1760 * error on the i/f to the NIG along w/ EOP. 1715 * error on the i/f to the NIG along w/ EOP.
1761 */ 1716 */
1762 1717
1763 /* 1718 /* This register tells the NIG whether to send traffic to UMAC
1764 * This register tells the NIG whether to send traffic to UMAC
1765 * or XMAC 1719 * or XMAC
1766 */ 1720 */
1767 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); 1721 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
@@ -1863,8 +1817,7 @@ static int bnx2x_emac_enable(struct link_params *params,
1863 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 1817 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
1864 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 1818 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
1865 1819
1866 /* 1820 /* Setting this bit causes MAC control frames (except for pause
1867 * Setting this bit causes MAC control frames (except for pause
1868 * frames) to be passed on for processing. This setting has no 1821 * frames) to be passed on for processing. This setting has no
1869 * affect on the operation of the pause frames. This bit effects 1822 * affect on the operation of the pause frames. This bit effects
1870 * all packets regardless of RX Parser packet sorting logic. 1823 * all packets regardless of RX Parser packet sorting logic.
@@ -1963,8 +1916,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
1963 struct link_vars *vars, 1916 struct link_vars *vars,
1964 u8 is_lb) 1917 u8 is_lb)
1965{ 1918{
1966 /* 1919 /* Set rx control: Strip CRC and enable BigMAC to relay
1967 * Set rx control: Strip CRC and enable BigMAC to relay
1968 * control packets to the system as well 1920 * control packets to the system as well
1969 */ 1921 */
1970 u32 wb_data[2]; 1922 u32 wb_data[2];
@@ -2016,8 +1968,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
2016 1968
2017 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); 1969 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
2018 1970
2019 /* 1971 /* Set Time (based unit is 512 bit time) between automatic
2020 * Set Time (based unit is 512 bit time) between automatic
2021 * re-sending of PP packets amd enable automatic re-send of 1972 * re-sending of PP packets amd enable automatic re-send of
2022 * Per-Priroity Packet as long as pp_gen is asserted and 1973 * Per-Priroity Packet as long as pp_gen is asserted and
2023 * pp_disable is low. 1974 * pp_disable is low.
@@ -2086,7 +2037,7 @@ static int bnx2x_pfc_brb_get_config_params(
2086 config_val->default_class1.full_xon = 0; 2037 config_val->default_class1.full_xon = 0;
2087 2038
2088 if (CHIP_IS_E2(bp)) { 2039 if (CHIP_IS_E2(bp)) {
2089 /* class0 defaults */ 2040 /* Class0 defaults */
2090 config_val->default_class0.pause_xoff = 2041 config_val->default_class0.pause_xoff =
2091 DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR; 2042 DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
2092 config_val->default_class0.pause_xon = 2043 config_val->default_class0.pause_xon =
@@ -2095,7 +2046,7 @@ static int bnx2x_pfc_brb_get_config_params(
2095 DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR; 2046 DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
2096 config_val->default_class0.full_xon = 2047 config_val->default_class0.full_xon =
2097 DEFAULT0_E2_BRB_MAC_FULL_XON_THR; 2048 DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
2098 /* pause able*/ 2049 /* Pause able*/
2099 config_val->pauseable_th.pause_xoff = 2050 config_val->pauseable_th.pause_xoff =
2100 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; 2051 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2101 config_val->pauseable_th.pause_xon = 2052 config_val->pauseable_th.pause_xon =
@@ -2114,7 +2065,7 @@ static int bnx2x_pfc_brb_get_config_params(
2114 config_val->non_pauseable_th.full_xon = 2065 config_val->non_pauseable_th.full_xon =
2115 PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; 2066 PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2116 } else if (CHIP_IS_E3A0(bp)) { 2067 } else if (CHIP_IS_E3A0(bp)) {
2117 /* class0 defaults */ 2068 /* Class0 defaults */
2118 config_val->default_class0.pause_xoff = 2069 config_val->default_class0.pause_xoff =
2119 DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR; 2070 DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
2120 config_val->default_class0.pause_xon = 2071 config_val->default_class0.pause_xon =
@@ -2123,7 +2074,7 @@ static int bnx2x_pfc_brb_get_config_params(
2123 DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR; 2074 DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
2124 config_val->default_class0.full_xon = 2075 config_val->default_class0.full_xon =
2125 DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR; 2076 DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
2126 /* pause able */ 2077 /* Pause able */
2127 config_val->pauseable_th.pause_xoff = 2078 config_val->pauseable_th.pause_xoff =
2128 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; 2079 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2129 config_val->pauseable_th.pause_xon = 2080 config_val->pauseable_th.pause_xon =
@@ -2142,7 +2093,7 @@ static int bnx2x_pfc_brb_get_config_params(
2142 config_val->non_pauseable_th.full_xon = 2093 config_val->non_pauseable_th.full_xon =
2143 PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; 2094 PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2144 } else if (CHIP_IS_E3B0(bp)) { 2095 } else if (CHIP_IS_E3B0(bp)) {
2145 /* class0 defaults */ 2096 /* Class0 defaults */
2146 config_val->default_class0.pause_xoff = 2097 config_val->default_class0.pause_xoff =
2147 DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR; 2098 DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
2148 config_val->default_class0.pause_xon = 2099 config_val->default_class0.pause_xon =
@@ -2305,27 +2256,23 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2305 reg_th_config = &config_val.non_pauseable_th; 2256 reg_th_config = &config_val.non_pauseable_th;
2306 } else 2257 } else
2307 reg_th_config = &config_val.default_class0; 2258 reg_th_config = &config_val.default_class0;
2308 /* 2259 /* The number of free blocks below which the pause signal to class 0
2309 * The number of free blocks below which the pause signal to class 0
2310 * of MAC #n is asserted. n=0,1 2260 * of MAC #n is asserted. n=0,1
2311 */ 2261 */
2312 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 : 2262 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
2313 BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , 2263 BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
2314 reg_th_config->pause_xoff); 2264 reg_th_config->pause_xoff);
2315 /* 2265 /* The number of free blocks above which the pause signal to class 0
2316 * The number of free blocks above which the pause signal to class 0
2317 * of MAC #n is de-asserted. n=0,1 2266 * of MAC #n is de-asserted. n=0,1
2318 */ 2267 */
2319 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 : 2268 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
2320 BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon); 2269 BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
2321 /* 2270 /* The number of free blocks below which the full signal to class 0
2322 * The number of free blocks below which the full signal to class 0
2323 * of MAC #n is asserted. n=0,1 2271 * of MAC #n is asserted. n=0,1
2324 */ 2272 */
2325 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 : 2273 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
2326 BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff); 2274 BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
2327 /* 2275 /* The number of free blocks above which the full signal to class 0
2328 * The number of free blocks above which the full signal to class 0
2329 * of MAC #n is de-asserted. n=0,1 2276 * of MAC #n is de-asserted. n=0,1
2330 */ 2277 */
2331 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : 2278 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
@@ -2339,30 +2286,26 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2339 reg_th_config = &config_val.non_pauseable_th; 2286 reg_th_config = &config_val.non_pauseable_th;
2340 } else 2287 } else
2341 reg_th_config = &config_val.default_class1; 2288 reg_th_config = &config_val.default_class1;
2342 /* 2289 /* The number of free blocks below which the pause signal to
2343 * The number of free blocks below which the pause signal to
2344 * class 1 of MAC #n is asserted. n=0,1 2290 * class 1 of MAC #n is asserted. n=0,1
2345 */ 2291 */
2346 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : 2292 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
2347 BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, 2293 BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
2348 reg_th_config->pause_xoff); 2294 reg_th_config->pause_xoff);
2349 2295
2350 /* 2296 /* The number of free blocks above which the pause signal to
2351 * The number of free blocks above which the pause signal to
2352 * class 1 of MAC #n is de-asserted. n=0,1 2297 * class 1 of MAC #n is de-asserted. n=0,1
2353 */ 2298 */
2354 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : 2299 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
2355 BRB1_REG_PAUSE_1_XON_THRESHOLD_0, 2300 BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
2356 reg_th_config->pause_xon); 2301 reg_th_config->pause_xon);
2357 /* 2302 /* The number of free blocks below which the full signal to
2358 * The number of free blocks below which the full signal to
2359 * class 1 of MAC #n is asserted. n=0,1 2303 * class 1 of MAC #n is asserted. n=0,1
2360 */ 2304 */
2361 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : 2305 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
2362 BRB1_REG_FULL_1_XOFF_THRESHOLD_0, 2306 BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
2363 reg_th_config->full_xoff); 2307 reg_th_config->full_xoff);
2364 /* 2308 /* The number of free blocks above which the full signal to
2365 * The number of free blocks above which the full signal to
2366 * class 1 of MAC #n is de-asserted. n=0,1 2309 * class 1 of MAC #n is de-asserted. n=0,1
2367 */ 2310 */
2368 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : 2311 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
@@ -2379,49 +2322,41 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2379 REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE, 2322 REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
2380 e3b0_val.per_class_guaranty_mode); 2323 e3b0_val.per_class_guaranty_mode);
2381 2324
2382 /* 2325 /* The hysteresis on the guarantied buffer space for the Lb
2383 * The hysteresis on the guarantied buffer space for the Lb
2384 * port before signaling XON. 2326 * port before signaling XON.
2385 */ 2327 */
2386 REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 2328 REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
2387 e3b0_val.lb_guarantied_hyst); 2329 e3b0_val.lb_guarantied_hyst);
2388 2330
2389 /* 2331 /* The number of free blocks below which the full signal to the
2390 * The number of free blocks below which the full signal to the
2391 * LB port is asserted. 2332 * LB port is asserted.
2392 */ 2333 */
2393 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 2334 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
2394 e3b0_val.full_lb_xoff_th); 2335 e3b0_val.full_lb_xoff_th);
2395 /* 2336 /* The number of free blocks above which the full signal to the
2396 * The number of free blocks above which the full signal to the
2397 * LB port is de-asserted. 2337 * LB port is de-asserted.
2398 */ 2338 */
2399 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 2339 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
2400 e3b0_val.full_lb_xon_threshold); 2340 e3b0_val.full_lb_xon_threshold);
2401 /* 2341 /* The number of blocks guarantied for the MAC #n port. n=0,1
2402 * The number of blocks guarantied for the MAC #n port. n=0,1
2403 */ 2342 */
2404 2343
2405 /* The number of blocks guarantied for the LB port.*/ 2344 /* The number of blocks guarantied for the LB port. */
2406 REG_WR(bp, BRB1_REG_LB_GUARANTIED, 2345 REG_WR(bp, BRB1_REG_LB_GUARANTIED,
2407 e3b0_val.lb_guarantied); 2346 e3b0_val.lb_guarantied);
2408 2347
2409 /* 2348 /* The number of blocks guarantied for the MAC #n port. */
2410 * The number of blocks guarantied for the MAC #n port.
2411 */
2412 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, 2349 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
2413 2 * e3b0_val.mac_0_class_t_guarantied); 2350 2 * e3b0_val.mac_0_class_t_guarantied);
2414 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, 2351 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
2415 2 * e3b0_val.mac_1_class_t_guarantied); 2352 2 * e3b0_val.mac_1_class_t_guarantied);
2416 /* 2353 /* The number of blocks guarantied for class #t in MAC0. t=0,1
2417 * The number of blocks guarantied for class #t in MAC0. t=0,1
2418 */ 2354 */
2419 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, 2355 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
2420 e3b0_val.mac_0_class_t_guarantied); 2356 e3b0_val.mac_0_class_t_guarantied);
2421 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, 2357 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
2422 e3b0_val.mac_0_class_t_guarantied); 2358 e3b0_val.mac_0_class_t_guarantied);
2423 /* 2359 /* The hysteresis on the guarantied buffer space for class in
2424 * The hysteresis on the guarantied buffer space for class in
2425 * MAC0. t=0,1 2360 * MAC0. t=0,1
2426 */ 2361 */
2427 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, 2362 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
@@ -2429,15 +2364,13 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2429 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, 2364 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
2430 e3b0_val.mac_0_class_t_guarantied_hyst); 2365 e3b0_val.mac_0_class_t_guarantied_hyst);
2431 2366
2432 /* 2367 /* The number of blocks guarantied for class #t in MAC1.t=0,1
2433 * The number of blocks guarantied for class #t in MAC1.t=0,1
2434 */ 2368 */
2435 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, 2369 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
2436 e3b0_val.mac_1_class_t_guarantied); 2370 e3b0_val.mac_1_class_t_guarantied);
2437 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, 2371 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
2438 e3b0_val.mac_1_class_t_guarantied); 2372 e3b0_val.mac_1_class_t_guarantied);
2439 /* 2373 /* The hysteresis on the guarantied buffer space for class #t
2440 * The hysteresis on the guarantied buffer space for class #t
2441 * in MAC1. t=0,1 2374 * in MAC1. t=0,1
2442 */ 2375 */
2443 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, 2376 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
@@ -2520,15 +2453,13 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
2520 FEATURE_CONFIG_PFC_ENABLED; 2453 FEATURE_CONFIG_PFC_ENABLED;
2521 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); 2454 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
2522 2455
2523 /* 2456 /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
2524 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
2525 * MAC control frames (that are not pause packets) 2457 * MAC control frames (that are not pause packets)
2526 * will be forwarded to the XCM. 2458 * will be forwarded to the XCM.
2527 */ 2459 */
2528 xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK : 2460 xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
2529 NIG_REG_LLH0_XCM_MASK); 2461 NIG_REG_LLH0_XCM_MASK);
2530 /* 2462 /* NIG params will override non PFC params, since it's possible to
2531 * nig params will override non PFC params, since it's possible to
2532 * do transition from PFC to SAFC 2463 * do transition from PFC to SAFC
2533 */ 2464 */
2534 if (set_pfc) { 2465 if (set_pfc) {
@@ -2548,7 +2479,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
2548 llfc_out_en = nig_params->llfc_out_en; 2479 llfc_out_en = nig_params->llfc_out_en;
2549 llfc_enable = nig_params->llfc_enable; 2480 llfc_enable = nig_params->llfc_enable;
2550 pause_enable = nig_params->pause_enable; 2481 pause_enable = nig_params->pause_enable;
2551 } else /*defaul non PFC mode - PAUSE */ 2482 } else /* Default non PFC mode - PAUSE */
2552 pause_enable = 1; 2483 pause_enable = 1;
2553 2484
2554 xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : 2485 xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
@@ -2608,8 +2539,7 @@ int bnx2x_update_pfc(struct link_params *params,
2608 struct link_vars *vars, 2539 struct link_vars *vars,
2609 struct bnx2x_nig_brb_pfc_port_params *pfc_params) 2540 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
2610{ 2541{
2611 /* 2542 /* The PFC and pause are orthogonal to one another, meaning when
2612 * The PFC and pause are orthogonal to one another, meaning when
2613 * PFC is enabled, the pause are disabled, and when PFC is 2543 * PFC is enabled, the pause are disabled, and when PFC is
2614 * disabled, pause are set according to the pause result. 2544 * disabled, pause are set according to the pause result.
2615 */ 2545 */
@@ -3148,7 +3078,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3148 EMAC_MDIO_STATUS_10MB); 3078 EMAC_MDIO_STATUS_10MB);
3149 3079
3150 /* address */ 3080 /* address */
3151
3152 tmp = ((phy->addr << 21) | (devad << 16) | reg | 3081 tmp = ((phy->addr << 21) | (devad << 16) | reg |
3153 EMAC_MDIO_COMM_COMMAND_ADDRESS | 3082 EMAC_MDIO_COMM_COMMAND_ADDRESS |
3154 EMAC_MDIO_COMM_START_BUSY); 3083 EMAC_MDIO_COMM_START_BUSY);
@@ -3337,8 +3266,7 @@ int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
3337 u8 devad, u16 reg, u16 *ret_val) 3266 u8 devad, u16 reg, u16 *ret_val)
3338{ 3267{
3339 u8 phy_index; 3268 u8 phy_index;
3340 /* 3269 /* Probe for the phy according to the given phy_addr, and execute
3341 * Probe for the phy according to the given phy_addr, and execute
3342 * the read request on it 3270 * the read request on it
3343 */ 3271 */
3344 for (phy_index = 0; phy_index < params->num_phys; phy_index++) { 3272 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3355,8 +3283,7 @@ int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
3355 u8 devad, u16 reg, u16 val) 3283 u8 devad, u16 reg, u16 val)
3356{ 3284{
3357 u8 phy_index; 3285 u8 phy_index;
3358 /* 3286 /* Probe for the phy according to the given phy_addr, and execute
3359 * Probe for the phy according to the given phy_addr, and execute
3360 * the write request on it 3287 * the write request on it
3361 */ 3288 */
3362 for (phy_index = 0; phy_index < params->num_phys; phy_index++) { 3289 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3382,7 +3309,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3382 if (bnx2x_is_4_port_mode(bp)) { 3309 if (bnx2x_is_4_port_mode(bp)) {
3383 u32 port_swap, port_swap_ovr; 3310 u32 port_swap, port_swap_ovr;
3384 3311
3385 /*figure out path swap value */ 3312 /* Figure out path swap value */
3386 path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); 3313 path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
3387 if (path_swap_ovr & 0x1) 3314 if (path_swap_ovr & 0x1)
3388 path_swap = (path_swap_ovr & 0x2); 3315 path_swap = (path_swap_ovr & 0x2);
@@ -3392,7 +3319,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3392 if (path_swap) 3319 if (path_swap)
3393 path = path ^ 1; 3320 path = path ^ 1;
3394 3321
3395 /*figure out port swap value */ 3322 /* Figure out port swap value */
3396 port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); 3323 port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
3397 if (port_swap_ovr & 0x1) 3324 if (port_swap_ovr & 0x1)
3398 port_swap = (port_swap_ovr & 0x2); 3325 port_swap = (port_swap_ovr & 0x2);
@@ -3405,7 +3332,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3405 lane = (port<<1) + path; 3332 lane = (port<<1) + path;
3406 } else { /* two port mode - no port swap */ 3333 } else { /* two port mode - no port swap */
3407 3334
3408 /*figure out path swap value */ 3335 /* Figure out path swap value */
3409 path_swap_ovr = 3336 path_swap_ovr =
3410 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); 3337 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
3411 if (path_swap_ovr & 0x1) { 3338 if (path_swap_ovr & 0x1) {
@@ -3437,8 +3364,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
3437 3364
3438 if (USES_WARPCORE(bp)) { 3365 if (USES_WARPCORE(bp)) {
3439 aer_val = bnx2x_get_warpcore_lane(phy, params); 3366 aer_val = bnx2x_get_warpcore_lane(phy, params);
3440 /* 3367 /* In Dual-lane mode, two lanes are joined together,
3441 * In Dual-lane mode, two lanes are joined together,
3442 * so in order to configure them, the AER broadcast method is 3368 * so in order to configure them, the AER broadcast method is
3443 * used here. 3369 * used here.
3444 * 0x200 is the broadcast address for lanes 0,1 3370 * 0x200 is the broadcast address for lanes 0,1
@@ -3518,8 +3444,7 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
3518{ 3444{
3519 struct bnx2x *bp = params->bp; 3445 struct bnx2x *bp = params->bp;
3520 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 3446 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3521 /** 3447 /* Resolve pause mode and advertisement Please refer to Table
3522 * resolve pause mode and advertisement Please refer to Table
3523 * 28B-3 of the 802.3ab-1999 spec 3448 * 28B-3 of the 802.3ab-1999 spec
3524 */ 3449 */
3525 3450
@@ -3642,6 +3567,7 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
3642 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; 3567 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
3643 if (pause_result & (1<<1)) 3568 if (pause_result & (1<<1))
3644 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; 3569 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
3570
3645} 3571}
3646 3572
3647static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy, 3573static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
@@ -3698,6 +3624,7 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
3698 bnx2x_pause_resolve(vars, pause_result); 3624 bnx2x_pause_resolve(vars, pause_result);
3699 3625
3700} 3626}
3627
3701static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, 3628static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3702 struct link_params *params, 3629 struct link_params *params,
3703 struct link_vars *vars) 3630 struct link_vars *vars)
@@ -3819,9 +3746,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3819 3746
3820 /* Advertise pause */ 3747 /* Advertise pause */
3821 bnx2x_ext_phy_set_pause(params, phy, vars); 3748 bnx2x_ext_phy_set_pause(params, phy, vars);
3822 3749 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
3823 /*
3824 * Set KR Autoneg Work-Around flag for Warpcore version older than D108
3825 */ 3750 */
3826 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3751 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3827 MDIO_WC_REG_UC_INFO_B1_VERSION, &val16); 3752 MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
@@ -3829,7 +3754,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3829 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); 3754 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
3830 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; 3755 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3831 } 3756 }
3832
3833 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3757 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3834 MDIO_WC_REG_DIGITAL5_MISC7, &val16); 3758 MDIO_WC_REG_DIGITAL5_MISC7, &val16);
3835 3759
@@ -3903,7 +3827,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3903 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 3827 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3904 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); 3828 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
3905 3829
3906 /*Enable encoded forced speed */ 3830 /* Enable encoded forced speed */
3907 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3831 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3908 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); 3832 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
3909 3833
@@ -4265,8 +4189,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
4265 PORT_HW_CFG_E3_MOD_ABS_MASK) >> 4189 PORT_HW_CFG_E3_MOD_ABS_MASK) >>
4266 PORT_HW_CFG_E3_MOD_ABS_SHIFT; 4190 PORT_HW_CFG_E3_MOD_ABS_SHIFT;
4267 4191
4268 /* 4192 /* Should not happen. This function called upon interrupt
4269 * Should not happen. This function called upon interrupt
4270 * triggered by GPIO ( since EPIO can only generate interrupts 4193 * triggered by GPIO ( since EPIO can only generate interrupts
4271 * to MCP). 4194 * to MCP).
4272 * So if this function was called and none of the GPIOs was set, 4195 * So if this function was called and none of the GPIOs was set,
@@ -4366,7 +4289,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4366 "link up, rx_tx_asic_rst 0x%x\n", 4289 "link up, rx_tx_asic_rst 0x%x\n",
4367 vars->rx_tx_asic_rst); 4290 vars->rx_tx_asic_rst);
4368 } else { 4291 } else {
4369 /*reset the lane to see if link comes up.*/ 4292 /* Reset the lane to see if link comes up.*/
4370 bnx2x_warpcore_reset_lane(bp, phy, 1); 4293 bnx2x_warpcore_reset_lane(bp, phy, 1);
4371 bnx2x_warpcore_reset_lane(bp, phy, 0); 4294 bnx2x_warpcore_reset_lane(bp, phy, 0);
4372 4295
@@ -4387,7 +4310,6 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4387 } /*params->rx_tx_asic_rst*/ 4310 } /*params->rx_tx_asic_rst*/
4388 4311
4389} 4312}
4390
4391static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, 4313static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4392 struct link_params *params, 4314 struct link_params *params,
4393 struct link_vars *vars) 4315 struct link_vars *vars)
@@ -4545,7 +4467,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
4545 /* Update those 1-copy registers */ 4467 /* Update those 1-copy registers */
4546 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4468 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
4547 MDIO_AER_BLOCK_AER_REG, 0); 4469 MDIO_AER_BLOCK_AER_REG, 0);
4548 /* Enable 1G MDIO (1-copy) */ 4470 /* Enable 1G MDIO (1-copy) */
4549 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4471 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4550 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, 4472 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4551 &val16); 4473 &val16);
@@ -4624,43 +4546,43 @@ void bnx2x_sync_link(struct link_params *params,
4624 vars->duplex = DUPLEX_FULL; 4546 vars->duplex = DUPLEX_FULL;
4625 switch (vars->link_status & 4547 switch (vars->link_status &
4626 LINK_STATUS_SPEED_AND_DUPLEX_MASK) { 4548 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
4627 case LINK_10THD: 4549 case LINK_10THD:
4628 vars->duplex = DUPLEX_HALF; 4550 vars->duplex = DUPLEX_HALF;
4629 /* fall thru */ 4551 /* Fall thru */
4630 case LINK_10TFD: 4552 case LINK_10TFD:
4631 vars->line_speed = SPEED_10; 4553 vars->line_speed = SPEED_10;
4632 break; 4554 break;
4633 4555
4634 case LINK_100TXHD: 4556 case LINK_100TXHD:
4635 vars->duplex = DUPLEX_HALF; 4557 vars->duplex = DUPLEX_HALF;
4636 /* fall thru */ 4558 /* Fall thru */
4637 case LINK_100T4: 4559 case LINK_100T4:
4638 case LINK_100TXFD: 4560 case LINK_100TXFD:
4639 vars->line_speed = SPEED_100; 4561 vars->line_speed = SPEED_100;
4640 break; 4562 break;
4641 4563
4642 case LINK_1000THD: 4564 case LINK_1000THD:
4643 vars->duplex = DUPLEX_HALF; 4565 vars->duplex = DUPLEX_HALF;
4644 /* fall thru */ 4566 /* Fall thru */
4645 case LINK_1000TFD: 4567 case LINK_1000TFD:
4646 vars->line_speed = SPEED_1000; 4568 vars->line_speed = SPEED_1000;
4647 break; 4569 break;
4648 4570
4649 case LINK_2500THD: 4571 case LINK_2500THD:
4650 vars->duplex = DUPLEX_HALF; 4572 vars->duplex = DUPLEX_HALF;
4651 /* fall thru */ 4573 /* Fall thru */
4652 case LINK_2500TFD: 4574 case LINK_2500TFD:
4653 vars->line_speed = SPEED_2500; 4575 vars->line_speed = SPEED_2500;
4654 break; 4576 break;
4655 4577
4656 case LINK_10GTFD: 4578 case LINK_10GTFD:
4657 vars->line_speed = SPEED_10000; 4579 vars->line_speed = SPEED_10000;
4658 break; 4580 break;
4659 case LINK_20GTFD: 4581 case LINK_20GTFD:
4660 vars->line_speed = SPEED_20000; 4582 vars->line_speed = SPEED_20000;
4661 break; 4583 break;
4662 default: 4584 default:
4663 break; 4585 break;
4664 } 4586 }
4665 vars->flow_ctrl = 0; 4587 vars->flow_ctrl = 0;
4666 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) 4588 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
@@ -4835,9 +4757,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
4835 struct bnx2x_phy *phy) 4757 struct bnx2x_phy *phy)
4836{ 4758{
4837 struct bnx2x *bp = params->bp; 4759 struct bnx2x *bp = params->bp;
4838 /* 4760 /* Each two bits represents a lane number:
4839 * Each two bits represents a lane number: 4761 * No swap is 0123 => 0x1b no need to enable the swap
4840 * No swap is 0123 => 0x1b no need to enable the swap
4841 */ 4762 */
4842 u16 rx_lane_swap, tx_lane_swap; 4763 u16 rx_lane_swap, tx_lane_swap;
4843 4764
@@ -5051,8 +4972,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
5051 MDIO_REG_BANK_COMBO_IEEE0, 4972 MDIO_REG_BANK_COMBO_IEEE0,
5052 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 4973 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
5053 4974
5054 /* 4975 /* Program speed
5055 * program speed
5056 * - needed only if the speed is greater than 1G (2.5G or 10G) 4976 * - needed only if the speed is greater than 1G (2.5G or 10G)
5057 */ 4977 */
5058 CL22_RD_OVER_CL45(bp, phy, 4978 CL22_RD_OVER_CL45(bp, phy,
@@ -5087,8 +5007,6 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
5087 struct bnx2x *bp = params->bp; 5007 struct bnx2x *bp = params->bp;
5088 u16 val = 0; 5008 u16 val = 0;
5089 5009
5090 /* configure the 48 bits for BAM AN */
5091
5092 /* set extended capabilities */ 5010 /* set extended capabilities */
5093 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 5011 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
5094 val |= MDIO_OVER_1G_UP1_2_5G; 5012 val |= MDIO_OVER_1G_UP1_2_5G;
@@ -5234,11 +5152,8 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5234 } 5152 }
5235} 5153}
5236 5154
5237 5155/* Link management
5238/*
5239 * link management
5240 */ 5156 */
5241
5242static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, 5157static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
5243 struct link_params *params) 5158 struct link_params *params)
5244{ 5159{
@@ -5383,8 +5298,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
5383 "ustat_val(0x8371) = 0x%x\n", ustat_val); 5298 "ustat_val(0x8371) = 0x%x\n", ustat_val);
5384 return; 5299 return;
5385 } 5300 }
5386 /* 5301 /* Step 3: Check CL37 Message Pages received to indicate LP
5387 * Step 3: Check CL37 Message Pages received to indicate LP
5388 * supports only CL37 5302 * supports only CL37
5389 */ 5303 */
5390 CL22_RD_OVER_CL45(bp, phy, 5304 CL22_RD_OVER_CL45(bp, phy,
@@ -5401,8 +5315,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
5401 cl37_fsm_received); 5315 cl37_fsm_received);
5402 return; 5316 return;
5403 } 5317 }
5404 /* 5318 /* The combined cl37/cl73 fsm state information indicating that
5405 * The combined cl37/cl73 fsm state information indicating that
5406 * we are connected to a device which does not support cl73, but 5319 * we are connected to a device which does not support cl73, but
5407 * does support cl37 BAM. In this case we disable cl73 and 5320 * does support cl37 BAM. In this case we disable cl73 and
5408 * restart cl37 auto-neg 5321 * restart cl37 auto-neg
@@ -5973,8 +5886,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
5973{ 5886{
5974 u32 latch_status = 0; 5887 u32 latch_status = 0;
5975 5888
5976 /* 5889 /* Disable the MI INT ( external phy int ) by writing 1 to the
5977 * Disable the MI INT ( external phy int ) by writing 1 to the
5978 * status register. Link down indication is high-active-signal, 5890 * status register. Link down indication is high-active-signal,
5979 * so in this case we need to write the status to clear the XOR 5891 * so in this case we need to write the status to clear the XOR
5980 */ 5892 */
@@ -6009,8 +5921,7 @@ static void bnx2x_link_int_ack(struct link_params *params,
6009 struct bnx2x *bp = params->bp; 5921 struct bnx2x *bp = params->bp;
6010 u8 port = params->port; 5922 u8 port = params->port;
6011 u32 mask; 5923 u32 mask;
6012 /* 5924 /* First reset all status we assume only one line will be
6013 * First reset all status we assume only one line will be
6014 * change at a time 5925 * change at a time
6015 */ 5926 */
6016 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 5927 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -6024,8 +5935,7 @@ static void bnx2x_link_int_ack(struct link_params *params,
6024 if (is_10g_plus) 5935 if (is_10g_plus)
6025 mask = NIG_STATUS_XGXS0_LINK10G; 5936 mask = NIG_STATUS_XGXS0_LINK10G;
6026 else if (params->switch_cfg == SWITCH_CFG_10G) { 5937 else if (params->switch_cfg == SWITCH_CFG_10G) {
6027 /* 5938 /* Disable the link interrupt by writing 1 to
6028 * Disable the link interrupt by writing 1 to
6029 * the relevant lane in the status register 5939 * the relevant lane in the status register
6030 */ 5940 */
6031 u32 ser_lane = 5941 u32 ser_lane =
@@ -6227,8 +6137,7 @@ int bnx2x_set_led(struct link_params *params,
6227 break; 6137 break;
6228 6138
6229 case LED_MODE_OPER: 6139 case LED_MODE_OPER:
6230 /* 6140 /* For all other phys, OPER mode is same as ON, so in case
6231 * For all other phys, OPER mode is same as ON, so in case
6232 * link is down, do nothing 6141 * link is down, do nothing
6233 */ 6142 */
6234 if (!vars->link_up) 6143 if (!vars->link_up)
@@ -6239,9 +6148,7 @@ int bnx2x_set_led(struct link_params *params,
6239 (params->phy[EXT_PHY1].type == 6148 (params->phy[EXT_PHY1].type ==
6240 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && 6149 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
6241 CHIP_IS_E2(bp) && params->num_phys == 2) { 6150 CHIP_IS_E2(bp) && params->num_phys == 2) {
6242 /* 6151 /* This is a work-around for E2+8727 Configurations */
6243 * This is a work-around for E2+8727 Configurations
6244 */
6245 if (mode == LED_MODE_ON || 6152 if (mode == LED_MODE_ON ||
6246 speed == SPEED_10000){ 6153 speed == SPEED_10000){
6247 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 6154 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -6250,8 +6157,7 @@ int bnx2x_set_led(struct link_params *params,
6250 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 6157 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
6251 EMAC_WR(bp, EMAC_REG_EMAC_LED, 6158 EMAC_WR(bp, EMAC_REG_EMAC_LED,
6252 (tmp | EMAC_LED_OVERRIDE)); 6159 (tmp | EMAC_LED_OVERRIDE));
6253 /* 6160 /* Return here without enabling traffic
6254 * return here without enabling traffic
6255 * LED blink and setting rate in ON mode. 6161 * LED blink and setting rate in ON mode.
6256 * In oper mode, enabling LED blink 6162 * In oper mode, enabling LED blink
6257 * and setting rate is needed. 6163 * and setting rate is needed.
@@ -6260,8 +6166,7 @@ int bnx2x_set_led(struct link_params *params,
6260 return rc; 6166 return rc;
6261 } 6167 }
6262 } else if (SINGLE_MEDIA_DIRECT(params)) { 6168 } else if (SINGLE_MEDIA_DIRECT(params)) {
6263 /* 6169 /* This is a work-around for HW issue found when link
6264 * This is a work-around for HW issue found when link
6265 * is up in CL73 6170 * is up in CL73
6266 */ 6171 */
6267 if ((!CHIP_IS_E3(bp)) || 6172 if ((!CHIP_IS_E3(bp)) ||
@@ -6310,10 +6215,7 @@ int bnx2x_set_led(struct link_params *params,
6310 (speed == SPEED_1000) || 6215 (speed == SPEED_1000) ||
6311 (speed == SPEED_100) || 6216 (speed == SPEED_100) ||
6312 (speed == SPEED_10))) { 6217 (speed == SPEED_10))) {
6313 /* 6218 /* For speeds less than 10G LED scheme is different */
6314 * On Everest 1 Ax chip versions for speeds less than
6315 * 10G LED scheme is different
6316 */
6317 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 6219 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
6318 + port*4, 1); 6220 + port*4, 1);
6319 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + 6221 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
@@ -6333,8 +6235,7 @@ int bnx2x_set_led(struct link_params *params,
6333 6235
6334} 6236}
6335 6237
6336/* 6238/* This function comes to reflect the actual link state read DIRECTLY from the
6337 * This function comes to reflect the actual link state read DIRECTLY from the
6338 * HW 6239 * HW
6339 */ 6240 */
6340int bnx2x_test_link(struct link_params *params, struct link_vars *vars, 6241int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
@@ -6422,16 +6323,14 @@ static int bnx2x_link_initialize(struct link_params *params,
6422 int rc = 0; 6323 int rc = 0;
6423 u8 phy_index, non_ext_phy; 6324 u8 phy_index, non_ext_phy;
6424 struct bnx2x *bp = params->bp; 6325 struct bnx2x *bp = params->bp;
6425 /* 6326 /* In case of external phy existence, the line speed would be the
6426 * In case of external phy existence, the line speed would be the
6427 * line speed linked up by the external phy. In case it is direct 6327 * line speed linked up by the external phy. In case it is direct
6428 * only, then the line_speed during initialization will be 6328 * only, then the line_speed during initialization will be
6429 * equal to the req_line_speed 6329 * equal to the req_line_speed
6430 */ 6330 */
6431 vars->line_speed = params->phy[INT_PHY].req_line_speed; 6331 vars->line_speed = params->phy[INT_PHY].req_line_speed;
6432 6332
6433 /* 6333 /* Initialize the internal phy in case this is a direct board
6434 * Initialize the internal phy in case this is a direct board
6435 * (no external phys), or this board has external phy which requires 6334 * (no external phys), or this board has external phy which requires
6436 * to first. 6335 * to first.
6437 */ 6336 */
@@ -6463,8 +6362,7 @@ static int bnx2x_link_initialize(struct link_params *params,
6463 } else { 6362 } else {
6464 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 6363 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6465 phy_index++) { 6364 phy_index++) {
6466 /* 6365 /* No need to initialize second phy in case of first
6467 * No need to initialize second phy in case of first
6468 * phy only selection. In case of second phy, we do 6366 * phy only selection. In case of second phy, we do
6469 * need to initialize the first phy, since they are 6367 * need to initialize the first phy, since they are
6470 * connected. 6368 * connected.
@@ -6492,7 +6390,6 @@ static int bnx2x_link_initialize(struct link_params *params,
6492 NIG_STATUS_XGXS0_LINK_STATUS | 6390 NIG_STATUS_XGXS0_LINK_STATUS |
6493 NIG_STATUS_SERDES0_LINK_STATUS | 6391 NIG_STATUS_SERDES0_LINK_STATUS |
6494 NIG_MASK_MI_INT)); 6392 NIG_MASK_MI_INT));
6495 bnx2x_update_mng(params, vars->link_status);
6496 return rc; 6393 return rc;
6497} 6394}
6498 6395
@@ -6577,7 +6474,7 @@ static int bnx2x_update_link_up(struct link_params *params,
6577 u8 link_10g) 6474 u8 link_10g)
6578{ 6475{
6579 struct bnx2x *bp = params->bp; 6476 struct bnx2x *bp = params->bp;
6580 u8 port = params->port; 6477 u8 phy_idx, port = params->port;
6581 int rc = 0; 6478 int rc = 0;
6582 6479
6583 vars->link_status |= (LINK_STATUS_LINK_UP | 6480 vars->link_status |= (LINK_STATUS_LINK_UP |
@@ -6641,11 +6538,18 @@ static int bnx2x_update_link_up(struct link_params *params,
6641 6538
6642 /* update shared memory */ 6539 /* update shared memory */
6643 bnx2x_update_mng(params, vars->link_status); 6540 bnx2x_update_mng(params, vars->link_status);
6541
6542 /* Check remote fault */
6543 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
6544 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
6545 bnx2x_check_half_open_conn(params, vars, 0);
6546 break;
6547 }
6548 }
6644 msleep(20); 6549 msleep(20);
6645 return rc; 6550 return rc;
6646} 6551}
6647/* 6552/* The bnx2x_link_update function should be called upon link
6648 * The bnx2x_link_update function should be called upon link
6649 * interrupt. 6553 * interrupt.
6650 * Link is considered up as follows: 6554 * Link is considered up as follows:
6651 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs 6555 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
@@ -6702,8 +6606,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6702 if (!CHIP_IS_E3(bp)) 6606 if (!CHIP_IS_E3(bp))
6703 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 6607 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6704 6608
6705 /* 6609 /* Step 1:
6706 * Step 1:
6707 * Check external link change only for external phys, and apply 6610 * Check external link change only for external phys, and apply
6708 * priority selection between them in case the link on both phys 6611 * priority selection between them in case the link on both phys
6709 * is up. Note that instead of the common vars, a temporary 6612 * is up. Note that instead of the common vars, a temporary
@@ -6734,23 +6637,20 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6734 switch (bnx2x_phy_selection(params)) { 6637 switch (bnx2x_phy_selection(params)) {
6735 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 6638 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
6736 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 6639 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
6737 /* 6640 /* In this option, the first PHY makes sure to pass the
6738 * In this option, the first PHY makes sure to pass the
6739 * traffic through itself only. 6641 * traffic through itself only.
6740 * Its not clear how to reset the link on the second phy 6642 * Its not clear how to reset the link on the second phy
6741 */ 6643 */
6742 active_external_phy = EXT_PHY1; 6644 active_external_phy = EXT_PHY1;
6743 break; 6645 break;
6744 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 6646 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
6745 /* 6647 /* In this option, the first PHY makes sure to pass the
6746 * In this option, the first PHY makes sure to pass the
6747 * traffic through the second PHY. 6648 * traffic through the second PHY.
6748 */ 6649 */
6749 active_external_phy = EXT_PHY2; 6650 active_external_phy = EXT_PHY2;
6750 break; 6651 break;
6751 default: 6652 default:
6752 /* 6653 /* Link indication on both PHYs with the following cases
6753 * Link indication on both PHYs with the following cases
6754 * is invalid: 6654 * is invalid:
6755 * - FIRST_PHY means that second phy wasn't initialized, 6655 * - FIRST_PHY means that second phy wasn't initialized,
6756 * hence its link is expected to be down 6656 * hence its link is expected to be down
@@ -6767,8 +6667,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6767 } 6667 }
6768 } 6668 }
6769 prev_line_speed = vars->line_speed; 6669 prev_line_speed = vars->line_speed;
6770 /* 6670 /* Step 2:
6771 * Step 2:
6772 * Read the status of the internal phy. In case of 6671 * Read the status of the internal phy. In case of
6773 * DIRECT_SINGLE_MEDIA board, this link is the external link, 6672 * DIRECT_SINGLE_MEDIA board, this link is the external link,
6774 * otherwise this is the link between the 577xx and the first 6673 * otherwise this is the link between the 577xx and the first
@@ -6778,8 +6677,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6778 params->phy[INT_PHY].read_status( 6677 params->phy[INT_PHY].read_status(
6779 &params->phy[INT_PHY], 6678 &params->phy[INT_PHY],
6780 params, vars); 6679 params, vars);
6781 /* 6680 /* The INT_PHY flow control reside in the vars. This include the
6782 * The INT_PHY flow control reside in the vars. This include the
6783 * case where the speed or flow control are not set to AUTO. 6681 * case where the speed or flow control are not set to AUTO.
6784 * Otherwise, the active external phy flow control result is set 6682 * Otherwise, the active external phy flow control result is set
6785 * to the vars. The ext_phy_line_speed is needed to check if the 6683 * to the vars. The ext_phy_line_speed is needed to check if the
@@ -6788,14 +6686,12 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6788 */ 6686 */
6789 if (active_external_phy > INT_PHY) { 6687 if (active_external_phy > INT_PHY) {
6790 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; 6688 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
6791 /* 6689 /* Link speed is taken from the XGXS. AN and FC result from
6792 * Link speed is taken from the XGXS. AN and FC result from
6793 * the external phy. 6690 * the external phy.
6794 */ 6691 */
6795 vars->link_status |= phy_vars[active_external_phy].link_status; 6692 vars->link_status |= phy_vars[active_external_phy].link_status;
6796 6693
6797 /* 6694 /* if active_external_phy is first PHY and link is up - disable
6798 * if active_external_phy is first PHY and link is up - disable
6799 * disable TX on second external PHY 6695 * disable TX on second external PHY
6800 */ 6696 */
6801 if (active_external_phy == EXT_PHY1) { 6697 if (active_external_phy == EXT_PHY1) {
@@ -6832,8 +6728,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6832 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," 6728 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
6833 " ext_phy_line_speed = %d\n", vars->flow_ctrl, 6729 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
6834 vars->link_status, ext_phy_line_speed); 6730 vars->link_status, ext_phy_line_speed);
6835 /* 6731 /* Upon link speed change set the NIG into drain mode. Comes to
6836 * Upon link speed change set the NIG into drain mode. Comes to
6837 * deals with possible FIFO glitch due to clk change when speed 6732 * deals with possible FIFO glitch due to clk change when speed
6838 * is decreased without link down indicator 6733 * is decreased without link down indicator
6839 */ 6734 */
@@ -6858,8 +6753,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6858 6753
6859 bnx2x_link_int_ack(params, vars, link_10g_plus); 6754 bnx2x_link_int_ack(params, vars, link_10g_plus);
6860 6755
6861 /* 6756 /* In case external phy link is up, and internal link is down
6862 * In case external phy link is up, and internal link is down
6863 * (not initialized yet probably after link initialization, it 6757 * (not initialized yet probably after link initialization, it
6864 * needs to be initialized. 6758 * needs to be initialized.
6865 * Note that after link down-up as result of cable plug, the xgxs 6759 * Note that after link down-up as result of cable plug, the xgxs
@@ -6887,8 +6781,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6887 vars); 6781 vars);
6888 } 6782 }
6889 } 6783 }
6890 /* 6784 /* Link is up only if both local phy and external phy (in case of
6891 * Link is up only if both local phy and external phy (in case of
6892 * non-direct board) are up and no fault detected on active PHY. 6785 * non-direct board) are up and no fault detected on active PHY.
6893 */ 6786 */
6894 vars->link_up = (vars->phy_link_up && 6787 vars->link_up = (vars->phy_link_up &&
@@ -6907,6 +6800,10 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6907 else 6800 else
6908 rc = bnx2x_update_link_down(params, vars); 6801 rc = bnx2x_update_link_down(params, vars);
6909 6802
6803 /* Update MCP link status was changed */
6804 if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
6805 bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
6806
6910 return rc; 6807 return rc;
6911} 6808}
6912 6809
@@ -7120,8 +7017,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7120 } 7017 }
7121 /* XAUI workaround in 8073 A0: */ 7018 /* XAUI workaround in 8073 A0: */
7122 7019
7123 /* 7020 /* After loading the boot ROM and restarting Autoneg, poll
7124 * After loading the boot ROM and restarting Autoneg, poll
7125 * Dev1, Reg $C820: 7021 * Dev1, Reg $C820:
7126 */ 7022 */
7127 7023
@@ -7130,8 +7026,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7130 MDIO_PMA_DEVAD, 7026 MDIO_PMA_DEVAD,
7131 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 7027 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
7132 &val); 7028 &val);
7133 /* 7029 /* If bit [14] = 0 or bit [13] = 0, continue on with
7134 * If bit [14] = 0 or bit [13] = 0, continue on with
7135 * system initialization (XAUI work-around not required, as 7030 * system initialization (XAUI work-around not required, as
7136 * these bits indicate 2.5G or 1G link up). 7031 * these bits indicate 2.5G or 1G link up).
7137 */ 7032 */
@@ -7140,8 +7035,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7140 return 0; 7035 return 0;
7141 } else if (!(val & (1<<15))) { 7036 } else if (!(val & (1<<15))) {
7142 DP(NETIF_MSG_LINK, "bit 15 went off\n"); 7037 DP(NETIF_MSG_LINK, "bit 15 went off\n");
7143 /* 7038 /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's
7144 * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
7145 * MSB (bit15) goes to 1 (indicating that the XAUI 7039 * MSB (bit15) goes to 1 (indicating that the XAUI
7146 * workaround has completed), then continue on with 7040 * workaround has completed), then continue on with
7147 * system initialization. 7041 * system initialization.
@@ -7291,8 +7185,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7291 val = (1<<7); 7185 val = (1<<7);
7292 } else if (phy->req_line_speed == SPEED_2500) { 7186 } else if (phy->req_line_speed == SPEED_2500) {
7293 val = (1<<5); 7187 val = (1<<5);
7294 /* 7188 /* Note that 2.5G works only when used with 1G
7295 * Note that 2.5G works only when used with 1G
7296 * advertisement 7189 * advertisement
7297 */ 7190 */
7298 } else 7191 } else
@@ -7343,8 +7236,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7343 /* Add support for CL37 (passive mode) III */ 7236 /* Add support for CL37 (passive mode) III */
7344 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 7237 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
7345 7238
7346 /* 7239 /* The SNR will improve about 2db by changing BW and FEE main
7347 * The SNR will improve about 2db by changing BW and FEE main
7348 * tap. Rest commands are executed after link is up 7240 * tap. Rest commands are executed after link is up
7349 * Change FFE main cursor to 5 in EDC register 7241 * Change FFE main cursor to 5 in EDC register
7350 */ 7242 */
@@ -7431,8 +7323,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
7431 7323
7432 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); 7324 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
7433 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { 7325 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
7434 /* 7326 /* The SNR will improve about 2dbby changing the BW and FEE main
7435 * The SNR will improve about 2dbby changing the BW and FEE main
7436 * tap. The 1st write to change FFE main tap is set before 7327 * tap. The 1st write to change FFE main tap is set before
7437 * restart AN. Change PLL Bandwidth in EDC register 7328 * restart AN. Change PLL Bandwidth in EDC register
7438 */ 7329 */
@@ -7479,8 +7370,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
7479 bnx2x_cl45_read(bp, phy, 7370 bnx2x_cl45_read(bp, phy,
7480 MDIO_XS_DEVAD, 7371 MDIO_XS_DEVAD,
7481 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); 7372 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
7482 /* 7373 /* Set bit 3 to invert Rx in 1G mode and clear this bit
7483 * Set bit 3 to invert Rx in 1G mode and clear this bit
7484 * when it`s in 10G mode. 7374 * when it`s in 10G mode.
7485 */ 7375 */
7486 if (vars->line_speed == SPEED_1000) { 7376 if (vars->line_speed == SPEED_1000) {
@@ -7602,8 +7492,7 @@ static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
7602 u8 pmd_dis) 7492 u8 pmd_dis)
7603{ 7493{
7604 struct bnx2x *bp = params->bp; 7494 struct bnx2x *bp = params->bp;
7605 /* 7495 /* Disable transmitter only for bootcodes which can enable it afterwards
7606 * Disable transmitter only for bootcodes which can enable it afterwards
7607 * (for D3 link) 7496 * (for D3 link)
7608 */ 7497 */
7609 if (pmd_dis) { 7498 if (pmd_dis) {
@@ -7780,9 +7669,6 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7780 u32 data_array[4]; 7669 u32 data_array[4];
7781 u16 addr32; 7670 u16 addr32;
7782 struct bnx2x *bp = params->bp; 7671 struct bnx2x *bp = params->bp;
7783 /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
7784 " addr %d, cnt %d\n",
7785 addr, byte_cnt);*/
7786 if (byte_cnt > 16) { 7672 if (byte_cnt > 16) {
7787 DP(NETIF_MSG_LINK, 7673 DP(NETIF_MSG_LINK,
7788 "Reading from eeprom is limited to 16 bytes\n"); 7674 "Reading from eeprom is limited to 16 bytes\n");
@@ -7847,8 +7733,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7847 MDIO_PMA_DEVAD, 7733 MDIO_PMA_DEVAD,
7848 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 7734 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
7849 0x8002); 7735 0x8002);
7850 /* 7736 /* Wait appropriate time for two-wire command to finish before
7851 * Wait appropriate time for two-wire command to finish before
7852 * polling the status register 7737 * polling the status register
7853 */ 7738 */
7854 msleep(1); 7739 msleep(1);
@@ -7941,8 +7826,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7941 { 7826 {
7942 u8 copper_module_type; 7827 u8 copper_module_type;
7943 phy->media_type = ETH_PHY_DA_TWINAX; 7828 phy->media_type = ETH_PHY_DA_TWINAX;
7944 /* 7829 /* Check if its active cable (includes SFP+ module)
7945 * Check if its active cable (includes SFP+ module)
7946 * of passive cable 7830 * of passive cable
7947 */ 7831 */
7948 if (bnx2x_read_sfp_module_eeprom(phy, 7832 if (bnx2x_read_sfp_module_eeprom(phy,
@@ -8019,8 +7903,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8019 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); 7903 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
8020 return 0; 7904 return 0;
8021} 7905}
8022/* 7906/* This function read the relevant field from the module (SFP+), and verify it
8023 * This function read the relevant field from the module (SFP+), and verify it
8024 * is compliant with this board 7907 * is compliant with this board
8025 */ 7908 */
8026static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, 7909static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
@@ -8102,8 +7985,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
8102 u8 val; 7985 u8 val;
8103 struct bnx2x *bp = params->bp; 7986 struct bnx2x *bp = params->bp;
8104 u16 timeout; 7987 u16 timeout;
8105 /* 7988 /* Initialization time after hot-plug may take up to 300ms for
8106 * Initialization time after hot-plug may take up to 300ms for
8107 * some phys type ( e.g. JDSU ) 7989 * some phys type ( e.g. JDSU )
8108 */ 7990 */
8109 7991
@@ -8125,8 +8007,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
8125 u8 is_power_up) { 8007 u8 is_power_up) {
8126 /* Make sure GPIOs are not using for LED mode */ 8008 /* Make sure GPIOs are not using for LED mode */
8127 u16 val; 8009 u16 val;
8128 /* 8010 /* In the GPIO register, bit 4 is use to determine if the GPIOs are
8129 * In the GPIO register, bit 4 is use to determine if the GPIOs are
8130 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for 8011 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
8131 * output 8012 * output
8132 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 8013 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
@@ -8142,8 +8023,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
8142 if (is_power_up) 8023 if (is_power_up)
8143 val = (1<<4); 8024 val = (1<<4);
8144 else 8025 else
8145 /* 8026 /* Set GPIO control to OUTPUT, and set the power bit
8146 * Set GPIO control to OUTPUT, and set the power bit
8147 * to according to the is_power_up 8027 * to according to the is_power_up
8148 */ 8028 */
8149 val = (1<<1); 8029 val = (1<<1);
@@ -8177,8 +8057,7 @@ static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
8177 8057
8178 DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); 8058 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
8179 8059
8180 /* 8060 /* Changing to LRM mode takes quite few seconds. So do it only
8181 * Changing to LRM mode takes quite few seconds. So do it only
8182 * if current mode is limiting (default is LRM) 8061 * if current mode is limiting (default is LRM)
8183 */ 8062 */
8184 if (cur_limiting_mode != EDC_MODE_LIMITING) 8063 if (cur_limiting_mode != EDC_MODE_LIMITING)
@@ -8313,8 +8192,7 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
8313 struct bnx2x *bp = params->bp; 8192 struct bnx2x *bp = params->bp;
8314 DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); 8193 DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
8315 if (CHIP_IS_E3(bp)) { 8194 if (CHIP_IS_E3(bp)) {
8316 /* 8195 /* Low ==> if SFP+ module is supported otherwise
8317 * Low ==> if SFP+ module is supported otherwise
8318 * High ==> if SFP+ module is not on the approved vendor list 8196 * High ==> if SFP+ module is not on the approved vendor list
8319 */ 8197 */
8320 bnx2x_set_e3_module_fault_led(params, gpio_mode); 8198 bnx2x_set_e3_module_fault_led(params, gpio_mode);
@@ -8339,8 +8217,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
8339 return; 8217 return;
8340 DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", 8218 DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
8341 power, pin_cfg); 8219 power, pin_cfg);
8342 /* 8220 /* Low ==> corresponding SFP+ module is powered
8343 * Low ==> corresponding SFP+ module is powered
8344 * high ==> the SFP+ module is powered down 8221 * high ==> the SFP+ module is powered down
8345 */ 8222 */
8346 bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); 8223 bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
@@ -8474,14 +8351,12 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
8474 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); 8351 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
8475 } 8352 }
8476 8353
8477 /* 8354 /* Check and set limiting mode / LRM mode on 8726. On 8727 it
8478 * Check and set limiting mode / LRM mode on 8726. On 8727 it
8479 * is done automatically 8355 * is done automatically
8480 */ 8356 */
8481 bnx2x_set_limiting_mode(params, phy, edc_mode); 8357 bnx2x_set_limiting_mode(params, phy, edc_mode);
8482 8358
8483 /* 8359 /* Enable transmit for this module if the module is approved, or
8484 * Enable transmit for this module if the module is approved, or
8485 * if unapproved modules should also enable the Tx laser 8360 * if unapproved modules should also enable the Tx laser
8486 */ 8361 */
8487 if (rc == 0 || 8362 if (rc == 0 ||
@@ -8536,8 +8411,7 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8536 bnx2x_set_gpio_int(bp, gpio_num, 8411 bnx2x_set_gpio_int(bp, gpio_num,
8537 MISC_REGISTERS_GPIO_INT_OUTPUT_SET, 8412 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
8538 gpio_port); 8413 gpio_port);
8539 /* 8414 /* Module was plugged out.
8540 * Module was plugged out.
8541 * Disable transmit for this module 8415 * Disable transmit for this module
8542 */ 8416 */
8543 phy->media_type = ETH_PHY_NOT_PRESENT; 8417 phy->media_type = ETH_PHY_NOT_PRESENT;
@@ -8607,8 +8481,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
8607 8481
8608 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" 8482 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
8609 " link_status 0x%x\n", rx_sd, pcs_status, val2); 8483 " link_status 0x%x\n", rx_sd, pcs_status, val2);
8610 /* 8484 /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
8611 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
8612 * are set, or if the autoneg bit 1 is set 8485 * are set, or if the autoneg bit 1 is set
8613 */ 8486 */
8614 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); 8487 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
@@ -8722,8 +8595,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
8722 } 8595 }
8723 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); 8596 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
8724 8597
8725 /* 8598 /* If TX Laser is controlled by GPIO_0, do not let PHY go into low
8726 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
8727 * power mode, if TX Laser is disabled 8599 * power mode, if TX Laser is disabled
8728 */ 8600 */
8729 8601
@@ -8833,8 +8705,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
8833 8705
8834 bnx2x_8726_external_rom_boot(phy, params); 8706 bnx2x_8726_external_rom_boot(phy, params);
8835 8707
8836 /* 8708 /* Need to call module detected on initialization since the module
8837 * Need to call module detected on initialization since the module
8838 * detection triggered by actual module insertion might occur before 8709 * detection triggered by actual module insertion might occur before
8839 * driver is loaded, and when driver is loaded, it reset all 8710 * driver is loaded, and when driver is loaded, it reset all
8840 * registers, including the transmitter 8711 * registers, including the transmitter
@@ -8871,8 +8742,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
8871 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 8742 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
8872 bnx2x_cl45_write(bp, phy, 8743 bnx2x_cl45_write(bp, phy,
8873 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); 8744 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
8874 /* 8745 /* Enable RX-ALARM control to receive interrupt for 1G speed
8875 * Enable RX-ALARM control to receive interrupt for 1G speed
8876 * change 8746 * change
8877 */ 8747 */
8878 bnx2x_cl45_write(bp, phy, 8748 bnx2x_cl45_write(bp, phy,
@@ -8973,8 +8843,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
8973 struct link_params *params) { 8843 struct link_params *params) {
8974 u32 swap_val, swap_override; 8844 u32 swap_val, swap_override;
8975 u8 port; 8845 u8 port;
8976 /* 8846 /* The PHY reset is controlled by GPIO 1. Fake the port number
8977 * The PHY reset is controlled by GPIO 1. Fake the port number
8978 * to cancel the swap done in set_gpio() 8847 * to cancel the swap done in set_gpio()
8979 */ 8848 */
8980 struct bnx2x *bp = params->bp; 8849 struct bnx2x *bp = params->bp;
@@ -9012,14 +8881,12 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9012 bnx2x_cl45_write(bp, phy, 8881 bnx2x_cl45_write(bp, phy,
9013 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val); 8882 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
9014 8883
9015 /* 8884 /* Initially configure MOD_ABS to interrupt when module is
9016 * Initially configure MOD_ABS to interrupt when module is
9017 * presence( bit 8) 8885 * presence( bit 8)
9018 */ 8886 */
9019 bnx2x_cl45_read(bp, phy, 8887 bnx2x_cl45_read(bp, phy,
9020 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); 8888 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
9021 /* 8889 /* Set EDC off by setting OPTXLOS signal input to low (bit 9).
9022 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
9023 * When the EDC is off it locks onto a reference clock and avoids 8890 * When the EDC is off it locks onto a reference clock and avoids
9024 * becoming 'lost' 8891 * becoming 'lost'
9025 */ 8892 */
@@ -9040,8 +8907,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9040 if (phy->flags & FLAGS_NOC) 8907 if (phy->flags & FLAGS_NOC)
9041 val |= (3<<5); 8908 val |= (3<<5);
9042 8909
9043 /* 8910 /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
9044 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
9045 * status which reflect SFP+ module over-current 8911 * status which reflect SFP+ module over-current
9046 */ 8912 */
9047 if (!(phy->flags & FLAGS_NOC)) 8913 if (!(phy->flags & FLAGS_NOC))
@@ -9067,8 +8933,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9067 bnx2x_cl45_read(bp, phy, 8933 bnx2x_cl45_read(bp, phy,
9068 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); 8934 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
9069 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); 8935 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
9070 /* 8936 /* Power down the XAUI until link is up in case of dual-media
9071 * Power down the XAUI until link is up in case of dual-media
9072 * and 1G 8937 * and 1G
9073 */ 8938 */
9074 if (DUAL_MEDIA(params)) { 8939 if (DUAL_MEDIA(params)) {
@@ -9093,8 +8958,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9093 bnx2x_cl45_write(bp, phy, 8958 bnx2x_cl45_write(bp, phy,
9094 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); 8959 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
9095 } else { 8960 } else {
9096 /* 8961 /* Since the 8727 has only single reset pin, need to set the 10G
9097 * Since the 8727 has only single reset pin, need to set the 10G
9098 * registers although it is default 8962 * registers although it is default
9099 */ 8963 */
9100 bnx2x_cl45_write(bp, phy, 8964 bnx2x_cl45_write(bp, phy,
@@ -9109,8 +8973,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9109 0x0008); 8973 0x0008);
9110 } 8974 }
9111 8975
9112 /* 8976 /* Set 2-wire transfer rate of SFP+ module EEPROM
9113 * Set 2-wire transfer rate of SFP+ module EEPROM
9114 * to 100Khz since some DACs(direct attached cables) do 8977 * to 100Khz since some DACs(direct attached cables) do
9115 * not work at 400Khz. 8978 * not work at 400Khz.
9116 */ 8979 */
@@ -9133,8 +8996,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9133 phy->tx_preemphasis[1]); 8996 phy->tx_preemphasis[1]);
9134 } 8997 }
9135 8998
9136 /* 8999 /* If TX Laser is controlled by GPIO_0, do not let PHY go into low
9137 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
9138 * power mode, if TX Laser is disabled 9000 * power mode, if TX Laser is disabled
9139 */ 9001 */
9140 tx_en_mode = REG_RD(bp, params->shmem_base + 9002 tx_en_mode = REG_RD(bp, params->shmem_base +
@@ -9180,8 +9042,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9180 DP(NETIF_MSG_LINK, 9042 DP(NETIF_MSG_LINK,
9181 "MOD_ABS indication show module is absent\n"); 9043 "MOD_ABS indication show module is absent\n");
9182 phy->media_type = ETH_PHY_NOT_PRESENT; 9044 phy->media_type = ETH_PHY_NOT_PRESENT;
9183 /* 9045 /* 1. Set mod_abs to detect next module
9184 * 1. Set mod_abs to detect next module
9185 * presence event 9046 * presence event
9186 * 2. Set EDC off by setting OPTXLOS signal input to low 9047 * 2. Set EDC off by setting OPTXLOS signal input to low
9187 * (bit 9). 9048 * (bit 9).
@@ -9195,8 +9056,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9195 MDIO_PMA_DEVAD, 9056 MDIO_PMA_DEVAD,
9196 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 9057 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
9197 9058
9198 /* 9059 /* Clear RX alarm since it stays up as long as
9199 * Clear RX alarm since it stays up as long as
9200 * the mod_abs wasn't changed 9060 * the mod_abs wasn't changed
9201 */ 9061 */
9202 bnx2x_cl45_read(bp, phy, 9062 bnx2x_cl45_read(bp, phy,
@@ -9207,8 +9067,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9207 /* Module is present */ 9067 /* Module is present */
9208 DP(NETIF_MSG_LINK, 9068 DP(NETIF_MSG_LINK,
9209 "MOD_ABS indication show module is present\n"); 9069 "MOD_ABS indication show module is present\n");
9210 /* 9070 /* First disable transmitter, and if the module is ok, the
9211 * First disable transmitter, and if the module is ok, the
9212 * module_detection will enable it 9071 * module_detection will enable it
9213 * 1. Set mod_abs to detect next module absent event ( bit 8) 9072 * 1. Set mod_abs to detect next module absent event ( bit 8)
9214 * 2. Restore the default polarity of the OPRXLOS signal and 9073 * 2. Restore the default polarity of the OPRXLOS signal and
@@ -9222,8 +9081,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9222 MDIO_PMA_DEVAD, 9081 MDIO_PMA_DEVAD,
9223 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 9082 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
9224 9083
9225 /* 9084 /* Clear RX alarm since it stays up as long as the mod_abs
9226 * Clear RX alarm since it stays up as long as the mod_abs
9227 * wasn't changed. This is need to be done before calling the 9085 * wasn't changed. This is need to be done before calling the
9228 * module detection, otherwise it will clear* the link update 9086 * module detection, otherwise it will clear* the link update
9229 * alarm 9087 * alarm
@@ -9284,8 +9142,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9284 bnx2x_cl45_read(bp, phy, 9142 bnx2x_cl45_read(bp, phy,
9285 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); 9143 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
9286 9144
9287 /* 9145 /* If a module is present and there is need to check
9288 * If a module is present and there is need to check
9289 * for over current 9146 * for over current
9290 */ 9147 */
9291 if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) { 9148 if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
@@ -9350,8 +9207,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9350 MDIO_PMA_DEVAD, 9207 MDIO_PMA_DEVAD,
9351 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); 9208 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
9352 9209
9353 /* 9210 /* Bits 0..2 --> speed detected,
9354 * Bits 0..2 --> speed detected,
9355 * Bits 13..15--> link is down 9211 * Bits 13..15--> link is down
9356 */ 9212 */
9357 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { 9213 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
@@ -9394,8 +9250,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9394 bnx2x_cl45_read(bp, phy, 9250 bnx2x_cl45_read(bp, phy,
9395 MDIO_PMA_DEVAD, 9251 MDIO_PMA_DEVAD,
9396 MDIO_PMA_REG_8727_PCS_GP, &val1); 9252 MDIO_PMA_REG_8727_PCS_GP, &val1);
9397 /* 9253 /* In case of dual-media board and 1G, power up the XAUI side,
9398 * In case of dual-media board and 1G, power up the XAUI side,
9399 * otherwise power it down. For 10G it is done automatically 9254 * otherwise power it down. For 10G it is done automatically
9400 */ 9255 */
9401 if (link_up) 9256 if (link_up)
@@ -9561,8 +9416,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9561 /* Save spirom version */ 9416 /* Save spirom version */
9562 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 9417 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9563 } 9418 }
9564 /* 9419 /* This phy uses the NIG latch mechanism since link indication
9565 * This phy uses the NIG latch mechanism since link indication
9566 * arrives through its LED4 and not via its LASI signal, so we 9420 * arrives through its LED4 and not via its LASI signal, so we
9567 * get steady signal instead of clear on read 9421 * get steady signal instead of clear on read
9568 */ 9422 */
@@ -9667,8 +9521,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9667 if (phy->req_duplex == DUPLEX_FULL) 9521 if (phy->req_duplex == DUPLEX_FULL)
9668 autoneg_val |= (1<<8); 9522 autoneg_val |= (1<<8);
9669 9523
9670 /* 9524 /* Always write this if this is not 84833.
9671 * Always write this if this is not 84833.
9672 * For 84833, write it only when it's a forced speed. 9525 * For 84833, write it only when it's a forced speed.
9673 */ 9526 */
9674 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || 9527 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
@@ -9916,8 +9769,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9916 /* Wait for GPHY to come out of reset */ 9769 /* Wait for GPHY to come out of reset */
9917 msleep(50); 9770 msleep(50);
9918 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 9771 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
9919 /* 9772 /* BCM84823 requires that XGXS links up first @ 10G for normal
9920 * BCM84823 requires that XGXS links up first @ 10G for normal
9921 * behavior. 9773 * behavior.
9922 */ 9774 */
9923 u16 temp; 9775 u16 temp;
@@ -10393,8 +10245,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10393 break; 10245 break;
10394 } 10246 }
10395 10247
10396 /* 10248 /* This is a workaround for E3+84833 until autoneg
10397 * This is a workaround for E3+84833 until autoneg
10398 * restart is fixed in f/w 10249 * restart is fixed in f/w
10399 */ 10250 */
10400 if (CHIP_IS_E3(bp)) { 10251 if (CHIP_IS_E3(bp)) {
@@ -10418,8 +10269,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10418 DP(NETIF_MSG_LINK, "54618SE cfg init\n"); 10269 DP(NETIF_MSG_LINK, "54618SE cfg init\n");
10419 usleep_range(1000, 1000); 10270 usleep_range(1000, 1000);
10420 10271
10421 /* 10272 /* This works with E3 only, no need to check the chip
10422 * This works with E3 only, no need to check the chip
10423 * before determining the port. 10273 * before determining the port.
10424 */ 10274 */
10425 port = params->port; 10275 port = params->port;
@@ -10441,7 +10291,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10441 MDIO_PMA_REG_CTRL, 0x8000); 10291 MDIO_PMA_REG_CTRL, 0x8000);
10442 bnx2x_wait_reset_complete(bp, phy, params); 10292 bnx2x_wait_reset_complete(bp, phy, params);
10443 10293
10444 /*wait for GPHY to reset */ 10294 /* Wait for GPHY to reset */
10445 msleep(50); 10295 msleep(50);
10446 10296
10447 /* Configure LED4: set to INTR (0x6). */ 10297 /* Configure LED4: set to INTR (0x6). */
@@ -10647,13 +10497,11 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
10647 u32 cfg_pin; 10497 u32 cfg_pin;
10648 u8 port; 10498 u8 port;
10649 10499
10650 /* 10500 /* In case of no EPIO routed to reset the GPHY, put it
10651 * In case of no EPIO routed to reset the GPHY, put it
10652 * in low power mode. 10501 * in low power mode.
10653 */ 10502 */
10654 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); 10503 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
10655 /* 10504 /* This works with E3 only, no need to check the chip
10656 * This works with E3 only, no need to check the chip
10657 * before determining the port. 10505 * before determining the port.
10658 */ 10506 */
10659 port = params->port; 10507 port = params->port;
@@ -10762,7 +10610,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
10762 bnx2x_ext_phy_resolve_fc(phy, params, vars); 10610 bnx2x_ext_phy_resolve_fc(phy, params, vars);
10763 10611
10764 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 10612 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
10765 /* report LP advertised speeds */ 10613 /* Report LP advertised speeds */
10766 bnx2x_cl22_read(bp, phy, 0x5, &val); 10614 bnx2x_cl22_read(bp, phy, 0x5, &val);
10767 10615
10768 if (val & (1<<5)) 10616 if (val & (1<<5))
@@ -10827,8 +10675,7 @@ static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
10827 /* This register opens the gate for the UMAC despite its name */ 10675 /* This register opens the gate for the UMAC despite its name */
10828 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); 10676 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
10829 10677
10830 /* 10678 /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
10831 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
10832 * length used by the MAC receive logic to check frames. 10679 * length used by the MAC receive logic to check frames.
10833 */ 10680 */
10834 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); 10681 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -11101,22 +10948,23 @@ static struct bnx2x_phy phy_warpcore = {
11101 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 10948 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
11102 .addr = 0xff, 10949 .addr = 0xff,
11103 .def_md_devad = 0, 10950 .def_md_devad = 0,
11104 .flags = FLAGS_HW_LOCK_REQUIRED, 10951 .flags = (FLAGS_HW_LOCK_REQUIRED |
10952 FLAGS_TX_ERROR_CHECK),
11105 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10953 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11106 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10954 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11107 .mdio_ctrl = 0, 10955 .mdio_ctrl = 0,
11108 .supported = (SUPPORTED_10baseT_Half | 10956 .supported = (SUPPORTED_10baseT_Half |
11109 SUPPORTED_10baseT_Full | 10957 SUPPORTED_10baseT_Full |
11110 SUPPORTED_100baseT_Half | 10958 SUPPORTED_100baseT_Half |
11111 SUPPORTED_100baseT_Full | 10959 SUPPORTED_100baseT_Full |
11112 SUPPORTED_1000baseT_Full | 10960 SUPPORTED_1000baseT_Full |
11113 SUPPORTED_10000baseT_Full | 10961 SUPPORTED_10000baseT_Full |
11114 SUPPORTED_20000baseKR2_Full | 10962 SUPPORTED_20000baseKR2_Full |
11115 SUPPORTED_20000baseMLD2_Full | 10963 SUPPORTED_20000baseMLD2_Full |
11116 SUPPORTED_FIBRE | 10964 SUPPORTED_FIBRE |
11117 SUPPORTED_Autoneg | 10965 SUPPORTED_Autoneg |
11118 SUPPORTED_Pause | 10966 SUPPORTED_Pause |
11119 SUPPORTED_Asym_Pause), 10967 SUPPORTED_Asym_Pause),
11120 .media_type = ETH_PHY_UNSPECIFIED, 10968 .media_type = ETH_PHY_UNSPECIFIED,
11121 .ver_addr = 0, 10969 .ver_addr = 0,
11122 .req_flow_ctrl = 0, 10970 .req_flow_ctrl = 0,
@@ -11258,7 +11106,8 @@ static struct bnx2x_phy phy_8726 = {
11258 .addr = 0xff, 11106 .addr = 0xff,
11259 .def_md_devad = 0, 11107 .def_md_devad = 0,
11260 .flags = (FLAGS_HW_LOCK_REQUIRED | 11108 .flags = (FLAGS_HW_LOCK_REQUIRED |
11261 FLAGS_INIT_XGXS_FIRST), 11109 FLAGS_INIT_XGXS_FIRST |
11110 FLAGS_TX_ERROR_CHECK),
11262 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11111 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11263 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11112 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11264 .mdio_ctrl = 0, 11113 .mdio_ctrl = 0,
@@ -11289,7 +11138,8 @@ static struct bnx2x_phy phy_8727 = {
11289 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 11138 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
11290 .addr = 0xff, 11139 .addr = 0xff,
11291 .def_md_devad = 0, 11140 .def_md_devad = 0,
11292 .flags = FLAGS_FAN_FAILURE_DET_REQ, 11141 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11142 FLAGS_TX_ERROR_CHECK),
11293 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11143 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11294 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11144 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11295 .mdio_ctrl = 0, 11145 .mdio_ctrl = 0,
@@ -11354,8 +11204,9 @@ static struct bnx2x_phy phy_84823 = {
11354 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, 11204 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
11355 .addr = 0xff, 11205 .addr = 0xff,
11356 .def_md_devad = 0, 11206 .def_md_devad = 0,
11357 .flags = FLAGS_FAN_FAILURE_DET_REQ | 11207 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11358 FLAGS_REARM_LATCH_SIGNAL, 11208 FLAGS_REARM_LATCH_SIGNAL |
11209 FLAGS_TX_ERROR_CHECK),
11359 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11210 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11360 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11211 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11361 .mdio_ctrl = 0, 11212 .mdio_ctrl = 0,
@@ -11390,8 +11241,9 @@ static struct bnx2x_phy phy_84833 = {
11390 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, 11241 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
11391 .addr = 0xff, 11242 .addr = 0xff,
11392 .def_md_devad = 0, 11243 .def_md_devad = 0,
11393 .flags = FLAGS_FAN_FAILURE_DET_REQ | 11244 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11394 FLAGS_REARM_LATCH_SIGNAL, 11245 FLAGS_REARM_LATCH_SIGNAL |
11246 FLAGS_TX_ERROR_CHECK),
11395 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11247 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11396 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11248 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11397 .mdio_ctrl = 0, 11249 .mdio_ctrl = 0,
@@ -11466,9 +11318,8 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
11466 /* Get the 4 lanes xgxs config rx and tx */ 11318 /* Get the 4 lanes xgxs config rx and tx */
11467 u32 rx = 0, tx = 0, i; 11319 u32 rx = 0, tx = 0, i;
11468 for (i = 0; i < 2; i++) { 11320 for (i = 0; i < 2; i++) {
11469 /* 11321 /* INT_PHY and EXT_PHY1 share the same value location in
11470 * INT_PHY and EXT_PHY1 share the same value location in the 11322 * the shmem. When num_phys is greater than 1, than this value
11471 * shmem. When num_phys is greater than 1, than this value
11472 * applies only to EXT_PHY1 11323 * applies only to EXT_PHY1
11473 */ 11324 */
11474 if (phy_index == INT_PHY || phy_index == EXT_PHY1) { 11325 if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
@@ -11546,8 +11397,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11546 offsetof(struct shmem_region, dev_info. 11397 offsetof(struct shmem_region, dev_info.
11547 port_hw_config[port].default_cfg)) & 11398 port_hw_config[port].default_cfg)) &
11548 PORT_HW_CFG_NET_SERDES_IF_MASK); 11399 PORT_HW_CFG_NET_SERDES_IF_MASK);
11549 /* 11400 /* Set the appropriate supported and flags indications per
11550 * Set the appropriate supported and flags indications per
11551 * interface type of the chip 11401 * interface type of the chip
11552 */ 11402 */
11553 switch (serdes_net_if) { 11403 switch (serdes_net_if) {
@@ -11605,8 +11455,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11605 break; 11455 break;
11606 } 11456 }
11607 11457
11608 /* 11458 /* Enable MDC/MDIO work-around for E3 A0 since free running MDC
11609 * Enable MDC/MDIO work-around for E3 A0 since free running MDC
11610 * was not set as expected. For B0, ECO will be enabled so there 11459 * was not set as expected. For B0, ECO will be enabled so there
11611 * won't be an issue there 11460 * won't be an issue there
11612 */ 11461 */
@@ -11719,8 +11568,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11719 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); 11568 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
11720 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); 11569 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
11721 11570
11722 /* 11571 /* The shmem address of the phy version is located on different
11723 * The shmem address of the phy version is located on different
11724 * structures. In case this structure is too old, do not set 11572 * structures. In case this structure is too old, do not set
11725 * the address 11573 * the address
11726 */ 11574 */
@@ -11754,8 +11602,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11754 11602
11755 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && 11603 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
11756 (phy->ver_addr)) { 11604 (phy->ver_addr)) {
11757 /* 11605 /* Remove 100Mb link supported for BCM84833 when phy fw
11758 * Remove 100Mb link supported for BCM84833 when phy fw
11759 * version lower than or equal to 1.39 11606 * version lower than or equal to 1.39
11760 */ 11607 */
11761 u32 raw_ver = REG_RD(bp, phy->ver_addr); 11608 u32 raw_ver = REG_RD(bp, phy->ver_addr);
@@ -11765,8 +11612,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11765 SUPPORTED_100baseT_Full); 11612 SUPPORTED_100baseT_Full);
11766 } 11613 }
11767 11614
11768 /* 11615 /* In case mdc/mdio_access of the external phy is different than the
11769 * In case mdc/mdio_access of the external phy is different than the
11770 * mdc/mdio access of the XGXS, a HW lock must be taken in each access 11616 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
11771 * to prevent one port interfere with another port's CL45 operations. 11617 * to prevent one port interfere with another port's CL45 operations.
11772 */ 11618 */
@@ -11936,13 +11782,16 @@ int bnx2x_phy_probe(struct link_params *params)
11936 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) 11782 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
11937 break; 11783 break;
11938 11784
11785 if (params->feature_config_flags &
11786 FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
11787 phy->flags &= ~FLAGS_TX_ERROR_CHECK;
11788
11939 sync_offset = params->shmem_base + 11789 sync_offset = params->shmem_base +
11940 offsetof(struct shmem_region, 11790 offsetof(struct shmem_region,
11941 dev_info.port_hw_config[params->port].media_type); 11791 dev_info.port_hw_config[params->port].media_type);
11942 media_types = REG_RD(bp, sync_offset); 11792 media_types = REG_RD(bp, sync_offset);
11943 11793
11944 /* 11794 /* Update media type for non-PMF sync only for the first time
11945 * Update media type for non-PMF sync only for the first time
11946 * In case the media type changes afterwards, it will be updated 11795 * In case the media type changes afterwards, it will be updated
11947 * using the update_status function 11796 * using the update_status function
11948 */ 11797 */
@@ -12016,8 +11865,7 @@ void bnx2x_init_xmac_loopback(struct link_params *params,
12016 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 11865 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
12017 vars->mac_type = MAC_TYPE_XMAC; 11866 vars->mac_type = MAC_TYPE_XMAC;
12018 vars->phy_flags = PHY_XGXS_FLAG; 11867 vars->phy_flags = PHY_XGXS_FLAG;
12019 /* 11868 /* Set WC to loopback mode since link is required to provide clock
12020 * Set WC to loopback mode since link is required to provide clock
12021 * to the XMAC in 20G mode 11869 * to the XMAC in 20G mode
12022 */ 11870 */
12023 bnx2x_set_aer_mmd(params, &params->phy[0]); 11871 bnx2x_set_aer_mmd(params, &params->phy[0]);
@@ -12162,6 +12010,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12162 bnx2x_link_int_enable(params); 12010 bnx2x_link_int_enable(params);
12163 break; 12011 break;
12164 } 12012 }
12013 bnx2x_update_mng(params, vars->link_status);
12165 return 0; 12014 return 0;
12166} 12015}
12167 12016
@@ -12302,7 +12151,8 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
12302 NIG_MASK_MI_INT)); 12151 NIG_MASK_MI_INT));
12303 12152
12304 /* Need to take the phy out of low power mode in order 12153 /* Need to take the phy out of low power mode in order
12305 to write to access its registers */ 12154 * to write to access its registers
12155 */
12306 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 12156 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
12307 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 12157 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
12308 port); 12158 port);
@@ -12350,8 +12200,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
12350 (val | 1<<10)); 12200 (val | 1<<10));
12351 } 12201 }
12352 12202
12353 /* 12203 /* Toggle Transmitter: Power down and then up with 600ms delay
12354 * Toggle Transmitter: Power down and then up with 600ms delay
12355 * between 12204 * between
12356 */ 12205 */
12357 msleep(600); 12206 msleep(600);
@@ -12494,8 +12343,7 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
12494 reset_gpio = MISC_REGISTERS_GPIO_1; 12343 reset_gpio = MISC_REGISTERS_GPIO_1;
12495 port = 1; 12344 port = 1;
12496 12345
12497 /* 12346 /* Retrieve the reset gpio/port which control the reset.
12498 * Retrieve the reset gpio/port which control the reset.
12499 * Default is GPIO1, PORT1 12347 * Default is GPIO1, PORT1
12500 */ 12348 */
12501 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0], 12349 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
@@ -12670,8 +12518,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
12670 break; 12518 break;
12671 12519
12672 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 12520 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
12673 /* 12521 /* GPIO1 affects both ports, so there's need to pull
12674 * GPIO1 affects both ports, so there's need to pull
12675 * it for single port alone 12522 * it for single port alone
12676 */ 12523 */
12677 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, 12524 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
@@ -12679,8 +12526,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
12679 phy_index, chip_id); 12526 phy_index, chip_id);
12680 break; 12527 break;
12681 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: 12528 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
12682 /* 12529 /* GPIO3's are linked, and so both need to be toggled
12683 * GPIO3's are linked, and so both need to be toggled
12684 * to obtain required 2us pulse. 12530 * to obtain required 2us pulse.
12685 */ 12531 */
12686 rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, 12532 rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
@@ -12779,7 +12625,8 @@ static void bnx2x_check_over_curr(struct link_params *params,
12779} 12625}
12780 12626
12781static void bnx2x_analyze_link_error(struct link_params *params, 12627static void bnx2x_analyze_link_error(struct link_params *params,
12782 struct link_vars *vars, u32 lss_status) 12628 struct link_vars *vars, u32 lss_status,
12629 u8 notify)
12783{ 12630{
12784 struct bnx2x *bp = params->bp; 12631 struct bnx2x *bp = params->bp;
12785 /* Compare new value with previous value */ 12632 /* Compare new value with previous value */
@@ -12793,8 +12640,7 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12793 DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, 12640 DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
12794 half_open_conn, lss_status); 12641 half_open_conn, lss_status);
12795 12642
12796 /* 12643 /* a. Update shmem->link_status accordingly
12797 * a. Update shmem->link_status accordingly
12798 * b. Update link_vars->link_up 12644 * b. Update link_vars->link_up
12799 */ 12645 */
12800 if (lss_status) { 12646 if (lss_status) {
@@ -12802,8 +12648,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12802 vars->link_status &= ~LINK_STATUS_LINK_UP; 12648 vars->link_status &= ~LINK_STATUS_LINK_UP;
12803 vars->link_up = 0; 12649 vars->link_up = 0;
12804 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 12650 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
12805 /* 12651
12806 * Set LED mode to off since the PHY doesn't know about these 12652 /* activate nig drain */
12653 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
12654 /* Set LED mode to off since the PHY doesn't know about these
12807 * errors 12655 * errors
12808 */ 12656 */
12809 led_mode = LED_MODE_OFF; 12657 led_mode = LED_MODE_OFF;
@@ -12813,7 +12661,11 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12813 vars->link_up = 1; 12661 vars->link_up = 1;
12814 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; 12662 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
12815 led_mode = LED_MODE_OPER; 12663 led_mode = LED_MODE_OPER;
12664
12665 /* Clear nig drain */
12666 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12816 } 12667 }
12668 bnx2x_sync_link(params, vars);
12817 /* Update the LED according to the link state */ 12669 /* Update the LED according to the link state */
12818 bnx2x_set_led(params, vars, led_mode, SPEED_10000); 12670 bnx2x_set_led(params, vars, led_mode, SPEED_10000);
12819 12671
@@ -12822,7 +12674,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12822 12674
12823 /* C. Trigger General Attention */ 12675 /* C. Trigger General Attention */
12824 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; 12676 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
12825 bnx2x_notify_link_changed(bp); 12677 if (notify)
12678 bnx2x_notify_link_changed(bp);
12826} 12679}
12827 12680
12828/****************************************************************************** 12681/******************************************************************************
@@ -12834,22 +12687,23 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12834* a fault, for example, due to break in the TX side of fiber. 12687* a fault, for example, due to break in the TX side of fiber.
12835* 12688*
12836******************************************************************************/ 12689******************************************************************************/
12837static void bnx2x_check_half_open_conn(struct link_params *params, 12690int bnx2x_check_half_open_conn(struct link_params *params,
12838 struct link_vars *vars) 12691 struct link_vars *vars,
12692 u8 notify)
12839{ 12693{
12840 struct bnx2x *bp = params->bp; 12694 struct bnx2x *bp = params->bp;
12841 u32 lss_status = 0; 12695 u32 lss_status = 0;
12842 u32 mac_base; 12696 u32 mac_base;
12843 /* In case link status is physically up @ 10G do */ 12697 /* In case link status is physically up @ 10G do */
12844 if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) 12698 if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) ||
12845 return; 12699 (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4)))
12700 return 0;
12846 12701
12847 if (CHIP_IS_E3(bp) && 12702 if (CHIP_IS_E3(bp) &&
12848 (REG_RD(bp, MISC_REG_RESET_REG_2) & 12703 (REG_RD(bp, MISC_REG_RESET_REG_2) &
12849 (MISC_REGISTERS_RESET_REG_2_XMAC))) { 12704 (MISC_REGISTERS_RESET_REG_2_XMAC))) {
12850 /* Check E3 XMAC */ 12705 /* Check E3 XMAC */
12851 /* 12706 /* Note that link speed cannot be queried here, since it may be
12852 * Note that link speed cannot be queried here, since it may be
12853 * zero while link is down. In case UMAC is active, LSS will 12707 * zero while link is down. In case UMAC is active, LSS will
12854 * simply not be set 12708 * simply not be set
12855 */ 12709 */
@@ -12863,7 +12717,7 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
12863 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) 12717 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
12864 lss_status = 1; 12718 lss_status = 1;
12865 12719
12866 bnx2x_analyze_link_error(params, vars, lss_status); 12720 bnx2x_analyze_link_error(params, vars, lss_status, notify);
12867 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & 12721 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
12868 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { 12722 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
12869 /* Check E1X / E2 BMAC */ 12723 /* Check E1X / E2 BMAC */
@@ -12880,18 +12734,21 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
12880 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); 12734 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
12881 lss_status = (wb_data[0] > 0); 12735 lss_status = (wb_data[0] > 0);
12882 12736
12883 bnx2x_analyze_link_error(params, vars, lss_status); 12737 bnx2x_analyze_link_error(params, vars, lss_status, notify);
12884 } 12738 }
12739 return 0;
12885} 12740}
12886 12741
12887void bnx2x_period_func(struct link_params *params, struct link_vars *vars) 12742void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
12888{ 12743{
12889 struct bnx2x *bp = params->bp;
12890 u16 phy_idx; 12744 u16 phy_idx;
12745 struct bnx2x *bp = params->bp;
12891 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { 12746 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
12892 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { 12747 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
12893 bnx2x_set_aer_mmd(params, &params->phy[phy_idx]); 12748 bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
12894 bnx2x_check_half_open_conn(params, vars); 12749 if (bnx2x_check_half_open_conn(params, vars, 1) !=
12750 0)
12751 DP(NETIF_MSG_LINK, "Fault detection failed\n");
12895 break; 12752 break;
12896 } 12753 }
12897 } 12754 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 763535ee4832..ea4371f4335f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -254,8 +254,10 @@ struct link_params {
254#define FEATURE_CONFIG_PFC_ENABLED (1<<1) 254#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
255#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) 255#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
256#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) 256#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
257#define FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8)
257#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) 258#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
258#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) 259#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
260#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11)
259 /* Will be populated during common init */ 261 /* Will be populated during common init */
260 struct bnx2x_phy phy[MAX_PHYS]; 262 struct bnx2x_phy phy[MAX_PHYS];
261 263
@@ -495,4 +497,6 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
495 497
496void bnx2x_period_func(struct link_params *params, struct link_vars *vars); 498void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
497 499
500int bnx2x_check_half_open_conn(struct link_params *params,
501 struct link_vars *vars, u8 notify);
498#endif /* BNX2X_LINK_H */ 502#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e077d2508727..f755a665dab3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -39,7 +39,6 @@
39#include <linux/time.h> 39#include <linux/time.h>
40#include <linux/ethtool.h> 40#include <linux/ethtool.h>
41#include <linux/mii.h> 41#include <linux/mii.h>
42#include <linux/if.h>
43#include <linux/if_vlan.h> 42#include <linux/if_vlan.h>
44#include <net/ip.h> 43#include <net/ip.h>
45#include <net/ipv6.h> 44#include <net/ipv6.h>
@@ -93,15 +92,11 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
93MODULE_FIRMWARE(FW_FILE_NAME_E1H); 92MODULE_FIRMWARE(FW_FILE_NAME_E1H);
94MODULE_FIRMWARE(FW_FILE_NAME_E2); 93MODULE_FIRMWARE(FW_FILE_NAME_E2);
95 94
96static int multi_mode = 1;
97module_param(multi_mode, int, 0);
98MODULE_PARM_DESC(multi_mode, " Multi queue mode "
99 "(0 Disable; 1 Enable (default))");
100 95
101int num_queues; 96int num_queues;
102module_param(num_queues, int, 0); 97module_param(num_queues, int, 0);
103MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" 98MODULE_PARM_DESC(num_queues,
104 " (default is as a number of CPUs)"); 99 " Set number of queues (default is as a number of CPUs)");
105 100
106static int disable_tpa; 101static int disable_tpa;
107module_param(disable_tpa, int, 0); 102module_param(disable_tpa, int, 0);
@@ -141,7 +136,9 @@ enum bnx2x_board_type {
141 BCM57810, 136 BCM57810,
142 BCM57810_MF, 137 BCM57810_MF,
143 BCM57840, 138 BCM57840,
144 BCM57840_MF 139 BCM57840_MF,
140 BCM57811,
141 BCM57811_MF
145}; 142};
146 143
147/* indexed by board_type, above */ 144/* indexed by board_type, above */
@@ -158,8 +155,9 @@ static struct {
158 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, 155 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
159 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, 156 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
160 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, 157 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
161 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " 158 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
162 "Ethernet Multi Function"} 159 { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
160 { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
163}; 161};
164 162
165#ifndef PCI_DEVICE_ID_NX2_57710 163#ifndef PCI_DEVICE_ID_NX2_57710
@@ -195,6 +193,12 @@ static struct {
195#ifndef PCI_DEVICE_ID_NX2_57840_MF 193#ifndef PCI_DEVICE_ID_NX2_57840_MF
196#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF 194#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
197#endif 195#endif
196#ifndef PCI_DEVICE_ID_NX2_57811
197#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
198#endif
199#ifndef PCI_DEVICE_ID_NX2_57811_MF
200#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
201#endif
198static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { 202static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
199 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 203 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
200 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 204 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
@@ -207,6 +211,8 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
207 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, 211 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
208 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, 212 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
209 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, 213 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
214 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
215 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
210 { 0 } 216 { 0 }
211}; 217};
212 218
@@ -220,15 +226,15 @@ static LIST_HEAD(bnx2x_prev_list);
220* General service functions 226* General service functions
221****************************************************************************/ 227****************************************************************************/
222 228
223static inline void __storm_memset_dma_mapping(struct bnx2x *bp, 229static void __storm_memset_dma_mapping(struct bnx2x *bp,
224 u32 addr, dma_addr_t mapping) 230 u32 addr, dma_addr_t mapping)
225{ 231{
226 REG_WR(bp, addr, U64_LO(mapping)); 232 REG_WR(bp, addr, U64_LO(mapping));
227 REG_WR(bp, addr + 4, U64_HI(mapping)); 233 REG_WR(bp, addr + 4, U64_HI(mapping));
228} 234}
229 235
230static inline void storm_memset_spq_addr(struct bnx2x *bp, 236static void storm_memset_spq_addr(struct bnx2x *bp,
231 dma_addr_t mapping, u16 abs_fid) 237 dma_addr_t mapping, u16 abs_fid)
232{ 238{
233 u32 addr = XSEM_REG_FAST_MEMORY + 239 u32 addr = XSEM_REG_FAST_MEMORY +
234 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); 240 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
@@ -236,8 +242,8 @@ static inline void storm_memset_spq_addr(struct bnx2x *bp,
236 __storm_memset_dma_mapping(bp, addr, mapping); 242 __storm_memset_dma_mapping(bp, addr, mapping);
237} 243}
238 244
239static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 245static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
240 u16 pf_id) 246 u16 pf_id)
241{ 247{
242 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), 248 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
243 pf_id); 249 pf_id);
@@ -249,8 +255,8 @@ static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
249 pf_id); 255 pf_id);
250} 256}
251 257
252static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, 258static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
253 u8 enable) 259 u8 enable)
254{ 260{
255 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), 261 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
256 enable); 262 enable);
@@ -262,8 +268,8 @@ static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
262 enable); 268 enable);
263} 269}
264 270
265static inline void storm_memset_eq_data(struct bnx2x *bp, 271static void storm_memset_eq_data(struct bnx2x *bp,
266 struct event_ring_data *eq_data, 272 struct event_ring_data *eq_data,
267 u16 pfid) 273 u16 pfid)
268{ 274{
269 size_t size = sizeof(struct event_ring_data); 275 size_t size = sizeof(struct event_ring_data);
@@ -273,8 +279,8 @@ static inline void storm_memset_eq_data(struct bnx2x *bp,
273 __storm_memset_struct(bp, addr, size, (u32 *)eq_data); 279 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
274} 280}
275 281
276static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, 282static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
277 u16 pfid) 283 u16 pfid)
278{ 284{
279 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); 285 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
280 REG_WR16(bp, addr, eq_prod); 286 REG_WR16(bp, addr, eq_prod);
@@ -309,67 +315,6 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
309#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" 315#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
310#define DMAE_DP_DST_NONE "dst_addr [none]" 316#define DMAE_DP_DST_NONE "dst_addr [none]"
311 317
312static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
313 int msglvl)
314{
315 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
316
317 switch (dmae->opcode & DMAE_COMMAND_DST) {
318 case DMAE_CMD_DST_PCI:
319 if (src_type == DMAE_CMD_SRC_PCI)
320 DP(msglvl, "DMAE: opcode 0x%08x\n"
321 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
322 "comp_addr [%x:%08x], comp_val 0x%08x\n",
323 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
324 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
325 dmae->comp_addr_hi, dmae->comp_addr_lo,
326 dmae->comp_val);
327 else
328 DP(msglvl, "DMAE: opcode 0x%08x\n"
329 "src [%08x], len [%d*4], dst [%x:%08x]\n"
330 "comp_addr [%x:%08x], comp_val 0x%08x\n",
331 dmae->opcode, dmae->src_addr_lo >> 2,
332 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
333 dmae->comp_addr_hi, dmae->comp_addr_lo,
334 dmae->comp_val);
335 break;
336 case DMAE_CMD_DST_GRC:
337 if (src_type == DMAE_CMD_SRC_PCI)
338 DP(msglvl, "DMAE: opcode 0x%08x\n"
339 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
340 "comp_addr [%x:%08x], comp_val 0x%08x\n",
341 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
342 dmae->len, dmae->dst_addr_lo >> 2,
343 dmae->comp_addr_hi, dmae->comp_addr_lo,
344 dmae->comp_val);
345 else
346 DP(msglvl, "DMAE: opcode 0x%08x\n"
347 "src [%08x], len [%d*4], dst [%08x]\n"
348 "comp_addr [%x:%08x], comp_val 0x%08x\n",
349 dmae->opcode, dmae->src_addr_lo >> 2,
350 dmae->len, dmae->dst_addr_lo >> 2,
351 dmae->comp_addr_hi, dmae->comp_addr_lo,
352 dmae->comp_val);
353 break;
354 default:
355 if (src_type == DMAE_CMD_SRC_PCI)
356 DP(msglvl, "DMAE: opcode 0x%08x\n"
357 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
358 "comp_addr [%x:%08x] comp_val 0x%08x\n",
359 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
360 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
361 dmae->comp_val);
362 else
363 DP(msglvl, "DMAE: opcode 0x%08x\n"
364 "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
365 "comp_addr [%x:%08x] comp_val 0x%08x\n",
366 dmae->opcode, dmae->src_addr_lo >> 2,
367 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
368 dmae->comp_val);
369 break;
370 }
371
372}
373 318
374/* copy command into DMAE command memory and set DMAE command go */ 319/* copy command into DMAE command memory and set DMAE command go */
375void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) 320void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -506,8 +451,6 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
506 dmae.dst_addr_hi = 0; 451 dmae.dst_addr_hi = 0;
507 dmae.len = len32; 452 dmae.len = len32;
508 453
509 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
510
511 /* issue the command and wait for completion */ 454 /* issue the command and wait for completion */
512 bnx2x_issue_dmae_with_comp(bp, &dmae); 455 bnx2x_issue_dmae_with_comp(bp, &dmae);
513} 456}
@@ -540,8 +483,6 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
540 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); 483 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
541 dmae.len = len32; 484 dmae.len = len32;
542 485
543 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
544
545 /* issue the command and wait for completion */ 486 /* issue the command and wait for completion */
546 bnx2x_issue_dmae_with_comp(bp, &dmae); 487 bnx2x_issue_dmae_with_comp(bp, &dmae);
547} 488}
@@ -562,27 +503,6 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
562 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); 503 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
563} 504}
564 505
565/* used only for slowpath so not inlined */
566static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
567{
568 u32 wb_write[2];
569
570 wb_write[0] = val_hi;
571 wb_write[1] = val_lo;
572 REG_WR_DMAE(bp, reg, wb_write, 2);
573}
574
575#ifdef USE_WB_RD
576static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
577{
578 u32 wb_data[2];
579
580 REG_RD_DMAE(bp, reg, wb_data, 2);
581
582 return HILO_U64(wb_data[0], wb_data[1]);
583}
584#endif
585
586static int bnx2x_mc_assert(struct bnx2x *bp) 506static int bnx2x_mc_assert(struct bnx2x *bp)
587{ 507{
588 char last_idx; 508 char last_idx;
@@ -756,7 +676,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
756 printk("%s" "end of fw dump\n", lvl); 676 printk("%s" "end of fw dump\n", lvl);
757} 677}
758 678
759static inline void bnx2x_fw_dump(struct bnx2x *bp) 679static void bnx2x_fw_dump(struct bnx2x *bp)
760{ 680{
761 bnx2x_fw_dump_lvl(bp, KERN_ERR); 681 bnx2x_fw_dump_lvl(bp, KERN_ERR);
762} 682}
@@ -1076,8 +996,8 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1076 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 996 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1077} 997}
1078 998
1079static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, 999static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1080 u32 expected, u32 poll_count) 1000 u32 expected, u32 poll_count)
1081{ 1001{
1082 u32 cur_cnt = poll_count; 1002 u32 cur_cnt = poll_count;
1083 u32 val; 1003 u32 val;
@@ -1088,8 +1008,8 @@ static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1088 return val; 1008 return val;
1089} 1009}
1090 1010
1091static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, 1011static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1092 char *msg, u32 poll_cnt) 1012 char *msg, u32 poll_cnt)
1093{ 1013{
1094 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); 1014 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1095 if (val != 0) { 1015 if (val != 0) {
@@ -1186,7 +1106,7 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1186 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 1106 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1187 1107
1188 1108
1189static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, 1109static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
1190 u32 poll_cnt) 1110 u32 poll_cnt)
1191{ 1111{
1192 struct sdm_op_gen op_gen = {0}; 1112 struct sdm_op_gen op_gen = {0};
@@ -1220,7 +1140,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
1220 return ret; 1140 return ret;
1221} 1141}
1222 1142
1223static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) 1143static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1224{ 1144{
1225 int pos; 1145 int pos;
1226 u16 status; 1146 u16 status;
@@ -1361,14 +1281,17 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
1361 int port = BP_PORT(bp); 1281 int port = BP_PORT(bp);
1362 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1282 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1363 u32 val = REG_RD(bp, addr); 1283 u32 val = REG_RD(bp, addr);
1364 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1284 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1365 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; 1285 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1286 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1366 1287
1367 if (msix) { 1288 if (msix) {
1368 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1289 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1369 HC_CONFIG_0_REG_INT_LINE_EN_0); 1290 HC_CONFIG_0_REG_INT_LINE_EN_0);
1370 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1291 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1371 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1292 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1293 if (single_msix)
1294 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1372 } else if (msi) { 1295 } else if (msi) {
1373 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 1296 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1374 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1297 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
@@ -1425,8 +1348,9 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
1425static void bnx2x_igu_int_enable(struct bnx2x *bp) 1348static void bnx2x_igu_int_enable(struct bnx2x *bp)
1426{ 1349{
1427 u32 val; 1350 u32 val;
1428 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1351 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1429 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; 1352 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1353 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1430 1354
1431 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 1355 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1432 1356
@@ -1436,6 +1360,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
1436 val |= (IGU_PF_CONF_FUNC_EN | 1360 val |= (IGU_PF_CONF_FUNC_EN |
1437 IGU_PF_CONF_MSI_MSIX_EN | 1361 IGU_PF_CONF_MSI_MSIX_EN |
1438 IGU_PF_CONF_ATTN_BIT_EN); 1362 IGU_PF_CONF_ATTN_BIT_EN);
1363
1364 if (single_msix)
1365 val |= IGU_PF_CONF_SINGLE_ISR_EN;
1439 } else if (msi) { 1366 } else if (msi) {
1440 val &= ~IGU_PF_CONF_INT_LINE_EN; 1367 val &= ~IGU_PF_CONF_INT_LINE_EN;
1441 val |= (IGU_PF_CONF_FUNC_EN | 1368 val |= (IGU_PF_CONF_FUNC_EN |
@@ -1455,6 +1382,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
1455 1382
1456 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 1383 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1457 1384
1385 if (val & IGU_PF_CONF_INT_LINE_EN)
1386 pci_intx(bp->pdev, true);
1387
1458 barrier(); 1388 barrier();
1459 1389
1460 /* init leading/trailing edge */ 1390 /* init leading/trailing edge */
@@ -1623,7 +1553,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1623 * Returns the recovery leader resource id according to the engine this function 1553 * Returns the recovery leader resource id according to the engine this function
1624 * belongs to. Currently only only 2 engines is supported. 1554 * belongs to. Currently only only 2 engines is supported.
1625 */ 1555 */
1626static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp) 1556static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1627{ 1557{
1628 if (BP_PATH(bp)) 1558 if (BP_PATH(bp))
1629 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; 1559 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
@@ -1636,9 +1566,9 @@ static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1636 * 1566 *
1637 * @bp: driver handle 1567 * @bp: driver handle
1638 * 1568 *
1639 * Tries to aquire a leader lock for cuurent engine. 1569 * Tries to aquire a leader lock for current engine.
1640 */ 1570 */
1641static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp) 1571static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1642{ 1572{
1643 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); 1573 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1644} 1574}
@@ -1719,6 +1649,27 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1719 1649
1720 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); 1650 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1721 1651
1652 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1653 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1654 /* if Q update ramrod is completed for last Q in AFEX vif set
1655 * flow, then ACK MCP at the end
1656 *
1657 * mark pending ACK to MCP bit.
1658 * prevent case that both bits are cleared.
1659 * At the end of load/unload driver checks that
1660 * sp_state is cleaerd, and this order prevents
1661 * races
1662 */
1663 smp_mb__before_clear_bit();
1664 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1665 wmb();
1666 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1667 smp_mb__after_clear_bit();
1668
1669 /* schedule workqueue to send ack to MCP */
1670 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1671 }
1672
1722 return; 1673 return;
1723} 1674}
1724 1675
@@ -2229,40 +2180,6 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2229 return rc; 2180 return rc;
2230} 2181}
2231 2182
2232static void bnx2x_init_port_minmax(struct bnx2x *bp)
2233{
2234 u32 r_param = bp->link_vars.line_speed / 8;
2235 u32 fair_periodic_timeout_usec;
2236 u32 t_fair;
2237
2238 memset(&(bp->cmng.rs_vars), 0,
2239 sizeof(struct rate_shaping_vars_per_port));
2240 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2241
2242 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2243 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2244
2245 /* this is the threshold below which no timer arming will occur
2246 1.25 coefficient is for the threshold to be a little bigger
2247 than the real time, to compensate for timer in-accuracy */
2248 bp->cmng.rs_vars.rs_threshold =
2249 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2250
2251 /* resolution of fairness timer */
2252 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2253 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2254 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2255
2256 /* this is the threshold below which we won't arm the timer anymore */
2257 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2258
2259 /* we multiply by 1e3/8 to get bytes/msec.
2260 We don't want the credits to pass a credit
2261 of the t_fair*FAIR_MEM (algorithm resolution) */
2262 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2263 /* since each tick is 4 usec */
2264 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2265}
2266 2183
2267/* Calculates the sum of vn_min_rates. 2184/* Calculates the sum of vn_min_rates.
2268 It's needed for further normalizing of the min_rates. 2185 It's needed for further normalizing of the min_rates.
@@ -2273,12 +2190,12 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
2273 In the later case fainess algorithm should be deactivated. 2190 In the later case fainess algorithm should be deactivated.
2274 If not all min_rates are zero then those that are zeroes will be set to 1. 2191 If not all min_rates are zero then those that are zeroes will be set to 1.
2275 */ 2192 */
2276static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) 2193static void bnx2x_calc_vn_min(struct bnx2x *bp,
2194 struct cmng_init_input *input)
2277{ 2195{
2278 int all_zero = 1; 2196 int all_zero = 1;
2279 int vn; 2197 int vn;
2280 2198
2281 bp->vn_weight_sum = 0;
2282 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2199 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2283 u32 vn_cfg = bp->mf_config[vn]; 2200 u32 vn_cfg = bp->mf_config[vn];
2284 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2201 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
@@ -2286,106 +2203,56 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2286 2203
2287 /* Skip hidden vns */ 2204 /* Skip hidden vns */
2288 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) 2205 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2289 continue; 2206 vn_min_rate = 0;
2290
2291 /* If min rate is zero - set it to 1 */ 2207 /* If min rate is zero - set it to 1 */
2292 if (!vn_min_rate) 2208 else if (!vn_min_rate)
2293 vn_min_rate = DEF_MIN_RATE; 2209 vn_min_rate = DEF_MIN_RATE;
2294 else 2210 else
2295 all_zero = 0; 2211 all_zero = 0;
2296 2212
2297 bp->vn_weight_sum += vn_min_rate; 2213 input->vnic_min_rate[vn] = vn_min_rate;
2298 } 2214 }
2299 2215
2300 /* if ETS or all min rates are zeros - disable fairness */ 2216 /* if ETS or all min rates are zeros - disable fairness */
2301 if (BNX2X_IS_ETS_ENABLED(bp)) { 2217 if (BNX2X_IS_ETS_ENABLED(bp)) {
2302 bp->cmng.flags.cmng_enables &= 2218 input->flags.cmng_enables &=
2303 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2219 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2304 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); 2220 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2305 } else if (all_zero) { 2221 } else if (all_zero) {
2306 bp->cmng.flags.cmng_enables &= 2222 input->flags.cmng_enables &=
2307 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2223 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2308 DP(NETIF_MSG_IFUP, "All MIN values are zeroes" 2224 DP(NETIF_MSG_IFUP,
2309 " fairness will be disabled\n"); 2225 "All MIN values are zeroes fairness will be disabled\n");
2310 } else 2226 } else
2311 bp->cmng.flags.cmng_enables |= 2227 input->flags.cmng_enables |=
2312 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2228 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2313} 2229}
2314 2230
2315static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) 2231static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2232 struct cmng_init_input *input)
2316{ 2233{
2317 struct rate_shaping_vars_per_vn m_rs_vn; 2234 u16 vn_max_rate;
2318 struct fairness_vars_per_vn m_fair_vn;
2319 u32 vn_cfg = bp->mf_config[vn]; 2235 u32 vn_cfg = bp->mf_config[vn];
2320 int func = func_by_vn(bp, vn);
2321 u16 vn_min_rate, vn_max_rate;
2322 int i;
2323 2236
2324 /* If function is hidden - set min and max to zeroes */ 2237 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2325 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2326 vn_min_rate = 0;
2327 vn_max_rate = 0; 2238 vn_max_rate = 0;
2328 2239 else {
2329 } else {
2330 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); 2240 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2331 2241
2332 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2242 if (IS_MF_SI(bp)) {
2333 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2334 /* If fairness is enabled (not all min rates are zeroes) and
2335 if current min rate is zero - set it to 1.
2336 This is a requirement of the algorithm. */
2337 if (bp->vn_weight_sum && (vn_min_rate == 0))
2338 vn_min_rate = DEF_MIN_RATE;
2339
2340 if (IS_MF_SI(bp))
2341 /* maxCfg in percents of linkspeed */ 2243 /* maxCfg in percents of linkspeed */
2342 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; 2244 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2343 else 2245 } else /* SD modes */
2344 /* maxCfg is absolute in 100Mb units */ 2246 /* maxCfg is absolute in 100Mb units */
2345 vn_max_rate = maxCfg * 100; 2247 vn_max_rate = maxCfg * 100;
2346 } 2248 }
2347 2249
2348 DP(NETIF_MSG_IFUP, 2250 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2349 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", 2251
2350 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); 2252 input->vnic_max_rate[vn] = vn_max_rate;
2351
2352 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2353 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2354
2355 /* global vn counter - maximal Mbps for this vn */
2356 m_rs_vn.vn_counter.rate = vn_max_rate;
2357
2358 /* quota - number of bytes transmitted in this period */
2359 m_rs_vn.vn_counter.quota =
2360 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2361
2362 if (bp->vn_weight_sum) {
2363 /* credit for each period of the fairness algorithm:
2364 number of bytes in T_FAIR (the vn share the port rate).
2365 vn_weight_sum should not be larger than 10000, thus
2366 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2367 than zero */
2368 m_fair_vn.vn_credit_delta =
2369 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2370 (8 * bp->vn_weight_sum))),
2371 (bp->cmng.fair_vars.fair_threshold +
2372 MIN_ABOVE_THRESH));
2373 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2374 m_fair_vn.vn_credit_delta);
2375 }
2376
2377 /* Store it to internal memory */
2378 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2379 REG_WR(bp, BAR_XSTRORM_INTMEM +
2380 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2381 ((u32 *)(&m_rs_vn))[i]);
2382
2383 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2384 REG_WR(bp, BAR_XSTRORM_INTMEM +
2385 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2386 ((u32 *)(&m_fair_vn))[i]);
2387} 2253}
2388 2254
2255
2389static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) 2256static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2390{ 2257{
2391 if (CHIP_REV_IS_SLOW(bp)) 2258 if (CHIP_REV_IS_SLOW(bp))
@@ -2423,38 +2290,42 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
2423 bp->mf_config[vn] = 2290 bp->mf_config[vn] =
2424 MF_CFG_RD(bp, func_mf_config[func].config); 2291 MF_CFG_RD(bp, func_mf_config[func].config);
2425 } 2292 }
2293 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2294 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2295 bp->flags |= MF_FUNC_DIS;
2296 } else {
2297 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2298 bp->flags &= ~MF_FUNC_DIS;
2299 }
2426} 2300}
2427 2301
2428static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) 2302static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2429{ 2303{
2304 struct cmng_init_input input;
2305 memset(&input, 0, sizeof(struct cmng_init_input));
2306
2307 input.port_rate = bp->link_vars.line_speed;
2430 2308
2431 if (cmng_type == CMNG_FNS_MINMAX) { 2309 if (cmng_type == CMNG_FNS_MINMAX) {
2432 int vn; 2310 int vn;
2433 2311
2434 /* clear cmng_enables */
2435 bp->cmng.flags.cmng_enables = 0;
2436
2437 /* read mf conf from shmem */ 2312 /* read mf conf from shmem */
2438 if (read_cfg) 2313 if (read_cfg)
2439 bnx2x_read_mf_cfg(bp); 2314 bnx2x_read_mf_cfg(bp);
2440 2315
2441 /* Init rate shaping and fairness contexts */
2442 bnx2x_init_port_minmax(bp);
2443
2444 /* vn_weight_sum and enable fairness if not 0 */ 2316 /* vn_weight_sum and enable fairness if not 0 */
2445 bnx2x_calc_vn_weight_sum(bp); 2317 bnx2x_calc_vn_min(bp, &input);
2446 2318
2447 /* calculate and set min-max rate for each vn */ 2319 /* calculate and set min-max rate for each vn */
2448 if (bp->port.pmf) 2320 if (bp->port.pmf)
2449 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) 2321 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2450 bnx2x_init_vn_minmax(bp, vn); 2322 bnx2x_calc_vn_max(bp, vn, &input);
2451 2323
2452 /* always enable rate shaping and fairness */ 2324 /* always enable rate shaping and fairness */
2453 bp->cmng.flags.cmng_enables |= 2325 input.flags.cmng_enables |=
2454 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 2326 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2455 if (!bp->vn_weight_sum) 2327
2456 DP(NETIF_MSG_IFUP, "All MIN values are zeroes" 2328 bnx2x_init_cmng(&input, &bp->cmng);
2457 " fairness will be disabled\n");
2458 return; 2329 return;
2459 } 2330 }
2460 2331
@@ -2463,6 +2334,35 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2463 "rate shaping and fairness are disabled\n"); 2334 "rate shaping and fairness are disabled\n");
2464} 2335}
2465 2336
2337static void storm_memset_cmng(struct bnx2x *bp,
2338 struct cmng_init *cmng,
2339 u8 port)
2340{
2341 int vn;
2342 size_t size = sizeof(struct cmng_struct_per_port);
2343
2344 u32 addr = BAR_XSTRORM_INTMEM +
2345 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2346
2347 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2348
2349 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2350 int func = func_by_vn(bp, vn);
2351
2352 addr = BAR_XSTRORM_INTMEM +
2353 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2354 size = sizeof(struct rate_shaping_vars_per_vn);
2355 __storm_memset_struct(bp, addr, size,
2356 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2357
2358 addr = BAR_XSTRORM_INTMEM +
2359 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2360 size = sizeof(struct fairness_vars_per_vn);
2361 __storm_memset_struct(bp, addr, size,
2362 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2363 }
2364}
2365
2466/* This function is called upon link interrupt */ 2366/* This function is called upon link interrupt */
2467static void bnx2x_link_attn(struct bnx2x *bp) 2367static void bnx2x_link_attn(struct bnx2x *bp)
2468{ 2368{
@@ -2535,6 +2435,190 @@ void bnx2x__link_status_update(struct bnx2x *bp)
2535 bnx2x_link_report(bp); 2435 bnx2x_link_report(bp);
2536} 2436}
2537 2437
2438static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2439 u16 vlan_val, u8 allowed_prio)
2440{
2441 struct bnx2x_func_state_params func_params = {0};
2442 struct bnx2x_func_afex_update_params *f_update_params =
2443 &func_params.params.afex_update;
2444
2445 func_params.f_obj = &bp->func_obj;
2446 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2447
2448 /* no need to wait for RAMROD completion, so don't
2449 * set RAMROD_COMP_WAIT flag
2450 */
2451
2452 f_update_params->vif_id = vifid;
2453 f_update_params->afex_default_vlan = vlan_val;
2454 f_update_params->allowed_priorities = allowed_prio;
2455
2456 /* if ramrod can not be sent, response to MCP immediately */
2457 if (bnx2x_func_state_change(bp, &func_params) < 0)
2458 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2459
2460 return 0;
2461}
2462
2463static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2464 u16 vif_index, u8 func_bit_map)
2465{
2466 struct bnx2x_func_state_params func_params = {0};
2467 struct bnx2x_func_afex_viflists_params *update_params =
2468 &func_params.params.afex_viflists;
2469 int rc;
2470 u32 drv_msg_code;
2471
2472 /* validate only LIST_SET and LIST_GET are received from switch */
2473 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2474 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2475 cmd_type);
2476
2477 func_params.f_obj = &bp->func_obj;
2478 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2479
2480 /* set parameters according to cmd_type */
2481 update_params->afex_vif_list_command = cmd_type;
2482 update_params->vif_list_index = cpu_to_le16(vif_index);
2483 update_params->func_bit_map =
2484 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2485 update_params->func_to_clear = 0;
2486 drv_msg_code =
2487 (cmd_type == VIF_LIST_RULE_GET) ?
2488 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2489 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2490
2491 /* if ramrod can not be sent, respond to MCP immediately for
2492 * SET and GET requests (other are not triggered from MCP)
2493 */
2494 rc = bnx2x_func_state_change(bp, &func_params);
2495 if (rc < 0)
2496 bnx2x_fw_command(bp, drv_msg_code, 0);
2497
2498 return 0;
2499}
2500
2501static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2502{
2503 struct afex_stats afex_stats;
2504 u32 func = BP_ABS_FUNC(bp);
2505 u32 mf_config;
2506 u16 vlan_val;
2507 u32 vlan_prio;
2508 u16 vif_id;
2509 u8 allowed_prio;
2510 u8 vlan_mode;
2511 u32 addr_to_write, vifid, addrs, stats_type, i;
2512
2513 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2514 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2515 DP(BNX2X_MSG_MCP,
2516 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2517 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2518 }
2519
2520 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2521 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2522 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2523 DP(BNX2X_MSG_MCP,
2524 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2525 vifid, addrs);
2526 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2527 addrs);
2528 }
2529
2530 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2531 addr_to_write = SHMEM2_RD(bp,
2532 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2533 stats_type = SHMEM2_RD(bp,
2534 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2535
2536 DP(BNX2X_MSG_MCP,
2537 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2538 addr_to_write);
2539
2540 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2541
2542 /* write response to scratchpad, for MCP */
2543 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2544 REG_WR(bp, addr_to_write + i*sizeof(u32),
2545 *(((u32 *)(&afex_stats))+i));
2546
2547 /* send ack message to MCP */
2548 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2549 }
2550
2551 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2552 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2553 bp->mf_config[BP_VN(bp)] = mf_config;
2554 DP(BNX2X_MSG_MCP,
2555 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2556 mf_config);
2557
2558 /* if VIF_SET is "enabled" */
2559 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2560 /* set rate limit directly to internal RAM */
2561 struct cmng_init_input cmng_input;
2562 struct rate_shaping_vars_per_vn m_rs_vn;
2563 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2564 u32 addr = BAR_XSTRORM_INTMEM +
2565 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2566
2567 bp->mf_config[BP_VN(bp)] = mf_config;
2568
2569 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2570 m_rs_vn.vn_counter.rate =
2571 cmng_input.vnic_max_rate[BP_VN(bp)];
2572 m_rs_vn.vn_counter.quota =
2573 (m_rs_vn.vn_counter.rate *
2574 RS_PERIODIC_TIMEOUT_USEC) / 8;
2575
2576 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2577
2578 /* read relevant values from mf_cfg struct in shmem */
2579 vif_id =
2580 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2581 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2582 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2583 vlan_val =
2584 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2585 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2586 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2587 vlan_prio = (mf_config &
2588 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2589 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2590 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2591 vlan_mode =
2592 (MF_CFG_RD(bp,
2593 func_mf_config[func].afex_config) &
2594 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2595 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2596 allowed_prio =
2597 (MF_CFG_RD(bp,
2598 func_mf_config[func].afex_config) &
2599 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2600 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2601
2602 /* send ramrod to FW, return in case of failure */
2603 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2604 allowed_prio))
2605 return;
2606
2607 bp->afex_def_vlan_tag = vlan_val;
2608 bp->afex_vlan_mode = vlan_mode;
2609 } else {
2610 /* notify link down because BP->flags is disabled */
2611 bnx2x_link_report(bp);
2612
2613 /* send INVALID VIF ramrod to FW */
2614 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2615
2616 /* Reset the default afex VLAN */
2617 bp->afex_def_vlan_tag = -1;
2618 }
2619 }
2620}
2621
2538static void bnx2x_pmf_update(struct bnx2x *bp) 2622static void bnx2x_pmf_update(struct bnx2x *bp)
2539{ 2623{
2540 int port = BP_PORT(bp); 2624 int port = BP_PORT(bp);
@@ -2619,6 +2703,18 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2619} 2703}
2620 2704
2621 2705
2706static void storm_memset_func_cfg(struct bnx2x *bp,
2707 struct tstorm_eth_function_common_config *tcfg,
2708 u16 abs_fid)
2709{
2710 size_t size = sizeof(struct tstorm_eth_function_common_config);
2711
2712 u32 addr = BAR_TSTRORM_INTMEM +
2713 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2714
2715 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2716}
2717
2622void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) 2718void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2623{ 2719{
2624 if (CHIP_IS_E1x(bp)) { 2720 if (CHIP_IS_E1x(bp)) {
@@ -2648,9 +2744,9 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2648 * 2744 *
2649 * Return the flags that are common for the Tx-only and not normal connections. 2745 * Return the flags that are common for the Tx-only and not normal connections.
2650 */ 2746 */
2651static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, 2747static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2652 struct bnx2x_fastpath *fp, 2748 struct bnx2x_fastpath *fp,
2653 bool zero_stats) 2749 bool zero_stats)
2654{ 2750{
2655 unsigned long flags = 0; 2751 unsigned long flags = 0;
2656 2752
@@ -2670,9 +2766,9 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2670 return flags; 2766 return flags;
2671} 2767}
2672 2768
2673static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, 2769static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2674 struct bnx2x_fastpath *fp, 2770 struct bnx2x_fastpath *fp,
2675 bool leading) 2771 bool leading)
2676{ 2772{
2677 unsigned long flags = 0; 2773 unsigned long flags = 0;
2678 2774
@@ -2680,8 +2776,11 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2680 if (IS_MF_SD(bp)) 2776 if (IS_MF_SD(bp))
2681 __set_bit(BNX2X_Q_FLG_OV, &flags); 2777 __set_bit(BNX2X_Q_FLG_OV, &flags);
2682 2778
2683 if (IS_FCOE_FP(fp)) 2779 if (IS_FCOE_FP(fp)) {
2684 __set_bit(BNX2X_Q_FLG_FCOE, &flags); 2780 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
2781 /* For FCoE - force usage of default priority (for afex) */
2782 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
2783 }
2685 2784
2686 if (!fp->disable_tpa) { 2785 if (!fp->disable_tpa) {
2687 __set_bit(BNX2X_Q_FLG_TPA, &flags); 2786 __set_bit(BNX2X_Q_FLG_TPA, &flags);
@@ -2698,6 +2797,10 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2698 /* Always set HW VLAN stripping */ 2797 /* Always set HW VLAN stripping */
2699 __set_bit(BNX2X_Q_FLG_VLAN, &flags); 2798 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
2700 2799
2800 /* configure silent vlan removal */
2801 if (IS_MF_AFEX(bp))
2802 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
2803
2701 2804
2702 return flags | bnx2x_get_common_flags(bp, fp, true); 2805 return flags | bnx2x_get_common_flags(bp, fp, true);
2703} 2806}
@@ -2800,6 +2903,13 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2800 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; 2903 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2801 else 2904 else
2802 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 2905 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
2906 /* configure silent vlan removal
2907 * if multi function mode is afex, then mask default vlan
2908 */
2909 if (IS_MF_AFEX(bp)) {
2910 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
2911 rxq_init->silent_removal_mask = VLAN_VID_MASK;
2912 }
2803} 2913}
2804 2914
2805static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, 2915static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
@@ -3051,7 +3161,7 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3051 * configure FW 3161 * configure FW
3052 * notify others function about the change 3162 * notify others function about the change
3053 */ 3163 */
3054static inline void bnx2x_config_mf_bw(struct bnx2x *bp) 3164static void bnx2x_config_mf_bw(struct bnx2x *bp)
3055{ 3165{
3056 if (bp->link_vars.link_up) { 3166 if (bp->link_vars.link_up) {
3057 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); 3167 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
@@ -3060,7 +3170,7 @@ static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
3060 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3170 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3061} 3171}
3062 3172
3063static inline void bnx2x_set_mf_bw(struct bnx2x *bp) 3173static void bnx2x_set_mf_bw(struct bnx2x *bp)
3064{ 3174{
3065 bnx2x_config_mf_bw(bp); 3175 bnx2x_config_mf_bw(bp);
3066 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3176 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
@@ -3147,7 +3257,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3147} 3257}
3148 3258
3149/* must be called under the spq lock */ 3259/* must be called under the spq lock */
3150static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) 3260static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3151{ 3261{
3152 struct eth_spe *next_spe = bp->spq_prod_bd; 3262 struct eth_spe *next_spe = bp->spq_prod_bd;
3153 3263
@@ -3163,7 +3273,7 @@ static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3163} 3273}
3164 3274
3165/* must be called under the spq lock */ 3275/* must be called under the spq lock */
3166static inline void bnx2x_sp_prod_update(struct bnx2x *bp) 3276static void bnx2x_sp_prod_update(struct bnx2x *bp)
3167{ 3277{
3168 int func = BP_FUNC(bp); 3278 int func = BP_FUNC(bp);
3169 3279
@@ -3185,7 +3295,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
3185 * @cmd: command to check 3295 * @cmd: command to check
3186 * @cmd_type: command type 3296 * @cmd_type: command type
3187 */ 3297 */
3188static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) 3298static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3189{ 3299{
3190 if ((cmd_type == NONE_CONNECTION_TYPE) || 3300 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3191 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 3301 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
@@ -3319,7 +3429,7 @@ static void bnx2x_release_alr(struct bnx2x *bp)
3319#define BNX2X_DEF_SB_ATT_IDX 0x0001 3429#define BNX2X_DEF_SB_ATT_IDX 0x0001
3320#define BNX2X_DEF_SB_IDX 0x0002 3430#define BNX2X_DEF_SB_IDX 0x0002
3321 3431
3322static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 3432static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3323{ 3433{
3324 struct host_sp_status_block *def_sb = bp->def_status_blk; 3434 struct host_sp_status_block *def_sb = bp->def_status_blk;
3325 u16 rc = 0; 3435 u16 rc = 0;
@@ -3451,7 +3561,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3451 } 3561 }
3452} 3562}
3453 3563
3454static inline void bnx2x_fan_failure(struct bnx2x *bp) 3564static void bnx2x_fan_failure(struct bnx2x *bp)
3455{ 3565{
3456 int port = BP_PORT(bp); 3566 int port = BP_PORT(bp);
3457 u32 ext_phy_config; 3567 u32 ext_phy_config;
@@ -3481,7 +3591,7 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
3481 3591
3482} 3592}
3483 3593
3484static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 3594static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3485{ 3595{
3486 int port = BP_PORT(bp); 3596 int port = BP_PORT(bp);
3487 int reg_offset; 3597 int reg_offset;
@@ -3521,7 +3631,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3521 } 3631 }
3522} 3632}
3523 3633
3524static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) 3634static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3525{ 3635{
3526 u32 val; 3636 u32 val;
3527 3637
@@ -3552,7 +3662,7 @@ static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3552 } 3662 }
3553} 3663}
3554 3664
3555static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) 3665static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3556{ 3666{
3557 u32 val; 3667 u32 val;
3558 3668
@@ -3596,7 +3706,7 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3596 } 3706 }
3597} 3707}
3598 3708
3599static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) 3709static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3600{ 3710{
3601 u32 val; 3711 u32 val;
3602 3712
@@ -3606,6 +3716,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3606 int func = BP_FUNC(bp); 3716 int func = BP_FUNC(bp);
3607 3717
3608 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3718 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3719 bnx2x_read_mf_cfg(bp);
3609 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, 3720 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3610 func_mf_config[BP_ABS_FUNC(bp)].config); 3721 func_mf_config[BP_ABS_FUNC(bp)].config);
3611 val = SHMEM_RD(bp, 3722 val = SHMEM_RD(bp,
@@ -3628,6 +3739,9 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3628 /* start dcbx state machine */ 3739 /* start dcbx state machine */
3629 bnx2x_dcbx_set_params(bp, 3740 bnx2x_dcbx_set_params(bp,
3630 BNX2X_DCBX_STATE_NEG_RECEIVED); 3741 BNX2X_DCBX_STATE_NEG_RECEIVED);
3742 if (val & DRV_STATUS_AFEX_EVENT_MASK)
3743 bnx2x_handle_afex_cmd(bp,
3744 val & DRV_STATUS_AFEX_EVENT_MASK);
3631 if (bp->link_vars.periodic_flags & 3745 if (bp->link_vars.periodic_flags &
3632 PERIODIC_FLAGS_LINK_EVENT) { 3746 PERIODIC_FLAGS_LINK_EVENT) {
3633 /* sync with link */ 3747 /* sync with link */
@@ -3722,7 +3836,7 @@ void bnx2x_set_reset_global(struct bnx2x *bp)
3722 * 3836 *
3723 * Should be run under rtnl lock 3837 * Should be run under rtnl lock
3724 */ 3838 */
3725static inline void bnx2x_clear_reset_global(struct bnx2x *bp) 3839static void bnx2x_clear_reset_global(struct bnx2x *bp)
3726{ 3840{
3727 u32 val; 3841 u32 val;
3728 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3842 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
@@ -3736,7 +3850,7 @@ static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
3736 * 3850 *
3737 * should be run under rtnl lock 3851 * should be run under rtnl lock
3738 */ 3852 */
3739static inline bool bnx2x_reset_is_global(struct bnx2x *bp) 3853static bool bnx2x_reset_is_global(struct bnx2x *bp)
3740{ 3854{
3741 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3855 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3742 3856
@@ -3749,7 +3863,7 @@ static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
3749 * 3863 *
3750 * Should be run under rtnl lock 3864 * Should be run under rtnl lock
3751 */ 3865 */
3752static inline void bnx2x_set_reset_done(struct bnx2x *bp) 3866static void bnx2x_set_reset_done(struct bnx2x *bp)
3753{ 3867{
3754 u32 val; 3868 u32 val;
3755 u32 bit = BP_PATH(bp) ? 3869 u32 bit = BP_PATH(bp) ?
@@ -3874,7 +3988,7 @@ bool bnx2x_clear_pf_load(struct bnx2x *bp)
3874 * 3988 *
3875 * should be run under rtnl lock 3989 * should be run under rtnl lock
3876 */ 3990 */
3877static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine) 3991static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
3878{ 3992{
3879 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : 3993 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
3880 BNX2X_PATH0_LOAD_CNT_MASK); 3994 BNX2X_PATH0_LOAD_CNT_MASK);
@@ -3895,7 +4009,7 @@ static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
3895/* 4009/*
3896 * Reset the load status for the current engine. 4010 * Reset the load status for the current engine.
3897 */ 4011 */
3898static inline void bnx2x_clear_load_status(struct bnx2x *bp) 4012static void bnx2x_clear_load_status(struct bnx2x *bp)
3899{ 4013{
3900 u32 val; 4014 u32 val;
3901 u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4015 u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
@@ -3906,13 +4020,13 @@ static inline void bnx2x_clear_load_status(struct bnx2x *bp)
3906 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4020 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3907} 4021}
3908 4022
3909static inline void _print_next_block(int idx, const char *blk) 4023static void _print_next_block(int idx, const char *blk)
3910{ 4024{
3911 pr_cont("%s%s", idx ? ", " : "", blk); 4025 pr_cont("%s%s", idx ? ", " : "", blk);
3912} 4026}
3913 4027
3914static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, 4028static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
3915 bool print) 4029 bool print)
3916{ 4030{
3917 int i = 0; 4031 int i = 0;
3918 u32 cur_bit = 0; 4032 u32 cur_bit = 0;
@@ -3959,8 +4073,8 @@ static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
3959 return par_num; 4073 return par_num;
3960} 4074}
3961 4075
3962static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, 4076static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
3963 bool *global, bool print) 4077 bool *global, bool print)
3964{ 4078{
3965 int i = 0; 4079 int i = 0;
3966 u32 cur_bit = 0; 4080 u32 cur_bit = 0;
@@ -4045,8 +4159,8 @@ static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
4045 return par_num; 4159 return par_num;
4046} 4160}
4047 4161
4048static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, 4162static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
4049 bool print) 4163 bool print)
4050{ 4164{
4051 int i = 0; 4165 int i = 0;
4052 u32 cur_bit = 0; 4166 u32 cur_bit = 0;
@@ -4097,8 +4211,8 @@ static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
4097 return par_num; 4211 return par_num;
4098} 4212}
4099 4213
4100static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 4214static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4101 bool *global, bool print) 4215 bool *global, bool print)
4102{ 4216{
4103 int i = 0; 4217 int i = 0;
4104 u32 cur_bit = 0; 4218 u32 cur_bit = 0;
@@ -4139,8 +4253,8 @@ static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4139 return par_num; 4253 return par_num;
4140} 4254}
4141 4255
4142static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, 4256static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
4143 bool print) 4257 bool print)
4144{ 4258{
4145 int i = 0; 4259 int i = 0;
4146 u32 cur_bit = 0; 4260 u32 cur_bit = 0;
@@ -4166,8 +4280,8 @@ static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
4166 return par_num; 4280 return par_num;
4167} 4281}
4168 4282
4169static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4283static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4170 u32 *sig) 4284 u32 *sig)
4171{ 4285{
4172 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4286 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4173 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4287 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
@@ -4238,7 +4352,7 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4238} 4352}
4239 4353
4240 4354
4241static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 4355static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4242{ 4356{
4243 u32 val; 4357 u32 val;
4244 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 4358 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
@@ -4430,7 +4544,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4430 igu_addr); 4544 igu_addr);
4431} 4545}
4432 4546
4433static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) 4547static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4434{ 4548{
4435 /* No memory barriers */ 4549 /* No memory barriers */
4436 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); 4550 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
@@ -4461,7 +4575,7 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4461} 4575}
4462#endif 4576#endif
4463 4577
4464static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 4578static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4465{ 4579{
4466 struct bnx2x_mcast_ramrod_params rparam; 4580 struct bnx2x_mcast_ramrod_params rparam;
4467 int rc; 4581 int rc;
@@ -4486,8 +4600,8 @@ static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4486 netif_addr_unlock_bh(bp->dev); 4600 netif_addr_unlock_bh(bp->dev);
4487} 4601}
4488 4602
4489static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, 4603static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4490 union event_ring_elem *elem) 4604 union event_ring_elem *elem)
4491{ 4605{
4492 unsigned long ramrod_flags = 0; 4606 unsigned long ramrod_flags = 0;
4493 int rc = 0; 4607 int rc = 0;
@@ -4534,7 +4648,7 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4534static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 4648static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
4535#endif 4649#endif
4536 4650
4537static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 4651static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4538{ 4652{
4539 netif_addr_lock_bh(bp->dev); 4653 netif_addr_lock_bh(bp->dev);
4540 4654
@@ -4555,7 +4669,94 @@ static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4555 netif_addr_unlock_bh(bp->dev); 4669 netif_addr_unlock_bh(bp->dev);
4556} 4670}
4557 4671
4558static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 4672static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
4673 union event_ring_elem *elem)
4674{
4675 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
4676 DP(BNX2X_MSG_SP,
4677 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
4678 elem->message.data.vif_list_event.func_bit_map);
4679 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
4680 elem->message.data.vif_list_event.func_bit_map);
4681 } else if (elem->message.data.vif_list_event.echo ==
4682 VIF_LIST_RULE_SET) {
4683 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
4684 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
4685 }
4686}
4687
4688/* called with rtnl_lock */
4689static void bnx2x_after_function_update(struct bnx2x *bp)
4690{
4691 int q, rc;
4692 struct bnx2x_fastpath *fp;
4693 struct bnx2x_queue_state_params queue_params = {NULL};
4694 struct bnx2x_queue_update_params *q_update_params =
4695 &queue_params.params.update;
4696
4697 /* Send Q update command with afex vlan removal values for all Qs */
4698 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
4699
4700 /* set silent vlan removal values according to vlan mode */
4701 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4702 &q_update_params->update_flags);
4703 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
4704 &q_update_params->update_flags);
4705 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4706
4707 /* in access mode mark mask and value are 0 to strip all vlans */
4708 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
4709 q_update_params->silent_removal_value = 0;
4710 q_update_params->silent_removal_mask = 0;
4711 } else {
4712 q_update_params->silent_removal_value =
4713 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
4714 q_update_params->silent_removal_mask = VLAN_VID_MASK;
4715 }
4716
4717 for_each_eth_queue(bp, q) {
4718 /* Set the appropriate Queue object */
4719 fp = &bp->fp[q];
4720 queue_params.q_obj = &fp->q_obj;
4721
4722 /* send the ramrod */
4723 rc = bnx2x_queue_state_change(bp, &queue_params);
4724 if (rc < 0)
4725 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4726 q);
4727 }
4728
4729#ifdef BCM_CNIC
4730 if (!NO_FCOE(bp)) {
4731 fp = &bp->fp[FCOE_IDX];
4732 queue_params.q_obj = &fp->q_obj;
4733
4734 /* clear pending completion bit */
4735 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4736
4737 /* mark latest Q bit */
4738 smp_mb__before_clear_bit();
4739 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
4740 smp_mb__after_clear_bit();
4741
4742 /* send Q update ramrod for FCoE Q */
4743 rc = bnx2x_queue_state_change(bp, &queue_params);
4744 if (rc < 0)
4745 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4746 q);
4747 } else {
4748 /* If no FCoE ring - ACK MCP now */
4749 bnx2x_link_report(bp);
4750 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4751 }
4752#else
4753 /* If no FCoE ring - ACK MCP now */
4754 bnx2x_link_report(bp);
4755 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4756#endif /* BCM_CNIC */
4757}
4758
4759static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4559 struct bnx2x *bp, u32 cid) 4760 struct bnx2x *bp, u32 cid)
4560{ 4761{
4561 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 4762 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
@@ -4653,6 +4854,28 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4653 break; 4854 break;
4654 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 4855 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
4655 goto next_spqe; 4856 goto next_spqe;
4857 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
4858 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
4859 "AFEX: ramrod completed FUNCTION_UPDATE\n");
4860 f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
4861
4862 /* We will perform the Queues update from sp_rtnl task
4863 * as all Queue SP operations should run under
4864 * rtnl_lock.
4865 */
4866 smp_mb__before_clear_bit();
4867 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
4868 &bp->sp_rtnl_state);
4869 smp_mb__after_clear_bit();
4870
4871 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4872 goto next_spqe;
4873
4874 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
4875 f_obj->complete_cmd(bp, f_obj,
4876 BNX2X_F_CMD_AFEX_VIFLISTS);
4877 bnx2x_after_afex_vif_lists(bp, elem);
4878 goto next_spqe;
4656 case EVENT_RING_OPCODE_FUNCTION_START: 4879 case EVENT_RING_OPCODE_FUNCTION_START:
4657 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4880 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4658 "got FUNC_START ramrod\n"); 4881 "got FUNC_START ramrod\n");
@@ -4784,6 +5007,13 @@ static void bnx2x_sp_task(struct work_struct *work)
4784 5007
4785 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 5008 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
4786 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5009 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5010
5011 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5012 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5013 &bp->sp_state)) {
5014 bnx2x_link_report(bp);
5015 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5016 }
4787} 5017}
4788 5018
4789irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 5019irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -4870,7 +5100,7 @@ static void bnx2x_timer(unsigned long data)
4870 * nic init service functions 5100 * nic init service functions
4871 */ 5101 */
4872 5102
4873static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) 5103static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
4874{ 5104{
4875 u32 i; 5105 u32 i;
4876 if (!(len%4) && !(addr%4)) 5106 if (!(len%4) && !(addr%4))
@@ -4883,10 +5113,10 @@ static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
4883} 5113}
4884 5114
4885/* helper: writes FP SP data to FW - data_size in dwords */ 5115/* helper: writes FP SP data to FW - data_size in dwords */
4886static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp, 5116static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
4887 int fw_sb_id, 5117 int fw_sb_id,
4888 u32 *sb_data_p, 5118 u32 *sb_data_p,
4889 u32 data_size) 5119 u32 data_size)
4890{ 5120{
4891 int index; 5121 int index;
4892 for (index = 0; index < data_size; index++) 5122 for (index = 0; index < data_size; index++)
@@ -4896,7 +5126,7 @@ static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
4896 *(sb_data_p + index)); 5126 *(sb_data_p + index));
4897} 5127}
4898 5128
4899static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) 5129static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
4900{ 5130{
4901 u32 *sb_data_p; 5131 u32 *sb_data_p;
4902 u32 data_size = 0; 5132 u32 data_size = 0;
@@ -4929,7 +5159,7 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
4929} 5159}
4930 5160
4931/* helper: writes SP SB data to FW */ 5161/* helper: writes SP SB data to FW */
4932static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp, 5162static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
4933 struct hc_sp_status_block_data *sp_sb_data) 5163 struct hc_sp_status_block_data *sp_sb_data)
4934{ 5164{
4935 int func = BP_FUNC(bp); 5165 int func = BP_FUNC(bp);
@@ -4941,7 +5171,7 @@ static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
4941 *((u32 *)sp_sb_data + i)); 5171 *((u32 *)sp_sb_data + i));
4942} 5172}
4943 5173
4944static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) 5174static void bnx2x_zero_sp_sb(struct bnx2x *bp)
4945{ 5175{
4946 int func = BP_FUNC(bp); 5176 int func = BP_FUNC(bp);
4947 struct hc_sp_status_block_data sp_sb_data; 5177 struct hc_sp_status_block_data sp_sb_data;
@@ -4962,8 +5192,7 @@ static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4962} 5192}
4963 5193
4964 5194
4965static inline 5195static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4966void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4967 int igu_sb_id, int igu_seg_id) 5196 int igu_sb_id, int igu_seg_id)
4968{ 5197{
4969 hc_sm->igu_sb_id = igu_sb_id; 5198 hc_sm->igu_sb_id = igu_sb_id;
@@ -4974,8 +5203,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4974 5203
4975 5204
4976/* allocates state machine ids. */ 5205/* allocates state machine ids. */
4977static inline 5206static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
4978void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
4979{ 5207{
4980 /* zero out state machine indices */ 5208 /* zero out state machine indices */
4981 /* rx indices */ 5209 /* rx indices */
@@ -5383,7 +5611,7 @@ static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5383 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT; 5611 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
5384} 5612}
5385 5613
5386static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) 5614static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
5387{ 5615{
5388 if (CHIP_IS_E1x(fp->bp)) 5616 if (CHIP_IS_E1x(fp->bp))
5389 return BP_L_ID(fp->bp) + fp->index; 5617 return BP_L_ID(fp->bp) + fp->index;
@@ -5444,6 +5672,43 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5444 bnx2x_update_fpsb_idx(fp); 5672 bnx2x_update_fpsb_idx(fp);
5445} 5673}
5446 5674
5675static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5676{
5677 int i;
5678
5679 for (i = 1; i <= NUM_TX_RINGS; i++) {
5680 struct eth_tx_next_bd *tx_next_bd =
5681 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5682
5683 tx_next_bd->addr_hi =
5684 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
5685 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5686 tx_next_bd->addr_lo =
5687 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
5688 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5689 }
5690
5691 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
5692 txdata->tx_db.data.zero_fill1 = 0;
5693 txdata->tx_db.data.prod = 0;
5694
5695 txdata->tx_pkt_prod = 0;
5696 txdata->tx_pkt_cons = 0;
5697 txdata->tx_bd_prod = 0;
5698 txdata->tx_bd_cons = 0;
5699 txdata->tx_pkt = 0;
5700}
5701
5702static void bnx2x_init_tx_rings(struct bnx2x *bp)
5703{
5704 int i;
5705 u8 cos;
5706
5707 for_each_tx_queue(bp, i)
5708 for_each_cos_in_tx_queue(&bp->fp[i], cos)
5709 bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
5710}
5711
5447void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 5712void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5448{ 5713{
5449 int i; 5714 int i;
@@ -5968,7 +6233,7 @@ void bnx2x_pf_disable(struct bnx2x *bp)
5968 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); 6233 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
5969} 6234}
5970 6235
5971static inline void bnx2x__common_init_phy(struct bnx2x *bp) 6236static void bnx2x__common_init_phy(struct bnx2x *bp)
5972{ 6237{
5973 u32 shmem_base[2], shmem2_base[2]; 6238 u32 shmem_base[2], shmem2_base[2];
5974 shmem_base[0] = bp->common.shmem_base; 6239 shmem_base[0] = bp->common.shmem_base;
@@ -6255,12 +6520,24 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6255 if (!CHIP_IS_E1(bp)) 6520 if (!CHIP_IS_E1(bp))
6256 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); 6521 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
6257 6522
6258 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) 6523 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
6259 /* Bit-map indicating which L2 hdrs may appear 6524 if (IS_MF_AFEX(bp)) {
6260 * after the basic Ethernet header 6525 /* configure that VNTag and VLAN headers must be
6261 */ 6526 * received in afex mode
6262 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 6527 */
6263 bp->path_has_ovlan ? 7 : 6); 6528 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
6529 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
6530 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
6531 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
6532 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
6533 } else {
6534 /* Bit-map indicating which L2 hdrs may appear
6535 * after the basic Ethernet header
6536 */
6537 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6538 bp->path_has_ovlan ? 7 : 6);
6539 }
6540 }
6264 6541
6265 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); 6542 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6266 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); 6543 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
@@ -6294,9 +6571,21 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6294 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); 6571 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6295 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); 6572 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
6296 6573
6297 if (!CHIP_IS_E1x(bp)) 6574 if (!CHIP_IS_E1x(bp)) {
6298 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 6575 if (IS_MF_AFEX(bp)) {
6299 bp->path_has_ovlan ? 7 : 6); 6576 /* configure that VNTag and VLAN headers must be
6577 * sent in afex mode
6578 */
6579 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
6580 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
6581 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
6582 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
6583 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
6584 } else {
6585 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
6586 bp->path_has_ovlan ? 7 : 6);
6587 }
6588 }
6300 6589
6301 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6590 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6302 6591
@@ -6514,15 +6803,29 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6514 6803
6515 6804
6516 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 6805 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
6517 if (CHIP_IS_E3B0(bp)) 6806 if (CHIP_IS_E3B0(bp)) {
6518 /* Ovlan exists only if we are in multi-function + 6807 if (IS_MF_AFEX(bp)) {
6519 * switch-dependent mode, in switch-independent there 6808 /* configure headers for AFEX mode */
6520 * is no ovlan headers 6809 REG_WR(bp, BP_PORT(bp) ?
6521 */ 6810 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6522 REG_WR(bp, BP_PORT(bp) ? 6811 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
6523 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 6812 REG_WR(bp, BP_PORT(bp) ?
6524 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 6813 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
6525 (bp->path_has_ovlan ? 7 : 6)); 6814 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
6815 REG_WR(bp, BP_PORT(bp) ?
6816 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
6817 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
6818 } else {
6819 /* Ovlan exists only if we are in multi-function +
6820 * switch-dependent mode, in switch-independent there
6821 * is no ovlan headers
6822 */
6823 REG_WR(bp, BP_PORT(bp) ?
6824 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6825 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
6826 (bp->path_has_ovlan ? 7 : 6));
6827 }
6828 }
6526 6829
6527 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 6830 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
6528 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 6831 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
@@ -6584,10 +6887,15 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6584 /* Bit-map indicating which L2 hdrs may appear after the 6887 /* Bit-map indicating which L2 hdrs may appear after the
6585 * basic Ethernet header 6888 * basic Ethernet header
6586 */ 6889 */
6587 REG_WR(bp, BP_PORT(bp) ? 6890 if (IS_MF_AFEX(bp))
6588 NIG_REG_P1_HDRS_AFTER_BASIC : 6891 REG_WR(bp, BP_PORT(bp) ?
6589 NIG_REG_P0_HDRS_AFTER_BASIC, 6892 NIG_REG_P1_HDRS_AFTER_BASIC :
6590 IS_MF_SD(bp) ? 7 : 6); 6893 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
6894 else
6895 REG_WR(bp, BP_PORT(bp) ?
6896 NIG_REG_P1_HDRS_AFTER_BASIC :
6897 NIG_REG_P0_HDRS_AFTER_BASIC,
6898 IS_MF_SD(bp) ? 7 : 6);
6591 6899
6592 if (CHIP_IS_E3(bp)) 6900 if (CHIP_IS_E3(bp))
6593 REG_WR(bp, BP_PORT(bp) ? 6901 REG_WR(bp, BP_PORT(bp) ?
@@ -6609,6 +6917,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6609 val = 1; 6917 val = 1;
6610 break; 6918 break;
6611 case MULTI_FUNCTION_SI: 6919 case MULTI_FUNCTION_SI:
6920 case MULTI_FUNCTION_AFEX:
6612 val = 2; 6921 val = 2;
6613 break; 6922 break;
6614 } 6923 }
@@ -6640,21 +6949,71 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6640static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 6949static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6641{ 6950{
6642 int reg; 6951 int reg;
6952 u32 wb_write[2];
6643 6953
6644 if (CHIP_IS_E1(bp)) 6954 if (CHIP_IS_E1(bp))
6645 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 6955 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6646 else 6956 else
6647 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 6957 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6648 6958
6649 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); 6959 wb_write[0] = ONCHIP_ADDR1(addr);
6960 wb_write[1] = ONCHIP_ADDR2(addr);
6961 REG_WR_DMAE(bp, reg, wb_write, 2);
6650} 6962}
6651 6963
6652static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) 6964static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
6965 u8 idu_sb_id, bool is_Pf)
6966{
6967 u32 data, ctl, cnt = 100;
6968 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
6969 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
6970 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
6971 u32 sb_bit = 1 << (idu_sb_id%32);
6972 u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
6973 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
6974
6975 /* Not supported in BC mode */
6976 if (CHIP_INT_MODE_IS_BC(bp))
6977 return;
6978
6979 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
6980 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
6981 IGU_REGULAR_CLEANUP_SET |
6982 IGU_REGULAR_BCLEANUP;
6983
6984 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
6985 func_encode << IGU_CTRL_REG_FID_SHIFT |
6986 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
6987
6988 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
6989 data, igu_addr_data);
6990 REG_WR(bp, igu_addr_data, data);
6991 mmiowb();
6992 barrier();
6993 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
6994 ctl, igu_addr_ctl);
6995 REG_WR(bp, igu_addr_ctl, ctl);
6996 mmiowb();
6997 barrier();
6998
6999 /* wait for clean up to finish */
7000 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7001 msleep(20);
7002
7003
7004 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7005 DP(NETIF_MSG_HW,
7006 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7007 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7008 }
7009}
7010
7011static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
6653{ 7012{
6654 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); 7013 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
6655} 7014}
6656 7015
6657static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) 7016static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
6658{ 7017{
6659 u32 i, base = FUNC_ILT_BASE(func); 7018 u32 i, base = FUNC_ILT_BASE(func);
6660 for (i = base; i < base + ILT_PER_FUNC; i++) 7019 for (i = base; i < base + ILT_PER_FUNC; i++)
@@ -7005,7 +7364,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
7005 BCM_PAGE_SIZE * NUM_EQ_PAGES); 7364 BCM_PAGE_SIZE * NUM_EQ_PAGES);
7006} 7365}
7007 7366
7008static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) 7367static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
7009{ 7368{
7010 int num_groups; 7369 int num_groups;
7011 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; 7370 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
@@ -7192,7 +7551,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7192 unsigned long ramrod_flags = 0; 7551 unsigned long ramrod_flags = 0;
7193 7552
7194#ifdef BCM_CNIC 7553#ifdef BCM_CNIC
7195 if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) { 7554 if (is_zero_ether_addr(bp->dev->dev_addr) &&
7555 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
7196 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, 7556 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
7197 "Ignoring Zero MAC for STORAGE SD mode\n"); 7557 "Ignoring Zero MAC for STORAGE SD mode\n");
7198 return 0; 7558 return 0;
@@ -7230,7 +7590,7 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
7230 BNX2X_DEV_INFO("set number of queues to 1\n"); 7590 BNX2X_DEV_INFO("set number of queues to 1\n");
7231 break; 7591 break;
7232 default: 7592 default:
7233 /* Set number of queues according to bp->multi_mode value */ 7593 /* Set number of queues for MSI-X mode */
7234 bnx2x_set_num_queues(bp); 7594 bnx2x_set_num_queues(bp);
7235 7595
7236 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); 7596 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
@@ -7239,15 +7599,17 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
7239 * so try to enable MSI-X with the requested number of fp's 7599 * so try to enable MSI-X with the requested number of fp's
7240 * and fallback to MSI or legacy INTx with one fp 7600 * and fallback to MSI or legacy INTx with one fp
7241 */ 7601 */
7242 if (bnx2x_enable_msix(bp)) { 7602 if (bnx2x_enable_msix(bp) ||
7243 /* failed to enable MSI-X */ 7603 bp->flags & USING_SINGLE_MSIX_FLAG) {
7244 BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n", 7604 /* failed to enable multiple MSI-X */
7605 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
7245 bp->num_queues, 1 + NON_ETH_CONTEXT_USE); 7606 bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
7246 7607
7247 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7608 bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
7248 7609
7249 /* Try to enable MSI */ 7610 /* Try to enable MSI */
7250 if (!(bp->flags & DISABLE_MSI_FLAG)) 7611 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
7612 !(bp->flags & DISABLE_MSI_FLAG))
7251 bnx2x_enable_msi(bp); 7613 bnx2x_enable_msi(bp);
7252 } 7614 }
7253 break; 7615 break;
@@ -7368,7 +7730,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
7368 * - HC configuration 7730 * - HC configuration
7369 * - Queue's CDU context 7731 * - Queue's CDU context
7370 */ 7732 */
7371static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, 7733static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7372 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) 7734 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
7373{ 7735{
7374 7736
@@ -7718,7 +8080,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
7718 /* TODO: Close Doorbell port? */ 8080 /* TODO: Close Doorbell port? */
7719} 8081}
7720 8082
7721static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) 8083static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
7722{ 8084{
7723 struct bnx2x_func_state_params func_params = {NULL}; 8085 struct bnx2x_func_state_params func_params = {NULL};
7724 8086
@@ -7733,7 +8095,7 @@ static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
7733 return bnx2x_func_state_change(bp, &func_params); 8095 return bnx2x_func_state_change(bp, &func_params);
7734} 8096}
7735 8097
7736static inline int bnx2x_func_stop(struct bnx2x *bp) 8098static int bnx2x_func_stop(struct bnx2x *bp)
7737{ 8099{
7738 struct bnx2x_func_state_params func_params = {NULL}; 8100 struct bnx2x_func_state_params func_params = {NULL};
7739 int rc; 8101 int rc;
@@ -7848,7 +8210,7 @@ void bnx2x_send_unload_done(struct bnx2x *bp)
7848 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 8210 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7849} 8211}
7850 8212
7851static inline int bnx2x_func_wait_started(struct bnx2x *bp) 8213static int bnx2x_func_wait_started(struct bnx2x *bp)
7852{ 8214{
7853 int tout = 50; 8215 int tout = 50;
7854 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 8216 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -8158,7 +8520,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8158 * 8520 *
8159 * @bp: driver handle 8521 * @bp: driver handle
8160 */ 8522 */
8161static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) 8523static void bnx2x_mcp_wait_one(struct bnx2x *bp)
8162{ 8524{
8163 /* special handling for emulation and FPGA, 8525 /* special handling for emulation and FPGA,
8164 wait 10 times longer */ 8526 wait 10 times longer */
@@ -8494,7 +8856,7 @@ exit_leader_reset:
8494 return rc; 8856 return rc;
8495} 8857}
8496 8858
8497static inline void bnx2x_recovery_failed(struct bnx2x *bp) 8859static void bnx2x_recovery_failed(struct bnx2x *bp)
8498{ 8860{
8499 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); 8861 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
8500 8862
@@ -8727,7 +9089,8 @@ sp_rtnl_not_reset:
8727#endif 9089#endif
8728 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) 9090 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
8729 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); 9091 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
8730 9092 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
9093 bnx2x_after_function_update(bp);
8731 /* 9094 /*
8732 * in case of fan failure we need to reset id if the "stop on error" 9095 * in case of fan failure we need to reset id if the "stop on error"
8733 * debug flag is set, since we trying to prevent permanent overheating 9096 * debug flag is set, since we trying to prevent permanent overheating
@@ -9122,13 +9485,34 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
9122 return bnx2x_prev_mcp_done(bp); 9485 return bnx2x_prev_mcp_done(bp);
9123} 9486}
9124 9487
9488/* previous driver DMAE transaction may have occurred when pre-boot stage ended
9489 * and boot began, or when kdump kernel was loaded. Either case would invalidate
9490 * the addresses of the transaction, resulting in was-error bit set in the pci
9491 * causing all hw-to-host pcie transactions to timeout. If this happened we want
9492 * to clear the interrupt which detected this from the pglueb and the was done
9493 * bit
9494 */
9495static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
9496{
9497 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
9498 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
9499 BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
9500 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
9501 }
9502}
9503
9125static int __devinit bnx2x_prev_unload(struct bnx2x *bp) 9504static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
9126{ 9505{
9127 int time_counter = 10; 9506 int time_counter = 10;
9128 u32 rc, fw, hw_lock_reg, hw_lock_val; 9507 u32 rc, fw, hw_lock_reg, hw_lock_val;
9129 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 9508 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
9130 9509
9131 /* Release previously held locks */ 9510 /* clear hw from errors which may have resulted from an interrupted
9511 * dmae transaction.
9512 */
9513 bnx2x_prev_interrupted_dmae(bp);
9514
9515 /* Release previously held locks */
9132 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 9516 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
9133 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : 9517 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
9134 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); 9518 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
@@ -9201,6 +9585,17 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9201 id |= (val & 0xf); 9585 id |= (val & 0xf);
9202 bp->common.chip_id = id; 9586 bp->common.chip_id = id;
9203 9587
9588 /* force 57811 according to MISC register */
9589 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
9590 if (CHIP_IS_57810(bp))
9591 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
9592 (bp->common.chip_id & 0x0000FFFF);
9593 else if (CHIP_IS_57810_MF(bp))
9594 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
9595 (bp->common.chip_id & 0x0000FFFF);
9596 bp->common.chip_id |= 0x1;
9597 }
9598
9204 /* Set doorbell size */ 9599 /* Set doorbell size */
9205 bp->db_size = (1 << BNX2X_DB_SHIFT); 9600 bp->db_size = (1 << BNX2X_DB_SHIFT);
9206 9601
@@ -9293,7 +9688,9 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9293 bp->link_params.feature_config_flags |= 9688 bp->link_params.feature_config_flags |=
9294 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 9689 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
9295 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 9690 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
9296 9691 bp->link_params.feature_config_flags |=
9692 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
9693 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
9297 bp->link_params.feature_config_flags |= 9694 bp->link_params.feature_config_flags |=
9298 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 9695 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
9299 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 9696 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
@@ -9925,6 +10322,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
9925 10322
9926 } else 10323 } else
9927 bp->flags |= NO_FCOE_FLAG; 10324 bp->flags |= NO_FCOE_FLAG;
10325
10326 bp->mf_ext_config = cfg;
10327
9928 } else { /* SD MODE */ 10328 } else { /* SD MODE */
9929 if (IS_MF_STORAGE_SD(bp)) { 10329 if (IS_MF_STORAGE_SD(bp)) {
9930 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 10330 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
@@ -9946,6 +10346,11 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
9946 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10346 memset(bp->dev->dev_addr, 0, ETH_ALEN);
9947 } 10347 }
9948 } 10348 }
10349
10350 if (IS_MF_FCOE_AFEX(bp))
10351 /* use FIP MAC as primary MAC */
10352 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
10353
9949#endif 10354#endif
9950 } else { 10355 } else {
9951 /* in SF read MACs from port configuration */ 10356 /* in SF read MACs from port configuration */
@@ -10118,6 +10523,19 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10118 } else 10523 } else
10119 BNX2X_DEV_INFO("illegal MAC address for SI\n"); 10524 BNX2X_DEV_INFO("illegal MAC address for SI\n");
10120 break; 10525 break;
10526 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
10527 if ((!CHIP_IS_E1x(bp)) &&
10528 (MF_CFG_RD(bp, func_mf_config[func].
10529 mac_upper) != 0xffff) &&
10530 (SHMEM2_HAS(bp,
10531 afex_driver_support))) {
10532 bp->mf_mode = MULTI_FUNCTION_AFEX;
10533 bp->mf_config[vn] = MF_CFG_RD(bp,
10534 func_mf_config[func].config);
10535 } else {
10536 BNX2X_DEV_INFO("can not configure afex mode\n");
10537 }
10538 break;
10121 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 10539 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
10122 /* get OV configuration */ 10540 /* get OV configuration */
10123 val = MF_CFG_RD(bp, 10541 val = MF_CFG_RD(bp,
@@ -10158,6 +10576,9 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10158 return -EPERM; 10576 return -EPERM;
10159 } 10577 }
10160 break; 10578 break;
10579 case MULTI_FUNCTION_AFEX:
10580 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
10581 break;
10161 case MULTI_FUNCTION_SI: 10582 case MULTI_FUNCTION_SI:
10162 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", 10583 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
10163 func); 10584 func);
@@ -10325,6 +10746,9 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
10325 case MULTI_FUNCTION_SI: 10746 case MULTI_FUNCTION_SI:
10326 SET_FLAGS(flags, MODE_MF_SI); 10747 SET_FLAGS(flags, MODE_MF_SI);
10327 break; 10748 break;
10749 case MULTI_FUNCTION_AFEX:
10750 SET_FLAGS(flags, MODE_MF_AFEX);
10751 break;
10328 } 10752 }
10329 } else 10753 } else
10330 SET_FLAGS(flags, MODE_SF); 10754 SET_FLAGS(flags, MODE_SF);
@@ -10384,12 +10808,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10384 if (BP_NOMCP(bp) && (func == 0)) 10808 if (BP_NOMCP(bp) && (func == 0))
10385 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); 10809 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
10386 10810
10387 bp->multi_mode = multi_mode;
10388
10389 bp->disable_tpa = disable_tpa; 10811 bp->disable_tpa = disable_tpa;
10390 10812
10391#ifdef BCM_CNIC 10813#ifdef BCM_CNIC
10392 bp->disable_tpa |= IS_MF_STORAGE_SD(bp); 10814 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
10393#endif 10815#endif
10394 10816
10395 /* Set TPA flags */ 10817 /* Set TPA flags */
@@ -10408,7 +10830,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10408 10830
10409 bp->mrrs = mrrs; 10831 bp->mrrs = mrrs;
10410 10832
10411 bp->tx_ring_size = MAX_TX_AVAIL; 10833 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
10412 10834
10413 /* make sure that the numbers are in the right granularity */ 10835 /* make sure that the numbers are in the right granularity */
10414 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 10836 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -10439,8 +10861,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10439 if (CHIP_IS_E3B0(bp)) 10861 if (CHIP_IS_E3B0(bp))
10440 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; 10862 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
10441 10863
10442 bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu);
10443
10444 return rc; 10864 return rc;
10445} 10865}
10446 10866
@@ -10530,8 +10950,8 @@ static int bnx2x_close(struct net_device *dev)
10530 return 0; 10950 return 0;
10531} 10951}
10532 10952
10533static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp, 10953static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
10534 struct bnx2x_mcast_ramrod_params *p) 10954 struct bnx2x_mcast_ramrod_params *p)
10535{ 10955{
10536 int mc_count = netdev_mc_count(bp->dev); 10956 int mc_count = netdev_mc_count(bp->dev);
10537 struct bnx2x_mcast_list_elem *mc_mac = 10957 struct bnx2x_mcast_list_elem *mc_mac =
@@ -10554,7 +10974,7 @@ static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
10554 return 0; 10974 return 0;
10555} 10975}
10556 10976
10557static inline void bnx2x_free_mcast_macs_list( 10977static void bnx2x_free_mcast_macs_list(
10558 struct bnx2x_mcast_ramrod_params *p) 10978 struct bnx2x_mcast_ramrod_params *p)
10559{ 10979{
10560 struct bnx2x_mcast_list_elem *mc_mac = 10980 struct bnx2x_mcast_list_elem *mc_mac =
@@ -10572,7 +10992,7 @@ static inline void bnx2x_free_mcast_macs_list(
10572 * 10992 *
10573 * We will use zero (0) as a MAC type for these MACs. 10993 * We will use zero (0) as a MAC type for these MACs.
10574 */ 10994 */
10575static inline int bnx2x_set_uc_list(struct bnx2x *bp) 10995static int bnx2x_set_uc_list(struct bnx2x *bp)
10576{ 10996{
10577 int rc; 10997 int rc;
10578 struct net_device *dev = bp->dev; 10998 struct net_device *dev = bp->dev;
@@ -10603,7 +11023,7 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp)
10603 BNX2X_UC_LIST_MAC, &ramrod_flags); 11023 BNX2X_UC_LIST_MAC, &ramrod_flags);
10604} 11024}
10605 11025
10606static inline int bnx2x_set_mc_list(struct bnx2x *bp) 11026static int bnx2x_set_mc_list(struct bnx2x *bp)
10607{ 11027{
10608 struct net_device *dev = bp->dev; 11028 struct net_device *dev = bp->dev;
10609 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 11029 struct bnx2x_mcast_ramrod_params rparam = {NULL};
@@ -10789,7 +11209,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
10789#endif 11209#endif
10790}; 11210};
10791 11211
10792static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) 11212static int bnx2x_set_coherency_mask(struct bnx2x *bp)
10793{ 11213{
10794 struct device *dev = &bp->pdev->dev; 11214 struct device *dev = &bp->pdev->dev;
10795 11215
@@ -11055,7 +11475,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
11055 return 0; 11475 return 0;
11056} 11476}
11057 11477
11058static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 11478static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11059{ 11479{
11060 const __be32 *source = (const __be32 *)_source; 11480 const __be32 *source = (const __be32 *)_source;
11061 u32 *target = (u32 *)_target; 11481 u32 *target = (u32 *)_target;
@@ -11069,7 +11489,7 @@ static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11069 Ops array is stored in the following format: 11489 Ops array is stored in the following format:
11070 {op(8bit), offset(24bit, big endian), data(32bit, big endian)} 11490 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11071 */ 11491 */
11072static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) 11492static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11073{ 11493{
11074 const __be32 *source = (const __be32 *)_source; 11494 const __be32 *source = (const __be32 *)_source;
11075 struct raw_op *target = (struct raw_op *)_target; 11495 struct raw_op *target = (struct raw_op *)_target;
@@ -11087,7 +11507,7 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11087 * IRO array is stored in the following format: 11507 * IRO array is stored in the following format:
11088 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } 11508 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
11089 */ 11509 */
11090static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) 11510static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
11091{ 11511{
11092 const __be32 *source = (const __be32 *)_source; 11512 const __be32 *source = (const __be32 *)_source;
11093 struct iro *target = (struct iro *)_target; 11513 struct iro *target = (struct iro *)_target;
@@ -11107,7 +11527,7 @@ static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
11107 } 11527 }
11108} 11528}
11109 11529
11110static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 11530static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11111{ 11531{
11112 const __be16 *source = (const __be16 *)_source; 11532 const __be16 *source = (const __be16 *)_source;
11113 u16 *target = (u16 *)_target; 11533 u16 *target = (u16 *)_target;
@@ -11244,11 +11664,13 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
11244 bnx2x_init_func_obj(bp, &bp->func_obj, 11664 bnx2x_init_func_obj(bp, &bp->func_obj,
11245 bnx2x_sp(bp, func_rdata), 11665 bnx2x_sp(bp, func_rdata),
11246 bnx2x_sp_mapping(bp, func_rdata), 11666 bnx2x_sp_mapping(bp, func_rdata),
11667 bnx2x_sp(bp, func_afex_rdata),
11668 bnx2x_sp_mapping(bp, func_afex_rdata),
11247 &bnx2x_func_sp_drv); 11669 &bnx2x_func_sp_drv);
11248} 11670}
11249 11671
11250/* must be called after sriov-enable */ 11672/* must be called after sriov-enable */
11251static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp) 11673static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11252{ 11674{
11253 int cid_count = BNX2X_L2_CID_COUNT(bp); 11675 int cid_count = BNX2X_L2_CID_COUNT(bp);
11254 11676
@@ -11264,7 +11686,7 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11264 * @dev: pci device 11686 * @dev: pci device
11265 * 11687 *
11266 */ 11688 */
11267static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev) 11689static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
11268{ 11690{
11269 int pos; 11691 int pos;
11270 u16 control; 11692 u16 control;
@@ -11325,6 +11747,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11325 case BCM57810_MF: 11747 case BCM57810_MF:
11326 case BCM57840: 11748 case BCM57840:
11327 case BCM57840_MF: 11749 case BCM57840_MF:
11750 case BCM57811:
11751 case BCM57811_MF:
11328 max_cos_est = BNX2X_MULTI_TX_COS_E3B0; 11752 max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
11329 break; 11753 break;
11330 11754
@@ -11738,7 +12162,7 @@ module_exit(bnx2x_cleanup);
11738 * This function will wait until the ramdord completion returns. 12162 * This function will wait until the ramdord completion returns.
11739 * Return 0 if success, -ENODEV if ramrod doesn't return. 12163 * Return 0 if success, -ENODEV if ramrod doesn't return.
11740 */ 12164 */
11741static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) 12165static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
11742{ 12166{
11743 unsigned long ramrod_flags = 0; 12167 unsigned long ramrod_flags = 0;
11744 12168
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index c25803b9c0ca..bbd387492a80 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1483,6 +1483,11 @@
1483 starts at 0x0 for the A0 tape-out and increments by one for each 1483 starts at 0x0 for the A0 tape-out and increments by one for each
1484 all-layer tape-out. */ 1484 all-layer tape-out. */
1485#define MISC_REG_CHIP_REV 0xa40c 1485#define MISC_REG_CHIP_REV 0xa40c
1486/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11-
1487 * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72];
1488 * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
1489#define MISC_REG_CHIP_TYPE 0xac60
1490#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
1486/* [RW 32] The following driver registers(1...16) represent 16 drivers and 1491/* [RW 32] The following driver registers(1...16) represent 16 drivers and
1487 32 clients. Each client can be controlled by one driver only. One in each 1492 32 clients. Each client can be controlled by one driver only. One in each
1488 bit represent that this driver control the appropriate client (Ex: bit 5 1493 bit represent that this driver control the appropriate client (Ex: bit 5
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 513573321625..6c14b4a4e82c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -633,14 +633,17 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
633} 633}
634 634
635 635
636static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, 636void bnx2x_set_mac_in_nig(struct bnx2x *bp,
637 bool add, unsigned char *dev_addr, int index) 637 bool add, unsigned char *dev_addr, int index)
638{ 638{
639 u32 wb_data[2]; 639 u32 wb_data[2];
640 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : 640 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
641 NIG_REG_LLH0_FUNC_MEM; 641 NIG_REG_LLH0_FUNC_MEM;
642 642
643 if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE) 643 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
644 return;
645
646 if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
644 return; 647 return;
645 648
646 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", 649 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
@@ -4090,12 +4093,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4090 rss_mode = ETH_RSS_MODE_DISABLED; 4093 rss_mode = ETH_RSS_MODE_DISABLED;
4091 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) 4094 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4092 rss_mode = ETH_RSS_MODE_REGULAR; 4095 rss_mode = ETH_RSS_MODE_REGULAR;
4093 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4094 rss_mode = ETH_RSS_MODE_VLAN_PRI;
4095 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4096 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4097 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4098 rss_mode = ETH_RSS_MODE_IP_DSCP;
4099 4096
4100 data->rss_mode = rss_mode; 4097 data->rss_mode = rss_mode;
4101 4098
@@ -4404,6 +4401,9 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4404 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); 4401 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4405 tx_data->anti_spoofing_flg = 4402 tx_data->anti_spoofing_flg =
4406 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); 4403 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4404 tx_data->force_default_pri_flg =
4405 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4406
4407 tx_data->tx_status_block_id = params->fw_sb_id; 4407 tx_data->tx_status_block_id = params->fw_sb_id;
4408 tx_data->tx_sb_index_number = params->sb_cq_index; 4408 tx_data->tx_sb_index_number = params->sb_cq_index;
4409 tx_data->tss_leading_client_id = params->tss_leading_cl_id; 4409 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5331,6 +5331,17 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
5331 case BNX2X_F_STATE_STARTED: 5331 case BNX2X_F_STATE_STARTED:
5332 if (cmd == BNX2X_F_CMD_STOP) 5332 if (cmd == BNX2X_F_CMD_STOP)
5333 next_state = BNX2X_F_STATE_INITIALIZED; 5333 next_state = BNX2X_F_STATE_INITIALIZED;
5334 /* afex ramrods can be sent only in started mode, and only
5335 * if not pending for function_stop ramrod completion
5336 * for these events - next state remained STARTED.
5337 */
5338 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5339 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5340 next_state = BNX2X_F_STATE_STARTED;
5341
5342 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5343 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5344 next_state = BNX2X_F_STATE_STARTED;
5334 else if (cmd == BNX2X_F_CMD_TX_STOP) 5345 else if (cmd == BNX2X_F_CMD_TX_STOP)
5335 next_state = BNX2X_F_STATE_TX_STOPPED; 5346 next_state = BNX2X_F_STATE_TX_STOPPED;
5336 5347
@@ -5618,6 +5629,83 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
5618 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5629 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5619} 5630}
5620 5631
5632static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5633 struct bnx2x_func_state_params *params)
5634{
5635 struct bnx2x_func_sp_obj *o = params->f_obj;
5636 struct function_update_data *rdata =
5637 (struct function_update_data *)o->afex_rdata;
5638 dma_addr_t data_mapping = o->afex_rdata_mapping;
5639 struct bnx2x_func_afex_update_params *afex_update_params =
5640 &params->params.afex_update;
5641
5642 memset(rdata, 0, sizeof(*rdata));
5643
5644 /* Fill the ramrod data with provided parameters */
5645 rdata->vif_id_change_flg = 1;
5646 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5647 rdata->afex_default_vlan_change_flg = 1;
5648 rdata->afex_default_vlan =
5649 cpu_to_le16(afex_update_params->afex_default_vlan);
5650 rdata->allowed_priorities_change_flg = 1;
5651 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5652
5653 /* No need for an explicit memory barrier here as long we would
5654 * need to ensure the ordering of writing to the SPQ element
5655 * and updating of the SPQ producer which involves a memory
5656 * read and we will have to put a full memory barrier there
5657 * (inside bnx2x_sp_post()).
5658 */
5659 DP(BNX2X_MSG_SP,
5660 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5661 rdata->vif_id,
5662 rdata->afex_default_vlan, rdata->allowed_priorities);
5663
5664 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5665 U64_HI(data_mapping),
5666 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5667}
5668
5669static
5670inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5671 struct bnx2x_func_state_params *params)
5672{
5673 struct bnx2x_func_sp_obj *o = params->f_obj;
5674 struct afex_vif_list_ramrod_data *rdata =
5675 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5676 struct bnx2x_func_afex_viflists_params *afex_viflist_params =
5677 &params->params.afex_viflists;
5678 u64 *p_rdata = (u64 *)rdata;
5679
5680 memset(rdata, 0, sizeof(*rdata));
5681
5682 /* Fill the ramrod data with provided parameters */
5683 rdata->vif_list_index = afex_viflist_params->vif_list_index;
5684 rdata->func_bit_map = afex_viflist_params->func_bit_map;
5685 rdata->afex_vif_list_command =
5686 afex_viflist_params->afex_vif_list_command;
5687 rdata->func_to_clear = afex_viflist_params->func_to_clear;
5688
5689 /* send in echo type of sub command */
5690 rdata->echo = afex_viflist_params->afex_vif_list_command;
5691
5692 /* No need for an explicit memory barrier here as long we would
5693 * need to ensure the ordering of writing to the SPQ element
5694 * and updating of the SPQ producer which involves a memory
5695 * read and we will have to put a full memory barrier there
5696 * (inside bnx2x_sp_post()).
5697 */
5698
5699 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5700 rdata->afex_vif_list_command, rdata->vif_list_index,
5701 rdata->func_bit_map, rdata->func_to_clear);
5702
5703 /* this ramrod sends data directly and not through DMA mapping */
5704 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5705 U64_HI(*p_rdata), U64_LO(*p_rdata),
5706 NONE_CONNECTION_TYPE);
5707}
5708
5621static inline int bnx2x_func_send_stop(struct bnx2x *bp, 5709static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5622 struct bnx2x_func_state_params *params) 5710 struct bnx2x_func_state_params *params)
5623{ 5711{
@@ -5669,6 +5757,10 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
5669 return bnx2x_func_send_stop(bp, params); 5757 return bnx2x_func_send_stop(bp, params);
5670 case BNX2X_F_CMD_HW_RESET: 5758 case BNX2X_F_CMD_HW_RESET:
5671 return bnx2x_func_hw_reset(bp, params); 5759 return bnx2x_func_hw_reset(bp, params);
5760 case BNX2X_F_CMD_AFEX_UPDATE:
5761 return bnx2x_func_send_afex_update(bp, params);
5762 case BNX2X_F_CMD_AFEX_VIFLISTS:
5763 return bnx2x_func_send_afex_viflists(bp, params);
5672 case BNX2X_F_CMD_TX_STOP: 5764 case BNX2X_F_CMD_TX_STOP:
5673 return bnx2x_func_send_tx_stop(bp, params); 5765 return bnx2x_func_send_tx_stop(bp, params);
5674 case BNX2X_F_CMD_TX_START: 5766 case BNX2X_F_CMD_TX_START:
@@ -5682,6 +5774,7 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
5682void bnx2x_init_func_obj(struct bnx2x *bp, 5774void bnx2x_init_func_obj(struct bnx2x *bp,
5683 struct bnx2x_func_sp_obj *obj, 5775 struct bnx2x_func_sp_obj *obj,
5684 void *rdata, dma_addr_t rdata_mapping, 5776 void *rdata, dma_addr_t rdata_mapping,
5777 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5685 struct bnx2x_func_sp_drv_ops *drv_iface) 5778 struct bnx2x_func_sp_drv_ops *drv_iface)
5686{ 5779{
5687 memset(obj, 0, sizeof(*obj)); 5780 memset(obj, 0, sizeof(*obj));
@@ -5690,7 +5783,8 @@ void bnx2x_init_func_obj(struct bnx2x *bp,
5690 5783
5691 obj->rdata = rdata; 5784 obj->rdata = rdata;
5692 obj->rdata_mapping = rdata_mapping; 5785 obj->rdata_mapping = rdata_mapping;
5693 5786 obj->afex_rdata = afex_rdata;
5787 obj->afex_rdata_mapping = afex_rdata_mapping;
5694 obj->send_cmd = bnx2x_func_send_cmd; 5788 obj->send_cmd = bnx2x_func_send_cmd;
5695 obj->check_transition = bnx2x_func_chk_transition; 5789 obj->check_transition = bnx2x_func_chk_transition;
5696 obj->complete_cmd = bnx2x_func_comp_cmd; 5790 obj->complete_cmd = bnx2x_func_comp_cmd;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 61a7670adfcd..efd80bdd0dfe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -62,6 +62,8 @@ enum {
62 BNX2X_FILTER_MCAST_PENDING, 62 BNX2X_FILTER_MCAST_PENDING,
63 BNX2X_FILTER_MCAST_SCHED, 63 BNX2X_FILTER_MCAST_SCHED,
64 BNX2X_FILTER_RSS_CONF_PENDING, 64 BNX2X_FILTER_RSS_CONF_PENDING,
65 BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
66 BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
65}; 67};
66 68
67struct bnx2x_raw_obj { 69struct bnx2x_raw_obj {
@@ -432,6 +434,8 @@ enum {
432 BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 434 BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
433}; 435};
434 436
437void bnx2x_set_mac_in_nig(struct bnx2x *bp,
438 bool add, unsigned char *dev_addr, int index);
435 439
436/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ 440/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
437 441
@@ -685,9 +689,6 @@ enum {
685 /* RSS_MODE bits are mutually exclusive */ 689 /* RSS_MODE bits are mutually exclusive */
686 BNX2X_RSS_MODE_DISABLED, 690 BNX2X_RSS_MODE_DISABLED,
687 BNX2X_RSS_MODE_REGULAR, 691 BNX2X_RSS_MODE_REGULAR,
688 BNX2X_RSS_MODE_VLAN_PRI,
689 BNX2X_RSS_MODE_E1HOV_PRI,
690 BNX2X_RSS_MODE_IP_DSCP,
691 692
692 BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */ 693 BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
693 694
@@ -801,7 +802,8 @@ enum {
801 BNX2X_Q_FLG_TX_SWITCH, 802 BNX2X_Q_FLG_TX_SWITCH,
802 BNX2X_Q_FLG_TX_SEC, 803 BNX2X_Q_FLG_TX_SEC,
803 BNX2X_Q_FLG_ANTI_SPOOF, 804 BNX2X_Q_FLG_ANTI_SPOOF,
804 BNX2X_Q_FLG_SILENT_VLAN_REM 805 BNX2X_Q_FLG_SILENT_VLAN_REM,
806 BNX2X_Q_FLG_FORCE_DEFAULT_PRI
805}; 807};
806 808
807/* Queue type options: queue type may be a compination of below. */ 809/* Queue type options: queue type may be a compination of below. */
@@ -963,6 +965,11 @@ struct bnx2x_queue_state_params {
963 } params; 965 } params;
964}; 966};
965 967
968struct bnx2x_viflist_params {
969 u8 echo_res;
970 u8 func_bit_map_res;
971};
972
966struct bnx2x_queue_sp_obj { 973struct bnx2x_queue_sp_obj {
967 u32 cids[BNX2X_MULTI_TX_COS]; 974 u32 cids[BNX2X_MULTI_TX_COS];
968 u8 cl_id; 975 u8 cl_id;
@@ -1045,6 +1052,8 @@ enum bnx2x_func_cmd {
1045 BNX2X_F_CMD_START, 1052 BNX2X_F_CMD_START,
1046 BNX2X_F_CMD_STOP, 1053 BNX2X_F_CMD_STOP,
1047 BNX2X_F_CMD_HW_RESET, 1054 BNX2X_F_CMD_HW_RESET,
1055 BNX2X_F_CMD_AFEX_UPDATE,
1056 BNX2X_F_CMD_AFEX_VIFLISTS,
1048 BNX2X_F_CMD_TX_STOP, 1057 BNX2X_F_CMD_TX_STOP,
1049 BNX2X_F_CMD_TX_START, 1058 BNX2X_F_CMD_TX_START,
1050 BNX2X_F_CMD_MAX, 1059 BNX2X_F_CMD_MAX,
@@ -1089,6 +1098,18 @@ struct bnx2x_func_start_params {
1089 u8 network_cos_mode; 1098 u8 network_cos_mode;
1090}; 1099};
1091 1100
1101struct bnx2x_func_afex_update_params {
1102 u16 vif_id;
1103 u16 afex_default_vlan;
1104 u8 allowed_priorities;
1105};
1106
1107struct bnx2x_func_afex_viflists_params {
1108 u16 vif_list_index;
1109 u8 func_bit_map;
1110 u8 afex_vif_list_command;
1111 u8 func_to_clear;
1112};
1092struct bnx2x_func_tx_start_params { 1113struct bnx2x_func_tx_start_params {
1093 struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; 1114 struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
1094 u8 dcb_enabled; 1115 u8 dcb_enabled;
@@ -1110,6 +1131,8 @@ struct bnx2x_func_state_params {
1110 struct bnx2x_func_hw_init_params hw_init; 1131 struct bnx2x_func_hw_init_params hw_init;
1111 struct bnx2x_func_hw_reset_params hw_reset; 1132 struct bnx2x_func_hw_reset_params hw_reset;
1112 struct bnx2x_func_start_params start; 1133 struct bnx2x_func_start_params start;
1134 struct bnx2x_func_afex_update_params afex_update;
1135 struct bnx2x_func_afex_viflists_params afex_viflists;
1113 struct bnx2x_func_tx_start_params tx_start; 1136 struct bnx2x_func_tx_start_params tx_start;
1114 } params; 1137 } params;
1115}; 1138};
@@ -1154,6 +1177,13 @@ struct bnx2x_func_sp_obj {
1154 void *rdata; 1177 void *rdata;
1155 dma_addr_t rdata_mapping; 1178 dma_addr_t rdata_mapping;
1156 1179
1180 /* Buffer to use as a afex ramrod data and its mapping.
1181 * This can't be same rdata as above because afex ramrod requests
1182 * can arrive to the object in parallel to other ramrod requests.
1183 */
1184 void *afex_rdata;
1185 dma_addr_t afex_rdata_mapping;
1186
1157 /* this mutex validates that when pending flag is taken, the next 1187 /* this mutex validates that when pending flag is taken, the next
1158 * ramrod to be sent will be the one set the pending bit 1188 * ramrod to be sent will be the one set the pending bit
1159 */ 1189 */
@@ -1197,6 +1227,7 @@ union bnx2x_qable_obj {
1197void bnx2x_init_func_obj(struct bnx2x *bp, 1227void bnx2x_init_func_obj(struct bnx2x *bp,
1198 struct bnx2x_func_sp_obj *obj, 1228 struct bnx2x_func_sp_obj *obj,
1199 void *rdata, dma_addr_t rdata_mapping, 1229 void *rdata, dma_addr_t rdata_mapping,
1230 void *afex_rdata, dma_addr_t afex_rdata_mapping,
1200 struct bnx2x_func_sp_drv_ops *drv_iface); 1231 struct bnx2x_func_sp_drv_ops *drv_iface);
1201 1232
1202int bnx2x_func_state_change(struct bnx2x *bp, 1233int bnx2x_func_state_change(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index e1c9310fb07c..1e2785cd11d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1316,7 +1316,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1316 * 1316 *
1317 * @param bp 1317 * @param bp
1318 */ 1318 */
1319static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) 1319static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
1320{ 1320{
1321 int i; 1321 int i;
1322 int first_queue_query_index; 1322 int first_queue_query_index;
@@ -1561,3 +1561,274 @@ void bnx2x_save_statistics(struct bnx2x *bp)
1561 UPDATE_FW_STAT_OLD(mac_discard); 1561 UPDATE_FW_STAT_OLD(mac_discard);
1562 } 1562 }
1563} 1563}
1564
1565void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1566 u32 stats_type)
1567{
1568 int i;
1569 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1570 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1571 struct per_queue_stats *fcoe_q_stats =
1572 &bp->fw_stats_data->queue_stats[FCOE_IDX];
1573
1574 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1575 &fcoe_q_stats->tstorm_queue_statistics;
1576
1577 struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1578 &fcoe_q_stats->ustorm_queue_statistics;
1579
1580 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1581 &fcoe_q_stats->xstorm_queue_statistics;
1582
1583 struct fcoe_statistics_params *fw_fcoe_stat =
1584 &bp->fw_stats_data->fcoe;
1585
1586 memset(afex_stats, 0, sizeof(struct afex_stats));
1587
1588 for_each_eth_queue(bp, i) {
1589 struct bnx2x_fastpath *fp = &bp->fp[i];
1590 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1591
1592 ADD_64(afex_stats->rx_unicast_bytes_hi,
1593 qstats->total_unicast_bytes_received_hi,
1594 afex_stats->rx_unicast_bytes_lo,
1595 qstats->total_unicast_bytes_received_lo);
1596
1597 ADD_64(afex_stats->rx_broadcast_bytes_hi,
1598 qstats->total_broadcast_bytes_received_hi,
1599 afex_stats->rx_broadcast_bytes_lo,
1600 qstats->total_broadcast_bytes_received_lo);
1601
1602 ADD_64(afex_stats->rx_multicast_bytes_hi,
1603 qstats->total_multicast_bytes_received_hi,
1604 afex_stats->rx_multicast_bytes_lo,
1605 qstats->total_multicast_bytes_received_lo);
1606
1607 ADD_64(afex_stats->rx_unicast_frames_hi,
1608 qstats->total_unicast_packets_received_hi,
1609 afex_stats->rx_unicast_frames_lo,
1610 qstats->total_unicast_packets_received_lo);
1611
1612 ADD_64(afex_stats->rx_broadcast_frames_hi,
1613 qstats->total_broadcast_packets_received_hi,
1614 afex_stats->rx_broadcast_frames_lo,
1615 qstats->total_broadcast_packets_received_lo);
1616
1617 ADD_64(afex_stats->rx_multicast_frames_hi,
1618 qstats->total_multicast_packets_received_hi,
1619 afex_stats->rx_multicast_frames_lo,
1620 qstats->total_multicast_packets_received_lo);
1621
1622 /* sum to rx_frames_discarded all discraded
1623 * packets due to size, ttl0 and checksum
1624 */
1625 ADD_64(afex_stats->rx_frames_discarded_hi,
1626 qstats->total_packets_received_checksum_discarded_hi,
1627 afex_stats->rx_frames_discarded_lo,
1628 qstats->total_packets_received_checksum_discarded_lo);
1629
1630 ADD_64(afex_stats->rx_frames_discarded_hi,
1631 qstats->total_packets_received_ttl0_discarded_hi,
1632 afex_stats->rx_frames_discarded_lo,
1633 qstats->total_packets_received_ttl0_discarded_lo);
1634
1635 ADD_64(afex_stats->rx_frames_discarded_hi,
1636 qstats->etherstatsoverrsizepkts_hi,
1637 afex_stats->rx_frames_discarded_lo,
1638 qstats->etherstatsoverrsizepkts_lo);
1639
1640 ADD_64(afex_stats->rx_frames_dropped_hi,
1641 qstats->no_buff_discard_hi,
1642 afex_stats->rx_frames_dropped_lo,
1643 qstats->no_buff_discard_lo);
1644
1645 ADD_64(afex_stats->tx_unicast_bytes_hi,
1646 qstats->total_unicast_bytes_transmitted_hi,
1647 afex_stats->tx_unicast_bytes_lo,
1648 qstats->total_unicast_bytes_transmitted_lo);
1649
1650 ADD_64(afex_stats->tx_broadcast_bytes_hi,
1651 qstats->total_broadcast_bytes_transmitted_hi,
1652 afex_stats->tx_broadcast_bytes_lo,
1653 qstats->total_broadcast_bytes_transmitted_lo);
1654
1655 ADD_64(afex_stats->tx_multicast_bytes_hi,
1656 qstats->total_multicast_bytes_transmitted_hi,
1657 afex_stats->tx_multicast_bytes_lo,
1658 qstats->total_multicast_bytes_transmitted_lo);
1659
1660 ADD_64(afex_stats->tx_unicast_frames_hi,
1661 qstats->total_unicast_packets_transmitted_hi,
1662 afex_stats->tx_unicast_frames_lo,
1663 qstats->total_unicast_packets_transmitted_lo);
1664
1665 ADD_64(afex_stats->tx_broadcast_frames_hi,
1666 qstats->total_broadcast_packets_transmitted_hi,
1667 afex_stats->tx_broadcast_frames_lo,
1668 qstats->total_broadcast_packets_transmitted_lo);
1669
1670 ADD_64(afex_stats->tx_multicast_frames_hi,
1671 qstats->total_multicast_packets_transmitted_hi,
1672 afex_stats->tx_multicast_frames_lo,
1673 qstats->total_multicast_packets_transmitted_lo);
1674
1675 ADD_64(afex_stats->tx_frames_dropped_hi,
1676 qstats->total_transmitted_dropped_packets_error_hi,
1677 afex_stats->tx_frames_dropped_lo,
1678 qstats->total_transmitted_dropped_packets_error_lo);
1679 }
1680
1681 /* now add FCoE statistics which are collected separately
1682 * (both offloaded and non offloaded)
1683 */
1684 if (!NO_FCOE(bp)) {
1685 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1686 LE32_0,
1687 afex_stats->rx_unicast_bytes_lo,
1688 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1689
1690 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1691 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1692 afex_stats->rx_unicast_bytes_lo,
1693 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1694
1695 ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1696 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1697 afex_stats->rx_broadcast_bytes_lo,
1698 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1699
1700 ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1701 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1702 afex_stats->rx_multicast_bytes_lo,
1703 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1704
1705 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1706 LE32_0,
1707 afex_stats->rx_unicast_frames_lo,
1708 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1709
1710 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1711 LE32_0,
1712 afex_stats->rx_unicast_frames_lo,
1713 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1714
1715 ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1716 LE32_0,
1717 afex_stats->rx_broadcast_frames_lo,
1718 fcoe_q_tstorm_stats->rcv_bcast_pkts);
1719
1720 ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1721 LE32_0,
1722 afex_stats->rx_multicast_frames_lo,
1723 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1724
1725 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1726 LE32_0,
1727 afex_stats->rx_frames_discarded_lo,
1728 fcoe_q_tstorm_stats->checksum_discard);
1729
1730 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1731 LE32_0,
1732 afex_stats->rx_frames_discarded_lo,
1733 fcoe_q_tstorm_stats->pkts_too_big_discard);
1734
1735 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1736 LE32_0,
1737 afex_stats->rx_frames_discarded_lo,
1738 fcoe_q_tstorm_stats->ttl0_discard);
1739
1740 ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1741 LE16_0,
1742 afex_stats->rx_frames_dropped_lo,
1743 fcoe_q_tstorm_stats->no_buff_discard);
1744
1745 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1746 LE32_0,
1747 afex_stats->rx_frames_dropped_lo,
1748 fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1749
1750 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1751 LE32_0,
1752 afex_stats->rx_frames_dropped_lo,
1753 fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1754
1755 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1756 LE32_0,
1757 afex_stats->rx_frames_dropped_lo,
1758 fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1759
1760 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1761 LE32_0,
1762 afex_stats->rx_frames_dropped_lo,
1763 fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1764
1765 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1766 LE32_0,
1767 afex_stats->rx_frames_dropped_lo,
1768 fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1769
1770 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1771 LE32_0,
1772 afex_stats->tx_unicast_bytes_lo,
1773 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1774
1775 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1776 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1777 afex_stats->tx_unicast_bytes_lo,
1778 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1779
1780 ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1781 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1782 afex_stats->tx_broadcast_bytes_lo,
1783 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1784
1785 ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1786 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1787 afex_stats->tx_multicast_bytes_lo,
1788 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1789
1790 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1791 LE32_0,
1792 afex_stats->tx_unicast_frames_lo,
1793 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1794
1795 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1796 LE32_0,
1797 afex_stats->tx_unicast_frames_lo,
1798 fcoe_q_xstorm_stats->ucast_pkts_sent);
1799
1800 ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
1801 LE32_0,
1802 afex_stats->tx_broadcast_frames_lo,
1803 fcoe_q_xstorm_stats->bcast_pkts_sent);
1804
1805 ADD_64_LE(afex_stats->tx_multicast_frames_hi,
1806 LE32_0,
1807 afex_stats->tx_multicast_frames_lo,
1808 fcoe_q_xstorm_stats->mcast_pkts_sent);
1809
1810 ADD_64_LE(afex_stats->tx_frames_dropped_hi,
1811 LE32_0,
1812 afex_stats->tx_frames_dropped_lo,
1813 fcoe_q_xstorm_stats->error_drop_pkts);
1814 }
1815
1816 /* if port stats are requested, add them to the PMF
1817 * stats, as anyway they will be accumulated by the
1818 * MCP before sent to the switch
1819 */
1820 if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1821 ADD_64(afex_stats->rx_frames_dropped_hi,
1822 0,
1823 afex_stats->rx_frames_dropped_lo,
1824 estats->mac_filter_discard);
1825 ADD_64(afex_stats->rx_frames_dropped_hi,
1826 0,
1827 afex_stats->rx_frames_dropped_lo,
1828 estats->brb_truncate_discard);
1829 ADD_64(afex_stats->rx_frames_discarded_hi,
1830 0,
1831 afex_stats->rx_frames_discarded_lo,
1832 estats->mac_discard);
1833 }
1834}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 2b46e1eb7fd1..93e689fdfeda 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -338,6 +338,18 @@ struct bnx2x_fw_port_stats_old {
338 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ 338 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
339 } while (0) 339 } while (0)
340 340
341#define LE32_0 ((__force __le32) 0)
342#define LE16_0 ((__force __le16) 0)
343
344/* The _force is for cases where high value is 0 */
345#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
346 ADD_64(s_hi, le32_to_cpu(a_hi_le), \
347 s_lo, le32_to_cpu(a_lo_le))
348
349#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
350 ADD_64(s_hi, le16_to_cpu(a_hi_le), \
351 s_lo, le16_to_cpu(a_lo_le))
352
341/* difference = minuend - subtrahend */ 353/* difference = minuend - subtrahend */
342#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ 354#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
343 do { \ 355 do { \
@@ -529,4 +541,7 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
529 * @bp: driver handle 541 * @bp: driver handle
530 */ 542 */
531void bnx2x_save_statistics(struct bnx2x *bp); 543void bnx2x_save_statistics(struct bnx2x *bp);
544
545void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
546 u32 stats_type);
532#endif /* BNX2X_STATS_H */ 547#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 062ac333fde6..d55df3290174 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -879,8 +879,13 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
879 if (sblk->status & SD_STATUS_LINK_CHG) 879 if (sblk->status & SD_STATUS_LINK_CHG)
880 work_exists = 1; 880 work_exists = 1;
881 } 881 }
882 /* check for RX/TX work to do */ 882
883 if (sblk->idx[0].tx_consumer != tnapi->tx_cons || 883 /* check for TX work to do */
884 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
885 work_exists = 1;
886
887 /* check for RX work to do */
888 if (tnapi->rx_rcb_prod_idx &&
884 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 889 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885 work_exists = 1; 890 work_exists = 1;
886 891
@@ -5617,17 +5622,29 @@ static void tg3_tx(struct tg3_napi *tnapi)
5617 } 5622 }
5618} 5623}
5619 5624
5625static void tg3_frag_free(bool is_frag, void *data)
5626{
5627 if (is_frag)
5628 put_page(virt_to_head_page(data));
5629 else
5630 kfree(data);
5631}
5632
5620static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) 5633static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5621{ 5634{
5635 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5636 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5637
5622 if (!ri->data) 5638 if (!ri->data)
5623 return; 5639 return;
5624 5640
5625 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), 5641 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5626 map_sz, PCI_DMA_FROMDEVICE); 5642 map_sz, PCI_DMA_FROMDEVICE);
5627 kfree(ri->data); 5643 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5628 ri->data = NULL; 5644 ri->data = NULL;
5629} 5645}
5630 5646
5647
5631/* Returns size of skb allocated or < 0 on error. 5648/* Returns size of skb allocated or < 0 on error.
5632 * 5649 *
5633 * We only need to fill in the address because the other members 5650 * We only need to fill in the address because the other members
@@ -5640,7 +5657,8 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5640 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 5657 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5641 */ 5658 */
5642static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, 5659static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5643 u32 opaque_key, u32 dest_idx_unmasked) 5660 u32 opaque_key, u32 dest_idx_unmasked,
5661 unsigned int *frag_size)
5644{ 5662{
5645 struct tg3_rx_buffer_desc *desc; 5663 struct tg3_rx_buffer_desc *desc;
5646 struct ring_info *map; 5664 struct ring_info *map;
@@ -5675,7 +5693,13 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5675 */ 5693 */
5676 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + 5694 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5677 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 5695 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5678 data = kmalloc(skb_size, GFP_ATOMIC); 5696 if (skb_size <= PAGE_SIZE) {
5697 data = netdev_alloc_frag(skb_size);
5698 *frag_size = skb_size;
5699 } else {
5700 data = kmalloc(skb_size, GFP_ATOMIC);
5701 *frag_size = 0;
5702 }
5679 if (!data) 5703 if (!data)
5680 return -ENOMEM; 5704 return -ENOMEM;
5681 5705
@@ -5683,8 +5707,8 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5683 data + TG3_RX_OFFSET(tp), 5707 data + TG3_RX_OFFSET(tp),
5684 data_size, 5708 data_size,
5685 PCI_DMA_FROMDEVICE); 5709 PCI_DMA_FROMDEVICE);
5686 if (pci_dma_mapping_error(tp->pdev, mapping)) { 5710 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5687 kfree(data); 5711 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5688 return -EIO; 5712 return -EIO;
5689 } 5713 }
5690 5714
@@ -5835,18 +5859,19 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
5835 5859
5836 if (len > TG3_RX_COPY_THRESH(tp)) { 5860 if (len > TG3_RX_COPY_THRESH(tp)) {
5837 int skb_size; 5861 int skb_size;
5862 unsigned int frag_size;
5838 5863
5839 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, 5864 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5840 *post_ptr); 5865 *post_ptr, &frag_size);
5841 if (skb_size < 0) 5866 if (skb_size < 0)
5842 goto drop_it; 5867 goto drop_it;
5843 5868
5844 pci_unmap_single(tp->pdev, dma_addr, skb_size, 5869 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5845 PCI_DMA_FROMDEVICE); 5870 PCI_DMA_FROMDEVICE);
5846 5871
5847 skb = build_skb(data); 5872 skb = build_skb(data, frag_size);
5848 if (!skb) { 5873 if (!skb) {
5849 kfree(data); 5874 tg3_frag_free(frag_size != 0, data);
5850 goto drop_it_no_recycle; 5875 goto drop_it_no_recycle;
5851 } 5876 }
5852 skb_reserve(skb, TG3_RX_OFFSET(tp)); 5877 skb_reserve(skb, TG3_RX_OFFSET(tp));
@@ -6124,6 +6149,9 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6124 return work_done; 6149 return work_done;
6125 } 6150 }
6126 6151
6152 if (!tnapi->rx_rcb_prod_idx)
6153 return work_done;
6154
6127 /* run RX thread, within the bounds set by NAPI. 6155 /* run RX thread, within the bounds set by NAPI.
6128 * All RX "locking" is done by ensuring outside 6156 * All RX "locking" is done by ensuring outside
6129 * code synchronizes with tg3->napi.poll() 6157 * code synchronizes with tg3->napi.poll()
@@ -7279,7 +7307,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
7279 7307
7280 /* Now allocate fresh SKBs for each rx ring. */ 7308 /* Now allocate fresh SKBs for each rx ring. */
7281 for (i = 0; i < tp->rx_pending; i++) { 7309 for (i = 0; i < tp->rx_pending; i++) {
7282 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { 7310 unsigned int frag_size;
7311
7312 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7313 &frag_size) < 0) {
7283 netdev_warn(tp->dev, 7314 netdev_warn(tp->dev,
7284 "Using a smaller RX standard ring. Only " 7315 "Using a smaller RX standard ring. Only "
7285 "%d out of %d buffers were allocated " 7316 "%d out of %d buffers were allocated "
@@ -7311,7 +7342,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
7311 } 7342 }
7312 7343
7313 for (i = 0; i < tp->rx_jumbo_pending; i++) { 7344 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7314 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { 7345 unsigned int frag_size;
7346
7347 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7348 &frag_size) < 0) {
7315 netdev_warn(tp->dev, 7349 netdev_warn(tp->dev,
7316 "Using a smaller RX jumbo ring. Only %d " 7350 "Using a smaller RX jumbo ring. Only %d "
7317 "out of %d buffers were allocated " 7351 "out of %d buffers were allocated "
@@ -7567,6 +7601,12 @@ static int tg3_alloc_consistent(struct tg3 *tp)
7567 */ 7601 */
7568 switch (i) { 7602 switch (i) {
7569 default: 7603 default:
7604 if (tg3_flag(tp, ENABLE_RSS)) {
7605 tnapi->rx_rcb_prod_idx = NULL;
7606 break;
7607 }
7608 /* Fall through */
7609 case 1:
7570 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 7610 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7571 break; 7611 break;
7572 case 2: 7612 case 2:
@@ -12234,6 +12274,7 @@ static const struct ethtool_ops tg3_ethtool_ops = {
12234 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 12274 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12235 .get_rxfh_indir = tg3_get_rxfh_indir, 12275 .get_rxfh_indir = tg3_get_rxfh_indir,
12236 .set_rxfh_indir = tg3_set_rxfh_indir, 12276 .set_rxfh_indir = tg3_set_rxfh_indir,
12277 .get_ts_info = ethtool_op_get_ts_info,
12237}; 12278};
12238 12279
12239static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, 12280static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 77977d735dd7..0b640fafbda3 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -70,7 +70,6 @@ static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
70static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc); 70static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
71static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc); 71static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
72static void bfa_ioc_recover(struct bfa_ioc *ioc); 72static void bfa_ioc_recover(struct bfa_ioc *ioc);
73static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
74static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event); 73static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
75static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); 74static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
76static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); 75static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
@@ -346,8 +345,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
346 switch (event) { 345 switch (event) {
347 case IOC_E_FWRSP_GETATTR: 346 case IOC_E_FWRSP_GETATTR:
348 del_timer(&ioc->ioc_timer); 347 del_timer(&ioc->ioc_timer);
349 bfa_ioc_check_attr_wwns(ioc);
350 bfa_ioc_hb_monitor(ioc);
351 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 348 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
352 break; 349 break;
353 350
@@ -380,6 +377,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
380{ 377{
381 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 378 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
382 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); 379 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
380 bfa_ioc_hb_monitor(ioc);
383} 381}
384 382
385static void 383static void
@@ -1207,27 +1205,62 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1207 writel(1, sem_reg); 1205 writel(1, sem_reg);
1208} 1206}
1209 1207
1208/* Clear fwver hdr */
1209static void
1210bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1211{
1212 u32 pgnum, pgoff, loff = 0;
1213 int i;
1214
1215 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1216 pgoff = PSS_SMEM_PGOFF(loff);
1217 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1218
1219 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1220 writel(0, ioc->ioc_regs.smem_page_start + loff);
1221 loff += sizeof(u32);
1222 }
1223}
1224
1225
1210static void 1226static void
1211bfa_ioc_hw_sem_init(struct bfa_ioc *ioc) 1227bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1212{ 1228{
1213 struct bfi_ioc_image_hdr fwhdr; 1229 struct bfi_ioc_image_hdr fwhdr;
1214 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); 1230 u32 fwstate, r32;
1215 1231
1216 if (fwstate == BFI_IOC_UNINIT) 1232 /* Spin on init semaphore to serialize. */
1233 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1234 while (r32 & 0x1) {
1235 udelay(20);
1236 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1237 }
1238
1239 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1240 if (fwstate == BFI_IOC_UNINIT) {
1241 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1217 return; 1242 return;
1243 }
1218 1244
1219 bfa_nw_ioc_fwver_get(ioc, &fwhdr); 1245 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1220 1246
1221 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) 1247 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
1248 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1222 return; 1249 return;
1250 }
1223 1251
1252 bfa_ioc_fwver_clear(ioc);
1224 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); 1253 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
1254 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
1225 1255
1226 /* 1256 /*
1227 * Try to lock and then unlock the semaphore. 1257 * Try to lock and then unlock the semaphore.
1228 */ 1258 */
1229 readl(ioc->ioc_regs.ioc_sem_reg); 1259 readl(ioc->ioc_regs.ioc_sem_reg);
1230 writel(1, ioc->ioc_regs.ioc_sem_reg); 1260 writel(1, ioc->ioc_regs.ioc_sem_reg);
1261
1262 /* Unlock init semaphore */
1263 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1231} 1264}
1232 1265
1233static void 1266static void
@@ -1585,11 +1618,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1585 u32 i; 1618 u32 i;
1586 u32 asicmode; 1619 u32 asicmode;
1587 1620
1588 /**
1589 * Initialize LMEM first before code download
1590 */
1591 bfa_ioc_lmem_init(ioc);
1592
1593 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); 1621 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1594 1622
1595 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1623 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
@@ -1914,6 +1942,10 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
1914 bfa_ioc_pll_init_asic(ioc); 1942 bfa_ioc_pll_init_asic(ioc);
1915 1943
1916 ioc->pllinit = true; 1944 ioc->pllinit = true;
1945
1946 /* Initialize LMEM */
1947 bfa_ioc_lmem_init(ioc);
1948
1917 /* 1949 /*
1918 * release semaphore. 1950 * release semaphore.
1919 */ 1951 */
@@ -2513,13 +2545,6 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
2513 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2545 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2514} 2546}
2515 2547
2516static void
2517bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
2518{
2519 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2520 return;
2521}
2522
2523/** 2548/**
2524 * @dg hal_iocpf_pvt BFA IOC PF private functions 2549 * @dg hal_iocpf_pvt BFA IOC PF private functions
2525 * @{ 2550 * @{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 348479bbfa3a..b6b036a143ae 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -199,9 +199,9 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
199 * Host to LPU mailbox message addresses 199 * Host to LPU mailbox message addresses
200 */ 200 */
201static const struct { 201static const struct {
202 u32 hfn_mbox; 202 u32 hfn_mbox;
203 u32 lpu_mbox; 203 u32 lpu_mbox;
204 u32 hfn_pgn; 204 u32 hfn_pgn;
205} ct_fnreg[] = { 205} ct_fnreg[] = {
206 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, 206 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
207 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, 207 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
@@ -803,17 +803,72 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
803} 803}
804 804
805#define CT2_NFC_MAX_DELAY 1000 805#define CT2_NFC_MAX_DELAY 1000
806#define CT2_NFC_VER_VALID 0x143
807#define BFA_IOC_PLL_POLL 1000000
808
809static bool
810bfa_ioc_ct2_nfc_halted(void __iomem *rb)
811{
812 volatile u32 r32;
813
814 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
815 if (r32 & __NFC_CONTROLLER_HALTED)
816 return true;
817
818 return false;
819}
820
821static void
822bfa_ioc_ct2_nfc_resume(void __iomem *rb)
823{
824 volatile u32 r32;
825 int i;
826
827 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
828 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
829 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
830 if (!(r32 & __NFC_CONTROLLER_HALTED))
831 return;
832 udelay(1000);
833 }
834 BUG_ON(1);
835}
836
806static enum bfa_status 837static enum bfa_status
807bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) 838bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
808{ 839{
809 volatile u32 wgn, r32; 840 volatile u32 wgn, r32;
810 int i; 841 u32 nfc_ver, i;
811 842
812 /*
813 * Initialize PLL if not already done by NFC
814 */
815 wgn = readl(rb + CT2_WGN_STATUS); 843 wgn = readl(rb + CT2_WGN_STATUS);
816 if (!(wgn & __GLBL_PF_VF_CFG_RDY)) { 844
845 nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
846
847 if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
848 (nfc_ver >= CT2_NFC_VER_VALID)) {
849 if (bfa_ioc_ct2_nfc_halted(rb))
850 bfa_ioc_ct2_nfc_resume(rb);
851 writel(__RESET_AND_START_SCLK_LCLK_PLLS,
852 rb + CT2_CSI_FW_CTL_SET_REG);
853
854 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
855 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
856 if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
857 break;
858 }
859 BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
860
861 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
862 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
863 if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
864 break;
865 }
866 BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
867 udelay(1000);
868
869 r32 = readl(rb + CT2_CSI_FW_CTL_REG);
870 BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
871 } else {
817 writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG)); 872 writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
818 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { 873 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
819 r32 = readl(rb + CT2_NFC_CSR_SET_REG); 874 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -821,53 +876,48 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
821 break; 876 break;
822 udelay(1000); 877 udelay(1000);
823 } 878 }
879
880 bfa_ioc_ct2_mac_reset(rb);
881 bfa_ioc_ct2_sclk_init(rb);
882 bfa_ioc_ct2_lclk_init(rb);
883
884 /* release soft reset on s_clk & l_clk */
885 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
886 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
887 rb + CT2_APP_PLL_SCLK_CTL_REG);
888 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
889 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
890 rb + CT2_APP_PLL_LCLK_CTL_REG);
891 }
892
893 /* Announce flash device presence, if flash was corrupted. */
894 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
895 r32 = readl((rb + PSS_GPIO_OUT_REG));
896 writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
897 r32 = readl((rb + PSS_GPIO_OE_REG));
898 writel(r32 | 1, rb + PSS_GPIO_OE_REG);
824 } 899 }
825 900
826 /* 901 /*
827 * Mask the interrupts and clear any 902 * Mask the interrupts and clear any
828 * pending interrupts left by BIOS/EFI 903 * pending interrupts left by BIOS/EFI
829 */ 904 */
830
831 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); 905 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
832 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); 906 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
833 907
834 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); 908 /* For first time initialization, no need to clear interrupts */
835 if (r32 == 1) { 909 r32 = readl(rb + HOST_SEM5_REG);
836 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); 910 if (r32 & 0x1) {
837 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); 911 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
838 } 912 if (r32 == 1) {
839 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); 913 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
840 if (r32 == 1) { 914 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
841 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); 915 }
842 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); 916 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
843 } 917 if (r32 == 1) {
844 918 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
845 bfa_ioc_ct2_mac_reset(rb); 919 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
846 bfa_ioc_ct2_sclk_init(rb); 920 }
847 bfa_ioc_ct2_lclk_init(rb);
848
849 /*
850 * release soft reset on s_clk & l_clk
851 */
852 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
853 writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
854 (rb + CT2_APP_PLL_SCLK_CTL_REG));
855
856 /*
857 * release soft reset on s_clk & l_clk
858 */
859 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
860 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
861 (rb + CT2_APP_PLL_LCLK_CTL_REG));
862
863 /*
864 * Announce flash device presence, if flash was corrupted.
865 */
866 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
867 r32 = readl((rb + PSS_GPIO_OUT_REG));
868 writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
869 r32 = readl((rb + PSS_GPIO_OE_REG));
870 writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
871 } 921 }
872 922
873 bfa_ioc_ct2_mem_init(rb); 923 bfa_ioc_ct2_mem_init(rb);
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index efacff3ab51d..0e094fe46dfd 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -339,10 +339,16 @@ enum {
339#define __A2T_AHB_LOAD 0x00000800 339#define __A2T_AHB_LOAD 0x00000800
340#define __WGN_READY 0x00000400 340#define __WGN_READY 0x00000400
341#define __GLBL_PF_VF_CFG_RDY 0x00000200 341#define __GLBL_PF_VF_CFG_RDY 0x00000200
342#define CT2_NFC_CSR_CLR_REG 0x00027420
342#define CT2_NFC_CSR_SET_REG 0x00027424 343#define CT2_NFC_CSR_SET_REG 0x00027424
343#define __HALT_NFC_CONTROLLER 0x00000002 344#define __HALT_NFC_CONTROLLER 0x00000002
344#define __NFC_CONTROLLER_HALTED 0x00001000 345#define __NFC_CONTROLLER_HALTED 0x00001000
345 346
347#define CT2_RSC_GPR15_REG 0x0002765c
348#define CT2_CSI_FW_CTL_REG 0x00027080
349#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
350#define CT2_CSI_FW_CTL_SET_REG 0x00027088
351
346#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 352#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
347#define __CSI_MAC_RESET 0x00000010 353#define __CSI_MAC_RESET 0x00000010
348#define __CSI_MAC_AHB_RESET 0x00000008 354#define __CSI_MAC_AHB_RESET 0x00000008
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ff78f770dec9..25c4e7f2a099 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -80,8 +80,6 @@ do { \
80 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \ 80 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
81} while (0) 81} while (0)
82 82
83#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
84
85static void 83static void
86bnad_add_to_list(struct bnad *bnad) 84bnad_add_to_list(struct bnad *bnad)
87{ 85{
@@ -103,7 +101,7 @@ bnad_remove_from_list(struct bnad *bnad)
103 * Reinitialize completions in CQ, once Rx is taken down 101 * Reinitialize completions in CQ, once Rx is taken down
104 */ 102 */
105static void 103static void
106bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb) 104bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
107{ 105{
108 struct bna_cq_entry *cmpl, *next_cmpl; 106 struct bna_cq_entry *cmpl, *next_cmpl;
109 unsigned int wi_range, wis = 0, ccb_prod = 0; 107 unsigned int wi_range, wis = 0, ccb_prod = 0;
@@ -141,7 +139,8 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
141 139
142 for (j = 0; j < frag; j++) { 140 for (j = 0; j < frag; j++) {
143 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr), 141 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
144 skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE); 142 skb_frag_size(&skb_shinfo(skb)->frags[j]),
143 DMA_TO_DEVICE);
145 dma_unmap_addr_set(&array[index], dma_addr, 0); 144 dma_unmap_addr_set(&array[index], dma_addr, 0);
146 BNA_QE_INDX_ADD(index, 1, depth); 145 BNA_QE_INDX_ADD(index, 1, depth);
147 } 146 }
@@ -155,7 +154,7 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
155 * so DMA unmap & freeing is fine. 154 * so DMA unmap & freeing is fine.
156 */ 155 */
157static void 156static void
158bnad_free_all_txbufs(struct bnad *bnad, 157bnad_txq_cleanup(struct bnad *bnad,
159 struct bna_tcb *tcb) 158 struct bna_tcb *tcb)
160{ 159{
161 u32 unmap_cons; 160 u32 unmap_cons;
@@ -183,13 +182,12 @@ bnad_free_all_txbufs(struct bnad *bnad,
183/* Data Path Handlers */ 182/* Data Path Handlers */
184 183
185/* 184/*
186 * bnad_free_txbufs : Frees the Tx bufs on Tx completion 185 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
187 * Can be called in a) Interrupt context 186 * Can be called in a) Interrupt context
188 * b) Sending context 187 * b) Sending context
189 * c) Tasklet context
190 */ 188 */
191static u32 189static u32
192bnad_free_txbufs(struct bnad *bnad, 190bnad_txcmpl_process(struct bnad *bnad,
193 struct bna_tcb *tcb) 191 struct bna_tcb *tcb)
194{ 192{
195 u32 unmap_cons, sent_packets = 0, sent_bytes = 0; 193 u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
@@ -198,13 +196,7 @@ bnad_free_txbufs(struct bnad *bnad,
198 struct bnad_skb_unmap *unmap_array; 196 struct bnad_skb_unmap *unmap_array;
199 struct sk_buff *skb; 197 struct sk_buff *skb;
200 198
201 /* 199 /* Just return if TX is stopped */
202 * Just return if TX is stopped. This check is useful
203 * when bnad_free_txbufs() runs out of a tasklet scheduled
204 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
205 * but this routine runs actually after the cleanup has been
206 * executed.
207 */
208 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) 200 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
209 return 0; 201 return 0;
210 202
@@ -243,57 +235,8 @@ bnad_free_txbufs(struct bnad *bnad,
243 return sent_packets; 235 return sent_packets;
244} 236}
245 237
246/* Tx Free Tasklet function */
247/* Frees for all the tcb's in all the Tx's */
248/*
249 * Scheduled from sending context, so that
250 * the fat Tx lock is not held for too long
251 * in the sending context.
252 */
253static void
254bnad_tx_free_tasklet(unsigned long bnad_ptr)
255{
256 struct bnad *bnad = (struct bnad *)bnad_ptr;
257 struct bna_tcb *tcb;
258 u32 acked = 0;
259 int i, j;
260
261 for (i = 0; i < bnad->num_tx; i++) {
262 for (j = 0; j < bnad->num_txq_per_tx; j++) {
263 tcb = bnad->tx_info[i].tcb[j];
264 if (!tcb)
265 continue;
266 if (((u16) (*tcb->hw_consumer_index) !=
267 tcb->consumer_index) &&
268 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
269 &tcb->flags))) {
270 acked = bnad_free_txbufs(bnad, tcb);
271 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
272 &tcb->flags)))
273 bna_ib_ack(tcb->i_dbell, acked);
274 smp_mb__before_clear_bit();
275 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
276 }
277 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
278 &tcb->flags)))
279 continue;
280 if (netif_queue_stopped(bnad->netdev)) {
281 if (acked && netif_carrier_ok(bnad->netdev) &&
282 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
283 BNAD_NETIF_WAKE_THRESHOLD) {
284 netif_wake_queue(bnad->netdev);
285 /* TODO */
286 /* Counters for individual TxQs? */
287 BNAD_UPDATE_CTR(bnad,
288 netif_queue_wakeup);
289 }
290 }
291 }
292 }
293}
294
295static u32 238static u32
296bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) 239bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
297{ 240{
298 struct net_device *netdev = bnad->netdev; 241 struct net_device *netdev = bnad->netdev;
299 u32 sent = 0; 242 u32 sent = 0;
@@ -301,7 +244,7 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
301 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) 244 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
302 return 0; 245 return 0;
303 246
304 sent = bnad_free_txbufs(bnad, tcb); 247 sent = bnad_txcmpl_process(bnad, tcb);
305 if (sent) { 248 if (sent) {
306 if (netif_queue_stopped(netdev) && 249 if (netif_queue_stopped(netdev) &&
307 netif_carrier_ok(netdev) && 250 netif_carrier_ok(netdev) &&
@@ -330,13 +273,13 @@ bnad_msix_tx(int irq, void *data)
330 struct bna_tcb *tcb = (struct bna_tcb *)data; 273 struct bna_tcb *tcb = (struct bna_tcb *)data;
331 struct bnad *bnad = tcb->bnad; 274 struct bnad *bnad = tcb->bnad;
332 275
333 bnad_tx(bnad, tcb); 276 bnad_tx_complete(bnad, tcb);
334 277
335 return IRQ_HANDLED; 278 return IRQ_HANDLED;
336} 279}
337 280
338static void 281static void
339bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb) 282bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
340{ 283{
341 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 284 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
342 285
@@ -348,7 +291,7 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
348} 291}
349 292
350static void 293static void
351bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 294bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
352{ 295{
353 struct bnad_unmap_q *unmap_q; 296 struct bnad_unmap_q *unmap_q;
354 struct bnad_skb_unmap *unmap_array; 297 struct bnad_skb_unmap *unmap_array;
@@ -369,11 +312,11 @@ bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
369 DMA_FROM_DEVICE); 312 DMA_FROM_DEVICE);
370 dev_kfree_skb(skb); 313 dev_kfree_skb(skb);
371 } 314 }
372 bnad_reset_rcb(bnad, rcb); 315 bnad_rcb_cleanup(bnad, rcb);
373} 316}
374 317
375static void 318static void
376bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 319bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
377{ 320{
378 u16 to_alloc, alloced, unmap_prod, wi_range; 321 u16 to_alloc, alloced, unmap_prod, wi_range;
379 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 322 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
@@ -434,14 +377,14 @@ bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
434 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { 377 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
435 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) 378 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
436 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) 379 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
437 bnad_alloc_n_post_rxbufs(bnad, rcb); 380 bnad_rxq_post(bnad, rcb);
438 smp_mb__before_clear_bit(); 381 smp_mb__before_clear_bit();
439 clear_bit(BNAD_RXQ_REFILL, &rcb->flags); 382 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
440 } 383 }
441} 384}
442 385
443static u32 386static u32
444bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) 387bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
445{ 388{
446 struct bna_cq_entry *cmpl, *next_cmpl; 389 struct bna_cq_entry *cmpl, *next_cmpl;
447 struct bna_rcb *rcb = NULL; 390 struct bna_rcb *rcb = NULL;
@@ -453,12 +396,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
453 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; 396 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
454 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); 397 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
455 398
456 set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags); 399 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
457
458 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
459 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
460 return 0; 400 return 0;
461 }
462 401
463 prefetch(bnad->netdev); 402 prefetch(bnad->netdev);
464 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, 403 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
@@ -533,9 +472,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
533 472
534 if (skb->ip_summed == CHECKSUM_UNNECESSARY) 473 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
535 napi_gro_receive(&rx_ctrl->napi, skb); 474 napi_gro_receive(&rx_ctrl->napi, skb);
536 else { 475 else
537 netif_receive_skb(skb); 476 netif_receive_skb(skb);
538 }
539 477
540next: 478next:
541 cmpl->valid = 0; 479 cmpl->valid = 0;
@@ -646,7 +584,7 @@ bnad_isr(int irq, void *data)
646 for (j = 0; j < bnad->num_txq_per_tx; j++) { 584 for (j = 0; j < bnad->num_txq_per_tx; j++) {
647 tcb = bnad->tx_info[i].tcb[j]; 585 tcb = bnad->tx_info[i].tcb[j];
648 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) 586 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
649 bnad_tx(bnad, bnad->tx_info[i].tcb[j]); 587 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
650 } 588 }
651 } 589 }
652 /* Rx processing */ 590 /* Rx processing */
@@ -839,20 +777,9 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
839{ 777{
840 struct bnad_tx_info *tx_info = 778 struct bnad_tx_info *tx_info =
841 (struct bnad_tx_info *)tcb->txq->tx->priv; 779 (struct bnad_tx_info *)tcb->txq->tx->priv;
842 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
843
844 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
845 cpu_relax();
846
847 bnad_free_all_txbufs(bnad, tcb);
848
849 unmap_q->producer_index = 0;
850 unmap_q->consumer_index = 0;
851
852 smp_mb__before_clear_bit();
853 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
854 780
855 tx_info->tcb[tcb->id] = NULL; 781 tx_info->tcb[tcb->id] = NULL;
782 tcb->priv = NULL;
856} 783}
857 784
858static void 785static void
@@ -866,12 +793,6 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
866} 793}
867 794
868static void 795static void
869bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
870{
871 bnad_free_all_rxbufs(bnad, rcb);
872}
873
874static void
875bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) 796bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
876{ 797{
877 struct bnad_rx_info *rx_info = 798 struct bnad_rx_info *rx_info =
@@ -916,7 +837,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
916{ 837{
917 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; 838 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
918 struct bna_tcb *tcb; 839 struct bna_tcb *tcb;
919 struct bnad_unmap_q *unmap_q;
920 u32 txq_id; 840 u32 txq_id;
921 int i; 841 int i;
922 842
@@ -926,23 +846,9 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
926 continue; 846 continue;
927 txq_id = tcb->id; 847 txq_id = tcb->id;
928 848
929 unmap_q = tcb->unmap_q; 849 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
930
931 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
932 continue;
933
934 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
935 cpu_relax();
936
937 bnad_free_all_txbufs(bnad, tcb);
938
939 unmap_q->producer_index = 0;
940 unmap_q->consumer_index = 0;
941
942 smp_mb__before_clear_bit();
943 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
944
945 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); 850 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
851 BUG_ON(*(tcb->hw_consumer_index) != 0);
946 852
947 if (netif_carrier_ok(bnad->netdev)) { 853 if (netif_carrier_ok(bnad->netdev)) {
948 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n", 854 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
@@ -963,6 +869,54 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
963 } 869 }
964} 870}
965 871
872/*
873 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
874 */
875static void
876bnad_tx_cleanup(struct delayed_work *work)
877{
878 struct bnad_tx_info *tx_info =
879 container_of(work, struct bnad_tx_info, tx_cleanup_work);
880 struct bnad *bnad = NULL;
881 struct bnad_unmap_q *unmap_q;
882 struct bna_tcb *tcb;
883 unsigned long flags;
884 uint32_t i, pending = 0;
885
886 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
887 tcb = tx_info->tcb[i];
888 if (!tcb)
889 continue;
890
891 bnad = tcb->bnad;
892
893 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
894 pending++;
895 continue;
896 }
897
898 bnad_txq_cleanup(bnad, tcb);
899
900 unmap_q = tcb->unmap_q;
901 unmap_q->producer_index = 0;
902 unmap_q->consumer_index = 0;
903
904 smp_mb__before_clear_bit();
905 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
906 }
907
908 if (pending) {
909 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
910 msecs_to_jiffies(1));
911 return;
912 }
913
914 spin_lock_irqsave(&bnad->bna_lock, flags);
915 bna_tx_cleanup_complete(tx_info->tx);
916 spin_unlock_irqrestore(&bnad->bna_lock, flags);
917}
918
919
966static void 920static void
967bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) 921bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
968{ 922{
@@ -976,8 +930,7 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
976 continue; 930 continue;
977 } 931 }
978 932
979 mdelay(BNAD_TXRX_SYNC_MDELAY); 933 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
980 bna_tx_cleanup_complete(tx);
981} 934}
982 935
983static void 936static void
@@ -1001,6 +954,44 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1001 } 954 }
1002} 955}
1003 956
957/*
958 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
959 */
960static void
961bnad_rx_cleanup(void *work)
962{
963 struct bnad_rx_info *rx_info =
964 container_of(work, struct bnad_rx_info, rx_cleanup_work);
965 struct bnad_rx_ctrl *rx_ctrl;
966 struct bnad *bnad = NULL;
967 unsigned long flags;
968 uint32_t i;
969
970 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
971 rx_ctrl = &rx_info->rx_ctrl[i];
972
973 if (!rx_ctrl->ccb)
974 continue;
975
976 bnad = rx_ctrl->ccb->bnad;
977
978 /*
979 * Wait till the poll handler has exited
980 * and nothing can be scheduled anymore
981 */
982 napi_disable(&rx_ctrl->napi);
983
984 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
985 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
986 if (rx_ctrl->ccb->rcb[1])
987 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
988 }
989
990 spin_lock_irqsave(&bnad->bna_lock, flags);
991 bna_rx_cleanup_complete(rx_info->rx);
992 spin_unlock_irqrestore(&bnad->bna_lock, flags);
993}
994
1004static void 995static void
1005bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) 996bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1006{ 997{
@@ -1009,8 +1000,6 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1009 struct bnad_rx_ctrl *rx_ctrl; 1000 struct bnad_rx_ctrl *rx_ctrl;
1010 int i; 1001 int i;
1011 1002
1012 mdelay(BNAD_TXRX_SYNC_MDELAY);
1013
1014 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { 1003 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1015 rx_ctrl = &rx_info->rx_ctrl[i]; 1004 rx_ctrl = &rx_info->rx_ctrl[i];
1016 ccb = rx_ctrl->ccb; 1005 ccb = rx_ctrl->ccb;
@@ -1021,12 +1010,9 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1021 1010
1022 if (ccb->rcb[1]) 1011 if (ccb->rcb[1])
1023 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); 1012 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1024
1025 while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
1026 cpu_relax();
1027 } 1013 }
1028 1014
1029 bna_rx_cleanup_complete(rx); 1015 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1030} 1016}
1031 1017
1032static void 1018static void
@@ -1046,13 +1032,12 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1046 if (!ccb) 1032 if (!ccb)
1047 continue; 1033 continue;
1048 1034
1049 bnad_cq_cmpl_init(bnad, ccb); 1035 napi_enable(&rx_ctrl->napi);
1050 1036
1051 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) { 1037 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1052 rcb = ccb->rcb[j]; 1038 rcb = ccb->rcb[j];
1053 if (!rcb) 1039 if (!rcb)
1054 continue; 1040 continue;
1055 bnad_free_all_rxbufs(bnad, rcb);
1056 1041
1057 set_bit(BNAD_RXQ_STARTED, &rcb->flags); 1042 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1058 set_bit(BNAD_RXQ_POST_OK, &rcb->flags); 1043 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
@@ -1063,7 +1048,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1063 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { 1048 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1064 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) 1049 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1065 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) 1050 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1066 bnad_alloc_n_post_rxbufs(bnad, rcb); 1051 bnad_rxq_post(bnad, rcb);
1067 smp_mb__before_clear_bit(); 1052 smp_mb__before_clear_bit();
1068 clear_bit(BNAD_RXQ_REFILL, &rcb->flags); 1053 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1069 } 1054 }
@@ -1687,7 +1672,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1687 if (!netif_carrier_ok(bnad->netdev)) 1672 if (!netif_carrier_ok(bnad->netdev))
1688 goto poll_exit; 1673 goto poll_exit;
1689 1674
1690 rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget); 1675 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1691 if (rcvd >= budget) 1676 if (rcvd >= budget)
1692 return rcvd; 1677 return rcvd;
1693 1678
@@ -1704,7 +1689,7 @@ poll_exit:
1704 1689
1705#define BNAD_NAPI_POLL_QUOTA 64 1690#define BNAD_NAPI_POLL_QUOTA 64
1706static void 1691static void
1707bnad_napi_init(struct bnad *bnad, u32 rx_id) 1692bnad_napi_add(struct bnad *bnad, u32 rx_id)
1708{ 1693{
1709 struct bnad_rx_ctrl *rx_ctrl; 1694 struct bnad_rx_ctrl *rx_ctrl;
1710 int i; 1695 int i;
@@ -1718,34 +1703,18 @@ bnad_napi_init(struct bnad *bnad, u32 rx_id)
1718} 1703}
1719 1704
1720static void 1705static void
1721bnad_napi_enable(struct bnad *bnad, u32 rx_id) 1706bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1722{
1723 struct bnad_rx_ctrl *rx_ctrl;
1724 int i;
1725
1726 /* Initialize & enable NAPI */
1727 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1728 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1729
1730 napi_enable(&rx_ctrl->napi);
1731 }
1732}
1733
1734static void
1735bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1736{ 1707{
1737 int i; 1708 int i;
1738 1709
1739 /* First disable and then clean up */ 1710 /* First disable and then clean up */
1740 for (i = 0; i < bnad->num_rxp_per_rx; i++) { 1711 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1741 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1742 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); 1712 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1743 }
1744} 1713}
1745 1714
1746/* Should be held with conf_lock held */ 1715/* Should be held with conf_lock held */
1747void 1716void
1748bnad_cleanup_tx(struct bnad *bnad, u32 tx_id) 1717bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1749{ 1718{
1750 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; 1719 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1751 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; 1720 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
@@ -1764,9 +1733,6 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1764 bnad_tx_msix_unregister(bnad, tx_info, 1733 bnad_tx_msix_unregister(bnad, tx_info,
1765 bnad->num_txq_per_tx); 1734 bnad->num_txq_per_tx);
1766 1735
1767 if (0 == tx_id)
1768 tasklet_kill(&bnad->tx_free_tasklet);
1769
1770 spin_lock_irqsave(&bnad->bna_lock, flags); 1736 spin_lock_irqsave(&bnad->bna_lock, flags);
1771 bna_tx_destroy(tx_info->tx); 1737 bna_tx_destroy(tx_info->tx);
1772 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1738 spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1832,6 +1798,9 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1832 goto err_return; 1798 goto err_return;
1833 tx_info->tx = tx; 1799 tx_info->tx = tx;
1834 1800
1801 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1802 (work_func_t)bnad_tx_cleanup);
1803
1835 /* Register ISR for the Tx object */ 1804 /* Register ISR for the Tx object */
1836 if (intr_info->intr_type == BNA_INTR_T_MSIX) { 1805 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1837 err = bnad_tx_msix_register(bnad, tx_info, 1806 err = bnad_tx_msix_register(bnad, tx_info,
@@ -1896,7 +1865,7 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1896 1865
1897/* Called with mutex_lock(&bnad->conf_mutex) held */ 1866/* Called with mutex_lock(&bnad->conf_mutex) held */
1898void 1867void
1899bnad_cleanup_rx(struct bnad *bnad, u32 rx_id) 1868bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
1900{ 1869{
1901 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; 1870 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1902 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; 1871 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
@@ -1928,7 +1897,7 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1928 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) 1897 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1929 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); 1898 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1930 1899
1931 bnad_napi_disable(bnad, rx_id); 1900 bnad_napi_delete(bnad, rx_id);
1932 1901
1933 spin_lock_irqsave(&bnad->bna_lock, flags); 1902 spin_lock_irqsave(&bnad->bna_lock, flags);
1934 bna_rx_destroy(rx_info->rx); 1903 bna_rx_destroy(rx_info->rx);
@@ -1952,7 +1921,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1952 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; 1921 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1953 static const struct bna_rx_event_cbfn rx_cbfn = { 1922 static const struct bna_rx_event_cbfn rx_cbfn = {
1954 .rcb_setup_cbfn = bnad_cb_rcb_setup, 1923 .rcb_setup_cbfn = bnad_cb_rcb_setup,
1955 .rcb_destroy_cbfn = bnad_cb_rcb_destroy, 1924 .rcb_destroy_cbfn = NULL,
1956 .ccb_setup_cbfn = bnad_cb_ccb_setup, 1925 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1957 .ccb_destroy_cbfn = bnad_cb_ccb_destroy, 1926 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1958 .rx_stall_cbfn = bnad_cb_rx_stall, 1927 .rx_stall_cbfn = bnad_cb_rx_stall,
@@ -1998,11 +1967,14 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1998 rx_info->rx = rx; 1967 rx_info->rx = rx;
1999 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1968 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2000 1969
1970 INIT_WORK(&rx_info->rx_cleanup_work,
1971 (work_func_t)(bnad_rx_cleanup));
1972
2001 /* 1973 /*
2002 * Init NAPI, so that state is set to NAPI_STATE_SCHED, 1974 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2003 * so that IRQ handler cannot schedule NAPI at this point. 1975 * so that IRQ handler cannot schedule NAPI at this point.
2004 */ 1976 */
2005 bnad_napi_init(bnad, rx_id); 1977 bnad_napi_add(bnad, rx_id);
2006 1978
2007 /* Register ISR for the Rx object */ 1979 /* Register ISR for the Rx object */
2008 if (intr_info->intr_type == BNA_INTR_T_MSIX) { 1980 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2028,13 +2000,10 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2028 bna_rx_enable(rx); 2000 bna_rx_enable(rx);
2029 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2001 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2030 2002
2031 /* Enable scheduling of NAPI */
2032 bnad_napi_enable(bnad, rx_id);
2033
2034 return 0; 2003 return 0;
2035 2004
2036err_return: 2005err_return:
2037 bnad_cleanup_rx(bnad, rx_id); 2006 bnad_destroy_rx(bnad, rx_id);
2038 return err; 2007 return err;
2039} 2008}
2040 2009
@@ -2519,7 +2488,7 @@ bnad_open(struct net_device *netdev)
2519 return 0; 2488 return 0;
2520 2489
2521cleanup_tx: 2490cleanup_tx:
2522 bnad_cleanup_tx(bnad, 0); 2491 bnad_destroy_tx(bnad, 0);
2523 2492
2524err_return: 2493err_return:
2525 mutex_unlock(&bnad->conf_mutex); 2494 mutex_unlock(&bnad->conf_mutex);
@@ -2546,8 +2515,8 @@ bnad_stop(struct net_device *netdev)
2546 2515
2547 wait_for_completion(&bnad->bnad_completions.enet_comp); 2516 wait_for_completion(&bnad->bnad_completions.enet_comp);
2548 2517
2549 bnad_cleanup_tx(bnad, 0); 2518 bnad_destroy_tx(bnad, 0);
2550 bnad_cleanup_rx(bnad, 0); 2519 bnad_destroy_rx(bnad, 0);
2551 2520
2552 /* Synchronize mailbox IRQ */ 2521 /* Synchronize mailbox IRQ */
2553 bnad_mbox_irq_sync(bnad); 2522 bnad_mbox_irq_sync(bnad);
@@ -2620,7 +2589,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2620 if ((u16) (*tcb->hw_consumer_index) != 2589 if ((u16) (*tcb->hw_consumer_index) !=
2621 tcb->consumer_index && 2590 tcb->consumer_index &&
2622 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { 2591 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2623 acked = bnad_free_txbufs(bnad, tcb); 2592 acked = bnad_txcmpl_process(bnad, tcb);
2624 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 2593 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2625 bna_ib_ack(tcb->i_dbell, acked); 2594 bna_ib_ack(tcb->i_dbell, acked);
2626 smp_mb__before_clear_bit(); 2595 smp_mb__before_clear_bit();
@@ -2843,9 +2812,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2843 bna_txq_prod_indx_doorbell(tcb); 2812 bna_txq_prod_indx_doorbell(tcb);
2844 smp_mb(); 2813 smp_mb();
2845 2814
2846 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2847 tasklet_schedule(&bnad->tx_free_tasklet);
2848
2849 return NETDEV_TX_OK; 2815 return NETDEV_TX_OK;
2850} 2816}
2851 2817
@@ -3127,8 +3093,8 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
3127/* 3093/*
3128 * 1. Initialize the bnad structure 3094 * 1. Initialize the bnad structure
3129 * 2. Setup netdev pointer in pci_dev 3095 * 2. Setup netdev pointer in pci_dev
3130 * 3. Initialze Tx free tasklet 3096 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3131 * 4. Initialize no. of TxQ & CQs & MSIX vectors 3097 * 4. Initialize work queue.
3132 */ 3098 */
3133static int 3099static int
3134bnad_init(struct bnad *bnad, 3100bnad_init(struct bnad *bnad,
@@ -3171,8 +3137,11 @@ bnad_init(struct bnad *bnad,
3171 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; 3137 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3172 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; 3138 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3173 3139
3174 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet, 3140 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3175 (unsigned long)bnad); 3141 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3142
3143 if (!bnad->work_q)
3144 return -ENOMEM;
3176 3145
3177 return 0; 3146 return 0;
3178} 3147}
@@ -3185,6 +3154,12 @@ bnad_init(struct bnad *bnad,
3185static void 3154static void
3186bnad_uninit(struct bnad *bnad) 3155bnad_uninit(struct bnad *bnad)
3187{ 3156{
3157 if (bnad->work_q) {
3158 flush_workqueue(bnad->work_q);
3159 destroy_workqueue(bnad->work_q);
3160 bnad->work_q = NULL;
3161 }
3162
3188 if (bnad->bar0) 3163 if (bnad->bar0)
3189 iounmap(bnad->bar0); 3164 iounmap(bnad->bar0);
3190 pci_set_drvdata(bnad->pcidev, NULL); 3165 pci_set_drvdata(bnad->pcidev, NULL);
@@ -3304,7 +3279,6 @@ bnad_pci_probe(struct pci_dev *pdev,
3304 /* 3279 /*
3305 * Initialize bnad structure 3280 * Initialize bnad structure
3306 * Setup relation between pci_dev & netdev 3281 * Setup relation between pci_dev & netdev
3307 * Init Tx free tasklet
3308 */ 3282 */
3309 err = bnad_init(bnad, pdev, netdev); 3283 err = bnad_init(bnad, pdev, netdev);
3310 if (err) 3284 if (err)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 55824d92699f..72742be11277 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
71#define BNAD_NAME "bna" 71#define BNAD_NAME "bna"
72#define BNAD_NAME_LEN 64 72#define BNAD_NAME_LEN 64
73 73
74#define BNAD_VERSION "3.0.2.2" 74#define BNAD_VERSION "3.0.23.0"
75 75
76#define BNAD_MAILBOX_MSIX_INDEX 0 76#define BNAD_MAILBOX_MSIX_INDEX 0
77#define BNAD_MAILBOX_MSIX_VECTORS 1 77#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -210,6 +210,7 @@ struct bnad_tx_info {
210 struct bna_tx *tx; /* 1:1 between tx_info & tx */ 210 struct bna_tx *tx; /* 1:1 between tx_info & tx */
211 struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX]; 211 struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
212 u32 tx_id; 212 u32 tx_id;
213 struct delayed_work tx_cleanup_work;
213} ____cacheline_aligned; 214} ____cacheline_aligned;
214 215
215struct bnad_rx_info { 216struct bnad_rx_info {
@@ -217,6 +218,7 @@ struct bnad_rx_info {
217 218
218 struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX]; 219 struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
219 u32 rx_id; 220 u32 rx_id;
221 struct work_struct rx_cleanup_work;
220} ____cacheline_aligned; 222} ____cacheline_aligned;
221 223
222/* Unmap queues for Tx / Rx cleanup */ 224/* Unmap queues for Tx / Rx cleanup */
@@ -318,7 +320,7 @@ struct bnad {
318 /* Burnt in MAC address */ 320 /* Burnt in MAC address */
319 mac_t perm_addr; 321 mac_t perm_addr;
320 322
321 struct tasklet_struct tx_free_tasklet; 323 struct workqueue_struct *work_q;
322 324
323 /* Statistics */ 325 /* Statistics */
324 struct bnad_stats stats; 326 struct bnad_stats stats;
@@ -328,6 +330,7 @@ struct bnad {
328 char adapter_name[BNAD_NAME_LEN]; 330 char adapter_name[BNAD_NAME_LEN];
329 char port_name[BNAD_NAME_LEN]; 331 char port_name[BNAD_NAME_LEN];
330 char mbox_irq_name[BNAD_NAME_LEN]; 332 char mbox_irq_name[BNAD_NAME_LEN];
333 char wq_name[BNAD_NAME_LEN];
331 334
332 /* debugfs specific data */ 335 /* debugfs specific data */
333 char *regdata; 336 char *regdata;
@@ -370,8 +373,8 @@ extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
370 373
371extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id); 374extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
372extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id); 375extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
373extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id); 376extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
374extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id); 377extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
375 378
376/* Timer start/stop protos */ 379/* Timer start/stop protos */
377extern void bnad_dim_timer_start(struct bnad *bnad); 380extern void bnad_dim_timer_start(struct bnad *bnad);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index ab753d7334a6..40e1e84f4984 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -464,7 +464,7 @@ bnad_set_ringparam(struct net_device *netdev,
464 for (i = 0; i < bnad->num_rx; i++) { 464 for (i = 0; i < bnad->num_rx; i++) {
465 if (!bnad->rx_info[i].rx) 465 if (!bnad->rx_info[i].rx)
466 continue; 466 continue;
467 bnad_cleanup_rx(bnad, i); 467 bnad_destroy_rx(bnad, i);
468 current_err = bnad_setup_rx(bnad, i); 468 current_err = bnad_setup_rx(bnad, i);
469 if (current_err && !err) 469 if (current_err && !err)
470 err = current_err; 470 err = current_err;
@@ -492,7 +492,7 @@ bnad_set_ringparam(struct net_device *netdev,
492 for (i = 0; i < bnad->num_tx; i++) { 492 for (i = 0; i < bnad->num_tx; i++) {
493 if (!bnad->tx_info[i].tx) 493 if (!bnad->tx_info[i].tx)
494 continue; 494 continue;
495 bnad_cleanup_tx(bnad, i); 495 bnad_destroy_tx(bnad, i);
496 current_err = bnad_setup_tx(bnad, i); 496 current_err = bnad_setup_tx(bnad, i);
497 if (current_err && !err) 497 if (current_err && !err)
498 err = current_err; 498 err = current_err;
@@ -539,7 +539,7 @@ bnad_set_pauseparam(struct net_device *netdev,
539} 539}
540 540
541static void 541static void
542bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string) 542bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
543{ 543{
544 struct bnad *bnad = netdev_priv(netdev); 544 struct bnad *bnad = netdev_priv(netdev);
545 int i, j, q_num; 545 int i, j, q_num;
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 906117016fc4..77884191a8c6 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -30,6 +30,7 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/clk.h> 31#include <linux/clk.h>
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/phy.h>
33 34
34#include <asm/io.h> 35#include <asm/io.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
@@ -51,21 +52,17 @@
51/* 52/*
52 * Read from a EMAC register. 53 * Read from a EMAC register.
53 */ 54 */
54static inline unsigned long at91_emac_read(unsigned int reg) 55static inline unsigned long at91_emac_read(struct at91_private *lp, unsigned int reg)
55{ 56{
56 void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC; 57 return __raw_readl(lp->emac_base + reg);
57
58 return __raw_readl(emac_base + reg);
59} 58}
60 59
61/* 60/*
62 * Write to a EMAC register. 61 * Write to a EMAC register.
63 */ 62 */
64static inline void at91_emac_write(unsigned int reg, unsigned long value) 63static inline void at91_emac_write(struct at91_private *lp, unsigned int reg, unsigned long value)
65{ 64{
66 void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC; 65 __raw_writel(value, lp->emac_base + reg);
67
68 __raw_writel(value, emac_base + reg);
69} 66}
70 67
71/* ........................... PHY INTERFACE ........................... */ 68/* ........................... PHY INTERFACE ........................... */
@@ -75,32 +72,33 @@ static inline void at91_emac_write(unsigned int reg, unsigned long value)
75 * When not called from an interrupt-handler, access to the PHY must be 72 * When not called from an interrupt-handler, access to the PHY must be
76 * protected by a spinlock. 73 * protected by a spinlock.
77 */ 74 */
78static void enable_mdi(void) 75static void enable_mdi(struct at91_private *lp)
79{ 76{
80 unsigned long ctl; 77 unsigned long ctl;
81 78
82 ctl = at91_emac_read(AT91_EMAC_CTL); 79 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
83 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */ 80 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */
84} 81}
85 82
86/* 83/*
87 * Disable the MDIO bit in the MAC control register 84 * Disable the MDIO bit in the MAC control register
88 */ 85 */
89static void disable_mdi(void) 86static void disable_mdi(struct at91_private *lp)
90{ 87{
91 unsigned long ctl; 88 unsigned long ctl;
92 89
93 ctl = at91_emac_read(AT91_EMAC_CTL); 90 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
94 at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */ 91 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */
95} 92}
96 93
97/* 94/*
98 * Wait until the PHY operation is complete. 95 * Wait until the PHY operation is complete.
99 */ 96 */
100static inline void at91_phy_wait(void) { 97static inline void at91_phy_wait(struct at91_private *lp)
98{
101 unsigned long timeout = jiffies + 2; 99 unsigned long timeout = jiffies + 2;
102 100
103 while (!(at91_emac_read(AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) { 101 while (!(at91_emac_read(lp, AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
104 if (time_after(jiffies, timeout)) { 102 if (time_after(jiffies, timeout)) {
105 printk("at91_ether: MIO timeout\n"); 103 printk("at91_ether: MIO timeout\n");
106 break; 104 break;
@@ -113,28 +111,28 @@ static inline void at91_phy_wait(void) {
113 * Write value to the a PHY register 111 * Write value to the a PHY register
114 * Note: MDI interface is assumed to already have been enabled. 112 * Note: MDI interface is assumed to already have been enabled.
115 */ 113 */
116static void write_phy(unsigned char phy_addr, unsigned char address, unsigned int value) 114static void write_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int value)
117{ 115{
118 at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W 116 at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
119 | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA)); 117 | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
120 118
121 /* Wait until IDLE bit in Network Status register is cleared */ 119 /* Wait until IDLE bit in Network Status register is cleared */
122 at91_phy_wait(); 120 at91_phy_wait(lp);
123} 121}
124 122
125/* 123/*
126 * Read value stored in a PHY register. 124 * Read value stored in a PHY register.
127 * Note: MDI interface is assumed to already have been enabled. 125 * Note: MDI interface is assumed to already have been enabled.
128 */ 126 */
129static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int *value) 127static void read_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int *value)
130{ 128{
131 at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R 129 at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
132 | ((phy_addr & 0x1f) << 23) | (address << 18)); 130 | ((phy_addr & 0x1f) << 23) | (address << 18));
133 131
134 /* Wait until IDLE bit in Network Status register is cleared */ 132 /* Wait until IDLE bit in Network Status register is cleared */
135 at91_phy_wait(); 133 at91_phy_wait(lp);
136 134
137 *value = at91_emac_read(AT91_EMAC_MAN) & AT91_EMAC_DATA; 135 *value = at91_emac_read(lp, AT91_EMAC_MAN) & AT91_EMAC_DATA;
138} 136}
139 137
140/* ........................... PHY MANAGEMENT .......................... */ 138/* ........................... PHY MANAGEMENT .......................... */
@@ -158,13 +156,13 @@ static void update_linkspeed(struct net_device *dev, int silent)
158 } 156 }
159 157
160 /* Link up, or auto-negotiation still in progress */ 158 /* Link up, or auto-negotiation still in progress */
161 read_phy(lp->phy_address, MII_BMSR, &bmsr); 159 read_phy(lp, lp->phy_address, MII_BMSR, &bmsr);
162 read_phy(lp->phy_address, MII_BMCR, &bmcr); 160 read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
163 if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */ 161 if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */
164 if (!(bmsr & BMSR_ANEGCOMPLETE)) 162 if (!(bmsr & BMSR_ANEGCOMPLETE))
165 return; /* Do nothing - another interrupt generated when negotiation complete */ 163 return; /* Do nothing - another interrupt generated when negotiation complete */
166 164
167 read_phy(lp->phy_address, MII_LPA, &lpa); 165 read_phy(lp, lp->phy_address, MII_LPA, &lpa);
168 if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100; 166 if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
169 else speed = SPEED_10; 167 else speed = SPEED_10;
170 if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL; 168 if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
@@ -175,7 +173,7 @@ static void update_linkspeed(struct net_device *dev, int silent)
175 } 173 }
176 174
177 /* Update the MAC */ 175 /* Update the MAC */
178 mac_cfg = at91_emac_read(AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD); 176 mac_cfg = at91_emac_read(lp, AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
179 if (speed == SPEED_100) { 177 if (speed == SPEED_100) {
180 if (duplex == DUPLEX_FULL) /* 100 Full Duplex */ 178 if (duplex == DUPLEX_FULL) /* 100 Full Duplex */
181 mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD; 179 mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
@@ -186,7 +184,7 @@ static void update_linkspeed(struct net_device *dev, int silent)
186 mac_cfg |= AT91_EMAC_FD; 184 mac_cfg |= AT91_EMAC_FD;
187 else {} /* 10 Half Duplex */ 185 else {} /* 10 Half Duplex */
188 } 186 }
189 at91_emac_write(AT91_EMAC_CFG, mac_cfg); 187 at91_emac_write(lp, AT91_EMAC_CFG, mac_cfg);
190 188
191 if (!silent) 189 if (!silent)
192 printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex"); 190 printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
@@ -207,34 +205,34 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
207 * level-triggering. We therefore have to check if the PHY actually has 205 * level-triggering. We therefore have to check if the PHY actually has
208 * an IRQ pending. 206 * an IRQ pending.
209 */ 207 */
210 enable_mdi(); 208 enable_mdi(lp);
211 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { 209 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
212 read_phy(lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */ 210 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */
213 if (!(phy & (1 << 0))) 211 if (!(phy & (1 << 0)))
214 goto done; 212 goto done;
215 } 213 }
216 else if (lp->phy_type == MII_LXT971A_ID) { 214 else if (lp->phy_type == MII_LXT971A_ID) {
217 read_phy(lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */ 215 read_phy(lp, lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */
218 if (!(phy & (1 << 2))) 216 if (!(phy & (1 << 2)))
219 goto done; 217 goto done;
220 } 218 }
221 else if (lp->phy_type == MII_BCM5221_ID) { 219 else if (lp->phy_type == MII_BCM5221_ID) {
222 read_phy(lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */ 220 read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */
223 if (!(phy & (1 << 0))) 221 if (!(phy & (1 << 0)))
224 goto done; 222 goto done;
225 } 223 }
226 else if (lp->phy_type == MII_KS8721_ID) { 224 else if (lp->phy_type == MII_KS8721_ID) {
227 read_phy(lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */ 225 read_phy(lp, lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */
228 if (!(phy & ((1 << 2) | 1))) 226 if (!(phy & ((1 << 2) | 1)))
229 goto done; 227 goto done;
230 } 228 }
231 else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */ 229 else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */
232 read_phy(lp->phy_address, MII_T78Q21INT_REG, &phy); 230 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &phy);
233 if (!(phy & ((1 << 2) | 1))) 231 if (!(phy & ((1 << 2) | 1)))
234 goto done; 232 goto done;
235 } 233 }
236 else if (lp->phy_type == MII_DP83848_ID) { 234 else if (lp->phy_type == MII_DP83848_ID) {
237 read_phy(lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */ 235 read_phy(lp, lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */
238 if (!(phy & (1 << 7))) 236 if (!(phy & (1 << 7)))
239 goto done; 237 goto done;
240 } 238 }
@@ -242,7 +240,7 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
242 update_linkspeed(dev, 0); 240 update_linkspeed(dev, 0);
243 241
244done: 242done:
245 disable_mdi(); 243 disable_mdi(lp);
246 244
247 return IRQ_HANDLED; 245 return IRQ_HANDLED;
248} 246}
@@ -265,7 +263,7 @@ static void enable_phyirq(struct net_device *dev)
265 return; 263 return;
266 } 264 }
267 265
268 irq_number = lp->board_data.phy_irq_pin; 266 irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
269 status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev); 267 status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
270 if (status) { 268 if (status) {
271 printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status); 269 printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
@@ -273,41 +271,41 @@ static void enable_phyirq(struct net_device *dev)
273 } 271 }
274 272
275 spin_lock_irq(&lp->lock); 273 spin_lock_irq(&lp->lock);
276 enable_mdi(); 274 enable_mdi(lp);
277 275
278 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */ 276 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
279 read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr); 277 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
280 dsintr = dsintr & ~0xf00; /* clear bits 8..11 */ 278 dsintr = dsintr & ~0xf00; /* clear bits 8..11 */
281 write_phy(lp->phy_address, MII_DSINTR_REG, dsintr); 279 write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
282 } 280 }
283 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */ 281 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
284 read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr); 282 read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
285 dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */ 283 dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */
286 write_phy(lp->phy_address, MII_ISINTE_REG, dsintr); 284 write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
287 } 285 }
288 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */ 286 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
289 dsintr = (1 << 15) | ( 1 << 14); 287 dsintr = (1 << 15) | ( 1 << 14);
290 write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr); 288 write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
291 } 289 }
292 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */ 290 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
293 dsintr = (1 << 10) | ( 1 << 8); 291 dsintr = (1 << 10) | ( 1 << 8);
294 write_phy(lp->phy_address, MII_TPISTATUS, dsintr); 292 write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
295 } 293 }
296 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */ 294 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
297 read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr); 295 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
298 dsintr = dsintr | 0x500; /* set bits 8, 10 */ 296 dsintr = dsintr | 0x500; /* set bits 8, 10 */
299 write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr); 297 write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
300 } 298 }
301 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */ 299 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
302 read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr); 300 read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
303 dsintr = dsintr | 0x3c; /* set bits 2..5 */ 301 dsintr = dsintr | 0x3c; /* set bits 2..5 */
304 write_phy(lp->phy_address, MII_DPMISR_REG, dsintr); 302 write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
305 read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr); 303 read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
306 dsintr = dsintr | 0x3; /* set bits 0,1 */ 304 dsintr = dsintr | 0x3; /* set bits 0,1 */
307 write_phy(lp->phy_address, MII_DPMICR_REG, dsintr); 305 write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
308 } 306 }
309 307
310 disable_mdi(); 308 disable_mdi(lp);
311 spin_unlock_irq(&lp->lock); 309 spin_unlock_irq(&lp->lock);
312} 310}
313 311
@@ -326,46 +324,46 @@ static void disable_phyirq(struct net_device *dev)
326 } 324 }
327 325
328 spin_lock_irq(&lp->lock); 326 spin_lock_irq(&lp->lock);
329 enable_mdi(); 327 enable_mdi(lp);
330 328
331 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */ 329 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
332 read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr); 330 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
333 dsintr = dsintr | 0xf00; /* set bits 8..11 */ 331 dsintr = dsintr | 0xf00; /* set bits 8..11 */
334 write_phy(lp->phy_address, MII_DSINTR_REG, dsintr); 332 write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
335 } 333 }
336 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */ 334 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
337 read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr); 335 read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
338 dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */ 336 dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */
339 write_phy(lp->phy_address, MII_ISINTE_REG, dsintr); 337 write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
340 } 338 }
341 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */ 339 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
342 read_phy(lp->phy_address, MII_BCMINTR_REG, &dsintr); 340 read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &dsintr);
343 dsintr = ~(1 << 14); 341 dsintr = ~(1 << 14);
344 write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr); 342 write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
345 } 343 }
346 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */ 344 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
347 read_phy(lp->phy_address, MII_TPISTATUS, &dsintr); 345 read_phy(lp, lp->phy_address, MII_TPISTATUS, &dsintr);
348 dsintr = ~((1 << 10) | (1 << 8)); 346 dsintr = ~((1 << 10) | (1 << 8));
349 write_phy(lp->phy_address, MII_TPISTATUS, dsintr); 347 write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
350 } 348 }
351 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */ 349 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
352 read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr); 350 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
353 dsintr = dsintr & ~0x500; /* clear bits 8, 10 */ 351 dsintr = dsintr & ~0x500; /* clear bits 8, 10 */
354 write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr); 352 write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
355 } 353 }
356 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */ 354 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
357 read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr); 355 read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
358 dsintr = dsintr & ~0x3; /* clear bits 0, 1 */ 356 dsintr = dsintr & ~0x3; /* clear bits 0, 1 */
359 write_phy(lp->phy_address, MII_DPMICR_REG, dsintr); 357 write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
360 read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr); 358 read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
361 dsintr = dsintr & ~0x3c; /* clear bits 2..5 */ 359 dsintr = dsintr & ~0x3c; /* clear bits 2..5 */
362 write_phy(lp->phy_address, MII_DPMISR_REG, dsintr); 360 write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
363 } 361 }
364 362
365 disable_mdi(); 363 disable_mdi(lp);
366 spin_unlock_irq(&lp->lock); 364 spin_unlock_irq(&lp->lock);
367 365
368 irq_number = lp->board_data.phy_irq_pin; 366 irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
369 free_irq(irq_number, dev); /* Free interrupt handler */ 367 free_irq(irq_number, dev); /* Free interrupt handler */
370} 368}
371 369
@@ -379,17 +377,17 @@ static void reset_phy(struct net_device *dev)
379 unsigned int bmcr; 377 unsigned int bmcr;
380 378
381 spin_lock_irq(&lp->lock); 379 spin_lock_irq(&lp->lock);
382 enable_mdi(); 380 enable_mdi(lp);
383 381
384 /* Perform PHY reset */ 382 /* Perform PHY reset */
385 write_phy(lp->phy_address, MII_BMCR, BMCR_RESET); 383 write_phy(lp, lp->phy_address, MII_BMCR, BMCR_RESET);
386 384
387 /* Wait until PHY reset is complete */ 385 /* Wait until PHY reset is complete */
388 do { 386 do {
389 read_phy(lp->phy_address, MII_BMCR, &bmcr); 387 read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
390 } while (!(bmcr & BMCR_RESET)); 388 } while (!(bmcr & BMCR_RESET));
391 389
392 disable_mdi(); 390 disable_mdi(lp);
393 spin_unlock_irq(&lp->lock); 391 spin_unlock_irq(&lp->lock);
394} 392}
395#endif 393#endif
@@ -399,13 +397,37 @@ static void at91ether_check_link(unsigned long dev_id)
399 struct net_device *dev = (struct net_device *) dev_id; 397 struct net_device *dev = (struct net_device *) dev_id;
400 struct at91_private *lp = netdev_priv(dev); 398 struct at91_private *lp = netdev_priv(dev);
401 399
402 enable_mdi(); 400 enable_mdi(lp);
403 update_linkspeed(dev, 1); 401 update_linkspeed(dev, 1);
404 disable_mdi(); 402 disable_mdi(lp);
405 403
406 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL); 404 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
407} 405}
408 406
407/*
408 * Perform any PHY-specific initialization.
409 */
410static void __init initialize_phy(struct at91_private *lp)
411{
412 unsigned int val;
413
414 spin_lock_irq(&lp->lock);
415 enable_mdi(lp);
416
417 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
418 read_phy(lp, lp->phy_address, MII_DSCR_REG, &val);
419 if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */
420 lp->phy_media = PORT_FIBRE;
421 } else if (machine_is_csb337()) {
422 /* mix link activity status into LED2 link state */
423 write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x0d22);
424 } else if (machine_is_ecbat91())
425 write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x156A);
426
427 disable_mdi(lp);
428 spin_unlock_irq(&lp->lock);
429}
430
409/* ......................... ADDRESS MANAGEMENT ........................ */ 431/* ......................... ADDRESS MANAGEMENT ........................ */
410 432
411/* 433/*
@@ -454,17 +476,19 @@ static short __init unpack_mac_address(struct net_device *dev, unsigned int hi,
454 */ 476 */
455static void __init get_mac_address(struct net_device *dev) 477static void __init get_mac_address(struct net_device *dev)
456{ 478{
479 struct at91_private *lp = netdev_priv(dev);
480
457 /* Check Specific-Address 1 */ 481 /* Check Specific-Address 1 */
458 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA1H), at91_emac_read(AT91_EMAC_SA1L))) 482 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA1H), at91_emac_read(lp, AT91_EMAC_SA1L)))
459 return; 483 return;
460 /* Check Specific-Address 2 */ 484 /* Check Specific-Address 2 */
461 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA2H), at91_emac_read(AT91_EMAC_SA2L))) 485 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA2H), at91_emac_read(lp, AT91_EMAC_SA2L)))
462 return; 486 return;
463 /* Check Specific-Address 3 */ 487 /* Check Specific-Address 3 */
464 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA3H), at91_emac_read(AT91_EMAC_SA3L))) 488 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA3H), at91_emac_read(lp, AT91_EMAC_SA3L)))
465 return; 489 return;
466 /* Check Specific-Address 4 */ 490 /* Check Specific-Address 4 */
467 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA4H), at91_emac_read(AT91_EMAC_SA4L))) 491 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA4H), at91_emac_read(lp, AT91_EMAC_SA4L)))
468 return; 492 return;
469 493
470 printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n"); 494 printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
@@ -475,11 +499,13 @@ static void __init get_mac_address(struct net_device *dev)
475 */ 499 */
476static void update_mac_address(struct net_device *dev) 500static void update_mac_address(struct net_device *dev)
477{ 501{
478 at91_emac_write(AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0])); 502 struct at91_private *lp = netdev_priv(dev);
479 at91_emac_write(AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
480 503
481 at91_emac_write(AT91_EMAC_SA2L, 0); 504 at91_emac_write(lp, AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
482 at91_emac_write(AT91_EMAC_SA2H, 0); 505 at91_emac_write(lp, AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
506
507 at91_emac_write(lp, AT91_EMAC_SA2L, 0);
508 at91_emac_write(lp, AT91_EMAC_SA2H, 0);
483} 509}
484 510
485/* 511/*
@@ -559,6 +585,7 @@ static int hash_get_index(__u8 *addr)
559 */ 585 */
560static void at91ether_sethashtable(struct net_device *dev) 586static void at91ether_sethashtable(struct net_device *dev)
561{ 587{
588 struct at91_private *lp = netdev_priv(dev);
562 struct netdev_hw_addr *ha; 589 struct netdev_hw_addr *ha;
563 unsigned long mc_filter[2]; 590 unsigned long mc_filter[2];
564 unsigned int bitnr; 591 unsigned int bitnr;
@@ -570,8 +597,8 @@ static void at91ether_sethashtable(struct net_device *dev)
570 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 597 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
571 } 598 }
572 599
573 at91_emac_write(AT91_EMAC_HSL, mc_filter[0]); 600 at91_emac_write(lp, AT91_EMAC_HSL, mc_filter[0]);
574 at91_emac_write(AT91_EMAC_HSH, mc_filter[1]); 601 at91_emac_write(lp, AT91_EMAC_HSH, mc_filter[1]);
575} 602}
576 603
577/* 604/*
@@ -579,9 +606,10 @@ static void at91ether_sethashtable(struct net_device *dev)
579 */ 606 */
580static void at91ether_set_multicast_list(struct net_device *dev) 607static void at91ether_set_multicast_list(struct net_device *dev)
581{ 608{
609 struct at91_private *lp = netdev_priv(dev);
582 unsigned long cfg; 610 unsigned long cfg;
583 611
584 cfg = at91_emac_read(AT91_EMAC_CFG); 612 cfg = at91_emac_read(lp, AT91_EMAC_CFG);
585 613
586 if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */ 614 if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */
587 cfg |= AT91_EMAC_CAF; 615 cfg |= AT91_EMAC_CAF;
@@ -589,34 +617,37 @@ static void at91ether_set_multicast_list(struct net_device *dev)
589 cfg &= ~AT91_EMAC_CAF; 617 cfg &= ~AT91_EMAC_CAF;
590 618
591 if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */ 619 if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */
592 at91_emac_write(AT91_EMAC_HSH, -1); 620 at91_emac_write(lp, AT91_EMAC_HSH, -1);
593 at91_emac_write(AT91_EMAC_HSL, -1); 621 at91_emac_write(lp, AT91_EMAC_HSL, -1);
594 cfg |= AT91_EMAC_MTI; 622 cfg |= AT91_EMAC_MTI;
595 } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */ 623 } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
596 at91ether_sethashtable(dev); 624 at91ether_sethashtable(dev);
597 cfg |= AT91_EMAC_MTI; 625 cfg |= AT91_EMAC_MTI;
598 } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */ 626 } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
599 at91_emac_write(AT91_EMAC_HSH, 0); 627 at91_emac_write(lp, AT91_EMAC_HSH, 0);
600 at91_emac_write(AT91_EMAC_HSL, 0); 628 at91_emac_write(lp, AT91_EMAC_HSL, 0);
601 cfg &= ~AT91_EMAC_MTI; 629 cfg &= ~AT91_EMAC_MTI;
602 } 630 }
603 631
604 at91_emac_write(AT91_EMAC_CFG, cfg); 632 at91_emac_write(lp, AT91_EMAC_CFG, cfg);
605} 633}
606 634
607/* ......................... ETHTOOL SUPPORT ........................... */ 635/* ......................... ETHTOOL SUPPORT ........................... */
608 636
609static int mdio_read(struct net_device *dev, int phy_id, int location) 637static int mdio_read(struct net_device *dev, int phy_id, int location)
610{ 638{
639 struct at91_private *lp = netdev_priv(dev);
611 unsigned int value; 640 unsigned int value;
612 641
613 read_phy(phy_id, location, &value); 642 read_phy(lp, phy_id, location, &value);
614 return value; 643 return value;
615} 644}
616 645
617static void mdio_write(struct net_device *dev, int phy_id, int location, int value) 646static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
618{ 647{
619 write_phy(phy_id, location, value); 648 struct at91_private *lp = netdev_priv(dev);
649
650 write_phy(lp, phy_id, location, value);
620} 651}
621 652
622static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 653static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -625,11 +656,11 @@ static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cm
625 int ret; 656 int ret;
626 657
627 spin_lock_irq(&lp->lock); 658 spin_lock_irq(&lp->lock);
628 enable_mdi(); 659 enable_mdi(lp);
629 660
630 ret = mii_ethtool_gset(&lp->mii, cmd); 661 ret = mii_ethtool_gset(&lp->mii, cmd);
631 662
632 disable_mdi(); 663 disable_mdi(lp);
633 spin_unlock_irq(&lp->lock); 664 spin_unlock_irq(&lp->lock);
634 665
635 if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */ 666 if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */
@@ -646,11 +677,11 @@ static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cm
646 int ret; 677 int ret;
647 678
648 spin_lock_irq(&lp->lock); 679 spin_lock_irq(&lp->lock);
649 enable_mdi(); 680 enable_mdi(lp);
650 681
651 ret = mii_ethtool_sset(&lp->mii, cmd); 682 ret = mii_ethtool_sset(&lp->mii, cmd);
652 683
653 disable_mdi(); 684 disable_mdi(lp);
654 spin_unlock_irq(&lp->lock); 685 spin_unlock_irq(&lp->lock);
655 686
656 return ret; 687 return ret;
@@ -662,11 +693,11 @@ static int at91ether_nwayreset(struct net_device *dev)
662 int ret; 693 int ret;
663 694
664 spin_lock_irq(&lp->lock); 695 spin_lock_irq(&lp->lock);
665 enable_mdi(); 696 enable_mdi(lp);
666 697
667 ret = mii_nway_restart(&lp->mii); 698 ret = mii_nway_restart(&lp->mii);
668 699
669 disable_mdi(); 700 disable_mdi(lp);
670 spin_unlock_irq(&lp->lock); 701 spin_unlock_irq(&lp->lock);
671 702
672 return ret; 703 return ret;
@@ -696,9 +727,9 @@ static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
696 return -EINVAL; 727 return -EINVAL;
697 728
698 spin_lock_irq(&lp->lock); 729 spin_lock_irq(&lp->lock);
699 enable_mdi(); 730 enable_mdi(lp);
700 res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL); 731 res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
701 disable_mdi(); 732 disable_mdi(lp);
702 spin_unlock_irq(&lp->lock); 733 spin_unlock_irq(&lp->lock);
703 734
704 return res; 735 return res;
@@ -731,11 +762,11 @@ static void at91ether_start(struct net_device *dev)
731 lp->rxBuffIndex = 0; 762 lp->rxBuffIndex = 0;
732 763
733 /* Program address of descriptor list in Rx Buffer Queue register */ 764 /* Program address of descriptor list in Rx Buffer Queue register */
734 at91_emac_write(AT91_EMAC_RBQP, (unsigned long) dlist_phys); 765 at91_emac_write(lp, AT91_EMAC_RBQP, (unsigned long) dlist_phys);
735 766
736 /* Enable Receive and Transmit */ 767 /* Enable Receive and Transmit */
737 ctl = at91_emac_read(AT91_EMAC_CTL); 768 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
738 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE); 769 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
739} 770}
740 771
741/* 772/*
@@ -752,8 +783,8 @@ static int at91ether_open(struct net_device *dev)
752 clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */ 783 clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */
753 784
754 /* Clear internal statistics */ 785 /* Clear internal statistics */
755 ctl = at91_emac_read(AT91_EMAC_CTL); 786 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
756 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_CSR); 787 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);
757 788
758 /* Update the MAC address (incase user has changed it) */ 789 /* Update the MAC address (incase user has changed it) */
759 update_mac_address(dev); 790 update_mac_address(dev);
@@ -762,15 +793,15 @@ static int at91ether_open(struct net_device *dev)
762 enable_phyirq(dev); 793 enable_phyirq(dev);
763 794
764 /* Enable MAC interrupts */ 795 /* Enable MAC interrupts */
765 at91_emac_write(AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA 796 at91_emac_write(lp, AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
766 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM 797 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
767 | AT91_EMAC_ROVR | AT91_EMAC_ABT); 798 | AT91_EMAC_ROVR | AT91_EMAC_ABT);
768 799
769 /* Determine current link speed */ 800 /* Determine current link speed */
770 spin_lock_irq(&lp->lock); 801 spin_lock_irq(&lp->lock);
771 enable_mdi(); 802 enable_mdi(lp);
772 update_linkspeed(dev, 0); 803 update_linkspeed(dev, 0);
773 disable_mdi(); 804 disable_mdi(lp);
774 spin_unlock_irq(&lp->lock); 805 spin_unlock_irq(&lp->lock);
775 806
776 at91ether_start(dev); 807 at91ether_start(dev);
@@ -787,14 +818,14 @@ static int at91ether_close(struct net_device *dev)
787 unsigned long ctl; 818 unsigned long ctl;
788 819
789 /* Disable Receiver and Transmitter */ 820 /* Disable Receiver and Transmitter */
790 ctl = at91_emac_read(AT91_EMAC_CTL); 821 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
791 at91_emac_write(AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE)); 822 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));
792 823
793 /* Disable PHY interrupt */ 824 /* Disable PHY interrupt */
794 disable_phyirq(dev); 825 disable_phyirq(dev);
795 826
796 /* Disable MAC interrupts */ 827 /* Disable MAC interrupts */
797 at91_emac_write(AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA 828 at91_emac_write(lp, AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
798 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM 829 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
799 | AT91_EMAC_ROVR | AT91_EMAC_ABT); 830 | AT91_EMAC_ROVR | AT91_EMAC_ABT);
800 831
@@ -812,7 +843,7 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
812{ 843{
813 struct at91_private *lp = netdev_priv(dev); 844 struct at91_private *lp = netdev_priv(dev);
814 845
815 if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) { 846 if (at91_emac_read(lp, AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
816 netif_stop_queue(dev); 847 netif_stop_queue(dev);
817 848
818 /* Store packet information (to free when Tx completed) */ 849 /* Store packet information (to free when Tx completed) */
@@ -822,9 +853,9 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
822 dev->stats.tx_bytes += skb->len; 853 dev->stats.tx_bytes += skb->len;
823 854
824 /* Set address of the data in the Transmit Address register */ 855 /* Set address of the data in the Transmit Address register */
825 at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr); 856 at91_emac_write(lp, AT91_EMAC_TAR, lp->skb_physaddr);
826 /* Set length of the packet in the Transmit Control register */ 857 /* Set length of the packet in the Transmit Control register */
827 at91_emac_write(AT91_EMAC_TCR, skb->len); 858 at91_emac_write(lp, AT91_EMAC_TCR, skb->len);
828 859
829 } else { 860 } else {
830 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n"); 861 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
@@ -841,31 +872,32 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
841 */ 872 */
842static struct net_device_stats *at91ether_stats(struct net_device *dev) 873static struct net_device_stats *at91ether_stats(struct net_device *dev)
843{ 874{
875 struct at91_private *lp = netdev_priv(dev);
844 int ale, lenerr, seqe, lcol, ecol; 876 int ale, lenerr, seqe, lcol, ecol;
845 877
846 if (netif_running(dev)) { 878 if (netif_running(dev)) {
847 dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */ 879 dev->stats.rx_packets += at91_emac_read(lp, AT91_EMAC_OK); /* Good frames received */
848 ale = at91_emac_read(AT91_EMAC_ALE); 880 ale = at91_emac_read(lp, AT91_EMAC_ALE);
849 dev->stats.rx_frame_errors += ale; /* Alignment errors */ 881 dev->stats.rx_frame_errors += ale; /* Alignment errors */
850 lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF); 882 lenerr = at91_emac_read(lp, AT91_EMAC_ELR) + at91_emac_read(lp, AT91_EMAC_USF);
851 dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */ 883 dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
852 seqe = at91_emac_read(AT91_EMAC_SEQE); 884 seqe = at91_emac_read(lp, AT91_EMAC_SEQE);
853 dev->stats.rx_crc_errors += seqe; /* CRC error */ 885 dev->stats.rx_crc_errors += seqe; /* CRC error */
854 dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */ 886 dev->stats.rx_fifo_errors += at91_emac_read(lp, AT91_EMAC_DRFC);/* Receive buffer not available */
855 dev->stats.rx_errors += (ale + lenerr + seqe 887 dev->stats.rx_errors += (ale + lenerr + seqe
856 + at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB)); 888 + at91_emac_read(lp, AT91_EMAC_CDE) + at91_emac_read(lp, AT91_EMAC_RJB));
857 889
858 dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */ 890 dev->stats.tx_packets += at91_emac_read(lp, AT91_EMAC_FRA); /* Frames successfully transmitted */
859 dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */ 891 dev->stats.tx_fifo_errors += at91_emac_read(lp, AT91_EMAC_TUE); /* Transmit FIFO underruns */
860 dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */ 892 dev->stats.tx_carrier_errors += at91_emac_read(lp, AT91_EMAC_CSE); /* Carrier Sense errors */
861 dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */ 893 dev->stats.tx_heartbeat_errors += at91_emac_read(lp, AT91_EMAC_SQEE);/* Heartbeat error */
862 894
863 lcol = at91_emac_read(AT91_EMAC_LCOL); 895 lcol = at91_emac_read(lp, AT91_EMAC_LCOL);
864 ecol = at91_emac_read(AT91_EMAC_ECOL); 896 ecol = at91_emac_read(lp, AT91_EMAC_ECOL);
865 dev->stats.tx_window_errors += lcol; /* Late collisions */ 897 dev->stats.tx_window_errors += lcol; /* Late collisions */
866 dev->stats.tx_aborted_errors += ecol; /* 16 collisions */ 898 dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
867 899
868 dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol); 900 dev->stats.collisions += (at91_emac_read(lp, AT91_EMAC_SCOL) + at91_emac_read(lp, AT91_EMAC_MCOL) + lcol + ecol);
869 } 901 }
870 return &dev->stats; 902 return &dev->stats;
871} 903}
@@ -922,7 +954,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
922 954
923 /* MAC Interrupt Status register indicates what interrupts are pending. 955 /* MAC Interrupt Status register indicates what interrupts are pending.
924 It is automatically cleared once read. */ 956 It is automatically cleared once read. */
925 intstatus = at91_emac_read(AT91_EMAC_ISR); 957 intstatus = at91_emac_read(lp, AT91_EMAC_ISR);
926 958
927 if (intstatus & AT91_EMAC_RCOM) /* Receive complete */ 959 if (intstatus & AT91_EMAC_RCOM) /* Receive complete */
928 at91ether_rx(dev); 960 at91ether_rx(dev);
@@ -942,9 +974,9 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
942 974
943 /* Work-around for Errata #11 */ 975 /* Work-around for Errata #11 */
944 if (intstatus & AT91_EMAC_RBNA) { 976 if (intstatus & AT91_EMAC_RBNA) {
945 ctl = at91_emac_read(AT91_EMAC_CTL); 977 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
946 at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE); 978 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
947 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE); 979 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
948 } 980 }
949 981
950 if (intstatus & AT91_EMAC_ROVR) 982 if (intstatus & AT91_EMAC_ROVR)
@@ -980,189 +1012,199 @@ static const struct net_device_ops at91ether_netdev_ops = {
980}; 1012};
981 1013
982/* 1014/*
983 * Initialize the ethernet interface 1015 * Detect the PHY type, and its address.
984 */ 1016 */
985static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address, 1017static int __init at91ether_phy_detect(struct at91_private *lp)
986 struct platform_device *pdev, struct clk *ether_clk) 1018{
1019 unsigned int phyid1, phyid2;
1020 unsigned long phy_id;
1021 unsigned short phy_address = 0;
1022
1023 while (phy_address < PHY_MAX_ADDR) {
1024 /* Read the PHY ID registers */
1025 enable_mdi(lp);
1026 read_phy(lp, phy_address, MII_PHYSID1, &phyid1);
1027 read_phy(lp, phy_address, MII_PHYSID2, &phyid2);
1028 disable_mdi(lp);
1029
1030 phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
1031 switch (phy_id) {
1032 case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
1033 case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
1034 case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
1035 case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
1036 case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
1037 case MII_DP83847_ID: /* National Semiconductor DP83847: */
1038 case MII_DP83848_ID: /* National Semiconductor DP83848: */
1039 case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
1040 case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
1041 case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
1042 case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
1043 /* store detected values */
1044 lp->phy_type = phy_id; /* Type of PHY connected */
1045 lp->phy_address = phy_address; /* MDI address of PHY */
1046 return 1;
1047 }
1048
1049 phy_address++;
1050 }
1051
1052 return 0; /* not detected */
1053}
1054
1055
1056/*
1057 * Detect MAC & PHY and perform ethernet interface initialization
1058 */
1059static int __init at91ether_probe(struct platform_device *pdev)
987{ 1060{
988 struct macb_platform_data *board_data = pdev->dev.platform_data; 1061 struct macb_platform_data *board_data = pdev->dev.platform_data;
1062 struct resource *regs;
989 struct net_device *dev; 1063 struct net_device *dev;
990 struct at91_private *lp; 1064 struct at91_private *lp;
991 unsigned int val;
992 int res; 1065 int res;
993 1066
1067 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1068 if (!regs)
1069 return -ENOENT;
1070
994 dev = alloc_etherdev(sizeof(struct at91_private)); 1071 dev = alloc_etherdev(sizeof(struct at91_private));
995 if (!dev) 1072 if (!dev)
996 return -ENOMEM; 1073 return -ENOMEM;
997 1074
998 dev->base_addr = AT91_VA_BASE_EMAC; 1075 lp = netdev_priv(dev);
999 dev->irq = AT91RM9200_ID_EMAC; 1076 lp->board_data = *board_data;
1077 spin_lock_init(&lp->lock);
1078
1079 dev->base_addr = regs->start; /* physical base address */
1080 lp->emac_base = ioremap(regs->start, regs->end - regs->start + 1);
1081 if (!lp->emac_base) {
1082 res = -ENOMEM;
1083 goto err_free_dev;
1084 }
1085
1086 /* Clock */
1087 lp->ether_clk = clk_get(&pdev->dev, "ether_clk");
1088 if (IS_ERR(lp->ether_clk)) {
1089 res = -ENODEV;
1090 goto err_ioumap;
1091 }
1092 clk_enable(lp->ether_clk);
1000 1093
1001 /* Install the interrupt handler */ 1094 /* Install the interrupt handler */
1095 dev->irq = platform_get_irq(pdev, 0);
1002 if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) { 1096 if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
1003 free_netdev(dev); 1097 res = -EBUSY;
1004 return -EBUSY; 1098 goto err_disable_clock;
1005 } 1099 }
1006 1100
1007 /* Allocate memory for DMA Receive descriptors */ 1101 /* Allocate memory for DMA Receive descriptors */
1008 lp = netdev_priv(dev);
1009 lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL); 1102 lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
1010 if (lp->dlist == NULL) { 1103 if (lp->dlist == NULL) {
1011 free_irq(dev->irq, dev); 1104 res = -ENOMEM;
1012 free_netdev(dev); 1105 goto err_free_irq;
1013 return -ENOMEM;
1014 } 1106 }
1015 lp->board_data = *board_data;
1016 lp->ether_clk = ether_clk;
1017 platform_set_drvdata(pdev, dev);
1018
1019 spin_lock_init(&lp->lock);
1020 1107
1021 ether_setup(dev); 1108 ether_setup(dev);
1022 dev->netdev_ops = &at91ether_netdev_ops; 1109 dev->netdev_ops = &at91ether_netdev_ops;
1023 dev->ethtool_ops = &at91ether_ethtool_ops; 1110 dev->ethtool_ops = &at91ether_ethtool_ops;
1024 1111 platform_set_drvdata(pdev, dev);
1025 SET_NETDEV_DEV(dev, &pdev->dev); 1112 SET_NETDEV_DEV(dev, &pdev->dev);
1026 1113
1027 get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */ 1114 get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */
1028 update_mac_address(dev); /* Program ethernet address into MAC */ 1115 update_mac_address(dev); /* Program ethernet address into MAC */
1029 1116
1030 at91_emac_write(AT91_EMAC_CTL, 0); 1117 at91_emac_write(lp, AT91_EMAC_CTL, 0);
1031 1118
1032 if (lp->board_data.is_rmii) 1119 if (board_data->is_rmii)
1033 at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII); 1120 at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
1034 else 1121 else
1035 at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG); 1122 at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);
1036 1123
1037 /* Perform PHY-specific initialization */ 1124 /* Detect PHY */
1038 spin_lock_irq(&lp->lock); 1125 if (!at91ether_phy_detect(lp)) {
1039 enable_mdi(); 1126 printk(KERN_ERR "at91_ether: Could not detect ethernet PHY\n");
1040 if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { 1127 res = -ENODEV;
1041 read_phy(phy_address, MII_DSCR_REG, &val); 1128 goto err_free_dmamem;
1042 if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */ 1129 }
1043 lp->phy_media = PORT_FIBRE;
1044 } else if (machine_is_csb337()) {
1045 /* mix link activity status into LED2 link state */
1046 write_phy(phy_address, MII_LEDCTRL_REG, 0x0d22);
1047 } else if (machine_is_ecbat91())
1048 write_phy(phy_address, MII_LEDCTRL_REG, 0x156A);
1049 1130
1050 disable_mdi(); 1131 initialize_phy(lp);
1051 spin_unlock_irq(&lp->lock);
1052 1132
1053 lp->mii.dev = dev; /* Support for ethtool */ 1133 lp->mii.dev = dev; /* Support for ethtool */
1054 lp->mii.mdio_read = mdio_read; 1134 lp->mii.mdio_read = mdio_read;
1055 lp->mii.mdio_write = mdio_write; 1135 lp->mii.mdio_write = mdio_write;
1056 lp->mii.phy_id = phy_address; 1136 lp->mii.phy_id = lp->phy_address;
1057 lp->mii.phy_id_mask = 0x1f; 1137 lp->mii.phy_id_mask = 0x1f;
1058 lp->mii.reg_num_mask = 0x1f; 1138 lp->mii.reg_num_mask = 0x1f;
1059 1139
1060 lp->phy_type = phy_type; /* Type of PHY connected */
1061 lp->phy_address = phy_address; /* MDI address of PHY */
1062
1063 /* Register the network interface */ 1140 /* Register the network interface */
1064 res = register_netdev(dev); 1141 res = register_netdev(dev);
1065 if (res) { 1142 if (res)
1066 free_irq(dev->irq, dev); 1143 goto err_free_dmamem;
1067 free_netdev(dev);
1068 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1069 return res;
1070 }
1071 1144
1072 /* Determine current link speed */ 1145 /* Determine current link speed */
1073 spin_lock_irq(&lp->lock); 1146 spin_lock_irq(&lp->lock);
1074 enable_mdi(); 1147 enable_mdi(lp);
1075 update_linkspeed(dev, 0); 1148 update_linkspeed(dev, 0);
1076 disable_mdi(); 1149 disable_mdi(lp);
1077 spin_unlock_irq(&lp->lock); 1150 spin_unlock_irq(&lp->lock);
1078 netif_carrier_off(dev); /* will be enabled in open() */ 1151 netif_carrier_off(dev); /* will be enabled in open() */
1079 1152
1080 /* If board has no PHY IRQ, use a timer to poll the PHY */ 1153 /* If board has no PHY IRQ, use a timer to poll the PHY */
1081 if (!gpio_is_valid(lp->board_data.phy_irq_pin)) { 1154 if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
1155 gpio_request(board_data->phy_irq_pin, "ethernet_phy");
1156 } else {
1157 /* If board has no PHY IRQ, use a timer to poll the PHY */
1082 init_timer(&lp->check_timer); 1158 init_timer(&lp->check_timer);
1083 lp->check_timer.data = (unsigned long)dev; 1159 lp->check_timer.data = (unsigned long)dev;
1084 lp->check_timer.function = at91ether_check_link; 1160 lp->check_timer.function = at91ether_check_link;
1085 } else if (lp->board_data.phy_irq_pin >= 32) 1161 }
1086 gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
1087 1162
1088 /* Display ethernet banner */ 1163 /* Display ethernet banner */
1089 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n", 1164 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
1090 dev->name, (uint) dev->base_addr, dev->irq, 1165 dev->name, (uint) dev->base_addr, dev->irq,
1091 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", 1166 at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
1092 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", 1167 at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
1093 dev->dev_addr); 1168 dev->dev_addr);
1094 if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) 1169 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
1095 printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)"); 1170 printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
1096 else if (phy_type == MII_LXT971A_ID) 1171 else if (lp->phy_type == MII_LXT971A_ID)
1097 printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name); 1172 printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
1098 else if (phy_type == MII_RTL8201_ID) 1173 else if (lp->phy_type == MII_RTL8201_ID)
1099 printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name); 1174 printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
1100 else if (phy_type == MII_BCM5221_ID) 1175 else if (lp->phy_type == MII_BCM5221_ID)
1101 printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name); 1176 printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
1102 else if (phy_type == MII_DP83847_ID) 1177 else if (lp->phy_type == MII_DP83847_ID)
1103 printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name); 1178 printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
1104 else if (phy_type == MII_DP83848_ID) 1179 else if (lp->phy_type == MII_DP83848_ID)
1105 printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name); 1180 printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
1106 else if (phy_type == MII_AC101L_ID) 1181 else if (lp->phy_type == MII_AC101L_ID)
1107 printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name); 1182 printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
1108 else if (phy_type == MII_KS8721_ID) 1183 else if (lp->phy_type == MII_KS8721_ID)
1109 printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name); 1184 printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
1110 else if (phy_type == MII_T78Q21x3_ID) 1185 else if (lp->phy_type == MII_T78Q21x3_ID)
1111 printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name); 1186 printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
1112 else if (phy_type == MII_LAN83C185_ID) 1187 else if (lp->phy_type == MII_LAN83C185_ID)
1113 printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name); 1188 printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
1114 1189
1115 return 0; 1190 clk_disable(lp->ether_clk); /* Disable Peripheral clock */
1116}
1117
1118/*
1119 * Detect MAC and PHY and perform initialization
1120 */
1121static int __init at91ether_probe(struct platform_device *pdev)
1122{
1123 unsigned int phyid1, phyid2;
1124 int detected = -1;
1125 unsigned long phy_id;
1126 unsigned short phy_address = 0;
1127 struct clk *ether_clk;
1128
1129 ether_clk = clk_get(&pdev->dev, "ether_clk");
1130 if (IS_ERR(ether_clk)) {
1131 printk(KERN_ERR "at91_ether: no clock defined\n");
1132 return -ENODEV;
1133 }
1134 clk_enable(ether_clk); /* Enable Peripheral clock */
1135
1136 while ((detected != 0) && (phy_address < 32)) {
1137 /* Read the PHY ID registers */
1138 enable_mdi();
1139 read_phy(phy_address, MII_PHYSID1, &phyid1);
1140 read_phy(phy_address, MII_PHYSID2, &phyid2);
1141 disable_mdi();
1142
1143 phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
1144 switch (phy_id) {
1145 case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
1146 case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
1147 case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
1148 case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
1149 case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
1150 case MII_DP83847_ID: /* National Semiconductor DP83847: */
1151 case MII_DP83848_ID: /* National Semiconductor DP83848: */
1152 case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
1153 case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
1154 case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
1155 case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
1156 detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk);
1157 break;
1158 }
1159 1191
1160 phy_address++; 1192 return 0;
1161 }
1162 1193
1163 clk_disable(ether_clk); /* Disable Peripheral clock */
1164 1194
1165 return detected; 1195err_free_dmamem:
1196 platform_set_drvdata(pdev, NULL);
1197 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1198err_free_irq:
1199 free_irq(dev->irq, dev);
1200err_disable_clock:
1201 clk_disable(lp->ether_clk);
1202 clk_put(lp->ether_clk);
1203err_ioumap:
1204 iounmap(lp->emac_base);
1205err_free_dev:
1206 free_netdev(dev);
1207 return res;
1166} 1208}
1167 1209
1168static int __devexit at91ether_remove(struct platform_device *pdev) 1210static int __devexit at91ether_remove(struct platform_device *pdev)
@@ -1170,8 +1212,7 @@ static int __devexit at91ether_remove(struct platform_device *pdev)
1170 struct net_device *dev = platform_get_drvdata(pdev); 1212 struct net_device *dev = platform_get_drvdata(pdev);
1171 struct at91_private *lp = netdev_priv(dev); 1213 struct at91_private *lp = netdev_priv(dev);
1172 1214
1173 if (gpio_is_valid(lp->board_data.phy_irq_pin) && 1215 if (gpio_is_valid(lp->board_data.phy_irq_pin))
1174 lp->board_data.phy_irq_pin >= 32)
1175 gpio_free(lp->board_data.phy_irq_pin); 1216 gpio_free(lp->board_data.phy_irq_pin);
1176 1217
1177 unregister_netdev(dev); 1218 unregister_netdev(dev);
@@ -1193,7 +1234,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
1193 1234
1194 if (netif_running(net_dev)) { 1235 if (netif_running(net_dev)) {
1195 if (gpio_is_valid(lp->board_data.phy_irq_pin)) { 1236 if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
1196 int phy_irq = lp->board_data.phy_irq_pin; 1237 int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
1197 disable_irq(phy_irq); 1238 disable_irq(phy_irq);
1198 } 1239 }
1199 1240
@@ -1217,7 +1258,7 @@ static int at91ether_resume(struct platform_device *pdev)
1217 netif_start_queue(net_dev); 1258 netif_start_queue(net_dev);
1218 1259
1219 if (gpio_is_valid(lp->board_data.phy_irq_pin)) { 1260 if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
1220 int phy_irq = lp->board_data.phy_irq_pin; 1261 int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
1221 enable_irq(phy_irq); 1262 enable_irq(phy_irq);
1222 } 1263 }
1223 } 1264 }
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
index 3725fbb0defe..0ef6328fa7f8 100644
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ b/drivers/net/ethernet/cadence/at91_ether.h
@@ -88,6 +88,7 @@ struct at91_private
88 struct macb_platform_data board_data; /* board-specific 88 struct macb_platform_data board_data; /* board-specific
89 * configuration (shared with 89 * configuration (shared with
90 * macb for common data */ 90 * macb for common data */
91 void __iomem *emac_base; /* base register address */
91 struct clk *ether_clk; /* clock */ 92 struct clk *ether_clk; /* clock */
92 93
93 /* PHY */ 94 /* PHY */
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c4834c23be35..1466bc4e3dda 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1213,6 +1213,7 @@ static const struct ethtool_ops macb_ethtool_ops = {
1213 .set_settings = macb_set_settings, 1213 .set_settings = macb_set_settings,
1214 .get_drvinfo = macb_get_drvinfo, 1214 .get_drvinfo = macb_get_drvinfo,
1215 .get_link = ethtool_op_get_link, 1215 .get_link = ethtool_op_get_link,
1216 .get_ts_info = ethtool_op_get_ts_info,
1216}; 1217};
1217 1218
1218static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1219static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 63bfdd10bd6d..abb6ce7c1b7e 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1150,6 +1150,48 @@ release_tpsram:
1150} 1150}
1151 1151
1152/** 1152/**
1153 * t3_synchronize_rx - wait for current Rx processing on a port to complete
1154 * @adap: the adapter
1155 * @p: the port
1156 *
1157 * Ensures that current Rx processing on any of the queues associated with
1158 * the given port completes before returning. We do this by acquiring and
1159 * releasing the locks of the response queues associated with the port.
1160 */
1161static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1162{
1163 int i;
1164
1165 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1166 struct sge_rspq *q = &adap->sge.qs[i].rspq;
1167
1168 spin_lock_irq(&q->lock);
1169 spin_unlock_irq(&q->lock);
1170 }
1171}
1172
1173static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
1174{
1175 struct port_info *pi = netdev_priv(dev);
1176 struct adapter *adapter = pi->adapter;
1177
1178 if (adapter->params.rev > 0) {
1179 t3_set_vlan_accel(adapter, 1 << pi->port_id,
1180 features & NETIF_F_HW_VLAN_RX);
1181 } else {
1182 /* single control for all ports */
1183 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
1184
1185 for_each_port(adapter, i)
1186 have_vlans |=
1187 adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
1188
1189 t3_set_vlan_accel(adapter, 1, have_vlans);
1190 }
1191 t3_synchronize_rx(adapter, pi);
1192}
1193
1194/**
1153 * cxgb_up - enable the adapter 1195 * cxgb_up - enable the adapter
1154 * @adapter: adapter being enabled 1196 * @adapter: adapter being enabled
1155 * 1197 *
@@ -1161,7 +1203,7 @@ release_tpsram:
1161 */ 1203 */
1162static int cxgb_up(struct adapter *adap) 1204static int cxgb_up(struct adapter *adap)
1163{ 1205{
1164 int err; 1206 int i, err;
1165 1207
1166 if (!(adap->flags & FULL_INIT_DONE)) { 1208 if (!(adap->flags & FULL_INIT_DONE)) {
1167 err = t3_check_fw_version(adap); 1209 err = t3_check_fw_version(adap);
@@ -1198,6 +1240,9 @@ static int cxgb_up(struct adapter *adap)
1198 if (err) 1240 if (err)
1199 goto out; 1241 goto out;
1200 1242
1243 for_each_port(adap, i)
1244 cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
1245
1201 setup_rss(adap); 1246 setup_rss(adap);
1202 if (!(adap->flags & NAPI_INIT)) 1247 if (!(adap->flags & NAPI_INIT))
1203 init_napi(adap); 1248 init_napi(adap);
@@ -2508,48 +2553,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2508 return 0; 2553 return 0;
2509} 2554}
2510 2555
2511/**
2512 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2513 * @adap: the adapter
2514 * @p: the port
2515 *
2516 * Ensures that current Rx processing on any of the queues associated with
2517 * the given port completes before returning. We do this by acquiring and
2518 * releasing the locks of the response queues associated with the port.
2519 */
2520static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2521{
2522 int i;
2523
2524 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2525 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2526
2527 spin_lock_irq(&q->lock);
2528 spin_unlock_irq(&q->lock);
2529 }
2530}
2531
2532static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
2533{
2534 struct port_info *pi = netdev_priv(dev);
2535 struct adapter *adapter = pi->adapter;
2536
2537 if (adapter->params.rev > 0) {
2538 t3_set_vlan_accel(adapter, 1 << pi->port_id,
2539 features & NETIF_F_HW_VLAN_RX);
2540 } else {
2541 /* single control for all ports */
2542 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
2543
2544 for_each_port(adapter, i)
2545 have_vlans |=
2546 adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
2547
2548 t3_set_vlan_accel(adapter, 1, have_vlans);
2549 }
2550 t3_synchronize_rx(adapter, pi);
2551}
2552
2553static netdev_features_t cxgb_fix_features(struct net_device *dev, 2556static netdev_features_t cxgb_fix_features(struct net_device *dev,
2554 netdev_features_t features) 2557 netdev_features_t features)
2555{ 2558{
@@ -3353,9 +3356,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3353 err = sysfs_create_group(&adapter->port[0]->dev.kobj, 3356 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3354 &cxgb3_attr_group); 3357 &cxgb3_attr_group);
3355 3358
3356 for_each_port(adapter, i)
3357 cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features);
3358
3359 print_port_info(adapter, ai); 3359 print_port_info(adapter, ai);
3360 return 0; 3360 return 0;
3361 3361
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index b9406cbfc180..845b2020f291 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1,105 +1,27 @@
1/* cs89x0.c: A Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 1/* cs89x0.c: A Crystal Semiconductor (Now Cirrus Logic) CS89[02]0
2 * driver for linux. 2 * driver for linux.
3 * Written 1996 by Russell Nelson, with reference to skeleton.c
4 * written 1993-1994 by Donald Becker.
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 * The author may be reached at nelson@crynwr.com, Crynwr
10 * Software, 521 Pleasant Valley Rd., Potsdam, NY 13676
11 *
12 * Other contributors:
13 * Mike Cruse : mcruse@cti-ltd.com
14 * Russ Nelson
15 * Melody Lee : ethernet@crystal.cirrus.com
16 * Alan Cox
17 * Andrew Morton
18 * Oskar Schirmer : oskar@scara.com
19 * Deepak Saxena : dsaxena@plexity.net
20 * Dmitry Pervushin : dpervushin@ru.mvista.com
21 * Deepak Saxena : dsaxena@plexity.net
22 * Domenico Andreoli : cavokz@gmail.com
3 */ 23 */
4 24
5/*
6 Written 1996 by Russell Nelson, with reference to skeleton.c
7 written 1993-1994 by Donald Becker.
8
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
11
12 The author may be reached at nelson@crynwr.com, Crynwr
13 Software, 521 Pleasant Valley Rd., Potsdam, NY 13676
14
15 Changelog:
16
17 Mike Cruse : mcruse@cti-ltd.com
18 : Changes for Linux 2.0 compatibility.
19 : Added dev_id parameter in net_interrupt(),
20 : request_irq() and free_irq(). Just NULL for now.
21
22 Mike Cruse : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros
23 : in net_open() and net_close() so kerneld would know
24 : that the module is in use and wouldn't eject the
25 : driver prematurely.
26
27 Mike Cruse : Rewrote init_module() and cleanup_module using 8390.c
28 : as an example. Disabled autoprobing in init_module(),
29 : not a good thing to do to other devices while Linux
30 : is running from all accounts.
31
32 Russ Nelson : Jul 13 1998. Added RxOnly DMA support.
33
34 Melody Lee : Aug 10 1999. Changes for Linux 2.2.5 compatibility.
35 : email: ethernet@crystal.cirrus.com
36
37 Alan Cox : Removed 1.2 support, added 2.1 extra counters.
38
39 Andrew Morton : Kernel 2.3.48
40 : Handle kmalloc() failures
41 : Other resource allocation fixes
42 : Add SMP locks
43 : Integrate Russ Nelson's ALLOW_DMA functionality back in.
44 : If ALLOW_DMA is true, make DMA runtime selectable
45 : Folded in changes from Cirrus (Melody Lee
46 : <klee@crystal.cirrus.com>)
47 : Don't call netif_wake_queue() in net_send_packet()
48 : Fixed an out-of-mem bug in dma_rx()
49 : Updated Documentation/networking/cs89x0.txt
50
51 Andrew Morton : Kernel 2.3.99-pre1
52 : Use skb_reserve to longword align IP header (two places)
53 : Remove a delay loop from dma_rx()
54 : Replace '100' with HZ
55 : Clean up a couple of skb API abuses
56 : Added 'cs89x0_dma=N' kernel boot option
57 : Correctly initialise lp->lock in non-module compile
58
59 Andrew Morton : Kernel 2.3.99-pre4-1
60 : MOD_INC/DEC race fix (see
61 : http://www.uwsg.indiana.edu/hypermail/linux/kernel/0003.3/1532.html)
62
63 Andrew Morton : Kernel 2.4.0-test7-pre2
64 : Enhanced EEPROM support to cover more devices,
65 : abstracted IRQ mapping to support CONFIG_ARCH_CLPS7500 arch
66 : (Jason Gunthorpe <jgg@ualberta.ca>)
67
68 Andrew Morton : Kernel 2.4.0-test11-pre4
69 : Use dev->name in request_*() (Andrey Panin)
70 : Fix an error-path memleak in init_module()
71 : Preserve return value from request_irq()
72 : Fix type of `media' module parm (Keith Owens)
73 : Use SET_MODULE_OWNER()
74 : Tidied up strange request_irq() abuse in net_open().
75
76 Andrew Morton : Kernel 2.4.3-pre1
77 : Request correct number of pages for DMA (Hugh Dickens)
78 : Select PP_ChipID _after_ unregister_netdev in cleanup_module()
79 : because unregister_netdev() calls get_stats.
80 : Make `version[]' __initdata
81 : Uninlined the read/write reg/word functions.
82
83 Oskar Schirmer : oskar@scara.com
84 : HiCO.SH4 (superh) support added (irq#1, cs89x0_media=)
85
86 Deepak Saxena : dsaxena@plexity.net
87 : Intel IXDP2x01 (XScale ixp2x00 NPU) platform support
88
89 Dmitry Pervushin : dpervushin@ru.mvista.com
90 : PNX010X platform support
91
92 Deepak Saxena : dsaxena@plexity.net
93 : Intel IXDP2351 platform support
94
95 Dmitry Pervushin : dpervushin@ru.mvista.com
96 : PNX010X platform support
97
98 Domenico Andreoli : cavokz@gmail.com
99 : QQ2440 platform support
100
101*/
102
103 25
104/* 26/*
105 * Set this to zero to disable DMA code 27 * Set this to zero to disable DMA code
@@ -119,14 +41,12 @@
119 */ 41 */
120#define DEBUGGING 1 42#define DEBUGGING 1
121 43
122/* 44/* Sources:
123 Sources: 45 * Crynwr packet driver epktisa.
124 46 * Crystal Semiconductor data sheets.
125 Crynwr packet driver epktisa. 47 */
126
127 Crystal Semiconductor data sheets.
128 48
129*/ 49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
130 50
131#include <linux/module.h> 51#include <linux/module.h>
132#include <linux/printk.h> 52#include <linux/printk.h>
@@ -147,8 +67,8 @@
147#include <linux/bitops.h> 67#include <linux/bitops.h>
148#include <linux/delay.h> 68#include <linux/delay.h>
149#include <linux/gfp.h> 69#include <linux/gfp.h>
70#include <linux/io.h>
150 71
151#include <asm/io.h>
152#include <asm/irq.h> 72#include <asm/irq.h>
153#include <linux/atomic.h> 73#include <linux/atomic.h>
154#if ALLOW_DMA 74#if ALLOW_DMA
@@ -157,35 +77,55 @@
157 77
158#include "cs89x0.h" 78#include "cs89x0.h"
159 79
80#define cs89_dbg(val, level, fmt, ...) \
81do { \
82 if (val <= net_debug) \
83 pr_##level(fmt, ##__VA_ARGS__); \
84} while (0)
85
160static char version[] __initdata = 86static char version[] __initdata =
161"cs89x0.c: v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton\n"; 87 "v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton";
162 88
163#define DRV_NAME "cs89x0" 89#define DRV_NAME "cs89x0"
164 90
165/* First, a few definitions that the brave might change. 91/* First, a few definitions that the brave might change.
166 A zero-terminated list of I/O addresses to be probed. Some special flags.. 92 * A zero-terminated list of I/O addresses to be probed. Some special flags..
167 Addr & 1 = Read back the address port, look for signature and reset 93 * Addr & 1 = Read back the address port, look for signature and reset
168 the page window before probing 94 * the page window before probing
169 Addr & 3 = Reset the page window and probe 95 * Addr & 3 = Reset the page window and probe
170 The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space, 96 * The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space,
171 but it is possible that a Cirrus board could be plugged into the ISA 97 * but it is possible that a Cirrus board could be plugged into the ISA
172 slots. */ 98 * slots.
99 */
173/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps 100/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps
174 them to system IRQ numbers. This mapping is card specific and is set to 101 * them to system IRQ numbers. This mapping is card specific and is set to
175 the configuration of the Cirrus Eval board for this chip. */ 102 * the configuration of the Cirrus Eval board for this chip.
103 */
176#if defined(CONFIG_MACH_IXDP2351) 104#if defined(CONFIG_MACH_IXDP2351)
177#define CS89x0_NONISA_IRQ 105#define CS89x0_NONISA_IRQ
178static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0}; 106static unsigned int netcard_portlist[] __used __initdata = {
179static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0}; 107 IXDP2351_VIRT_CS8900_BASE, 0
108};
109static unsigned int cs8900_irq_map[] = {
110 IRQ_IXDP2351_CS8900, 0, 0, 0
111};
180#elif defined(CONFIG_ARCH_IXDP2X01) 112#elif defined(CONFIG_ARCH_IXDP2X01)
181#define CS89x0_NONISA_IRQ 113#define CS89x0_NONISA_IRQ
182static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; 114static unsigned int netcard_portlist[] __used __initdata = {
183static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; 115 IXDP2X01_CS8900_VIRT_BASE, 0
116};
117static unsigned int cs8900_irq_map[] = {
118 IRQ_IXDP2X01_CS8900, 0, 0, 0
119};
184#else 120#else
185#ifndef CONFIG_CS89x0_PLATFORM 121#ifndef CONFIG_CS89x0_PLATFORM
186static unsigned int netcard_portlist[] __used __initdata = 122static unsigned int netcard_portlist[] __used __initdata = {
187 { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0}; 123 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240,
188static unsigned int cs8900_irq_map[] = {10,11,12,5}; 124 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0
125};
126static unsigned int cs8900_irq_map[] = {
127 10, 11, 12, 5
128};
189#endif 129#endif
190#endif 130#endif
191 131
@@ -222,6 +162,8 @@ struct net_local {
222 int send_underrun; /* keep track of how many underruns in a row we get */ 162 int send_underrun; /* keep track of how many underruns in a row we get */
223 int force; /* force various values; see FORCE* above. */ 163 int force; /* force various values; see FORCE* above. */
224 spinlock_t lock; 164 spinlock_t lock;
165 void __iomem *virt_addr;/* CS89x0 virtual address. */
166 unsigned long size; /* Length of CS89x0 memory region. */
225#if ALLOW_DMA 167#if ALLOW_DMA
226 int use_dma; /* Flag: we're using dma */ 168 int use_dma; /* Flag: we're using dma */
227 int dma; /* DMA channel */ 169 int dma; /* DMA channel */
@@ -230,119 +172,42 @@ struct net_local {
230 unsigned char *end_dma_buff; /* points to the end of the buffer */ 172 unsigned char *end_dma_buff; /* points to the end of the buffer */
231 unsigned char *rx_dma_ptr; /* points to the next packet */ 173 unsigned char *rx_dma_ptr; /* points to the next packet */
232#endif 174#endif
233#ifdef CONFIG_CS89x0_PLATFORM
234 void __iomem *virt_addr;/* Virtual address for accessing the CS89x0. */
235 unsigned long phys_addr;/* Physical address for accessing the CS89x0. */
236 unsigned long size; /* Length of CS89x0 memory region. */
237#endif
238}; 175};
239 176
240/* Index to functions, as function prototypes. */
241
242static int cs89x0_probe1(struct net_device *dev, unsigned long ioaddr, int modular);
243static int net_open(struct net_device *dev);
244static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
245static irqreturn_t net_interrupt(int irq, void *dev_id);
246static void set_multicast_list(struct net_device *dev);
247static void net_timeout(struct net_device *dev);
248static void net_rx(struct net_device *dev);
249static int net_close(struct net_device *dev);
250static struct net_device_stats *net_get_stats(struct net_device *dev);
251static void reset_chip(struct net_device *dev);
252static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer);
253static int get_eeprom_cksum(int off, int len, int *buffer);
254static int set_mac_address(struct net_device *dev, void *addr);
255static void count_rx_errors(int status, struct net_device *dev);
256#ifdef CONFIG_NET_POLL_CONTROLLER
257static void net_poll_controller(struct net_device *dev);
258#endif
259#if ALLOW_DMA
260static void get_dma_channel(struct net_device *dev);
261static void release_dma_buff(struct net_local *lp);
262#endif
263
264/* Example routines you must write ;->. */ 177/* Example routines you must write ;->. */
265#define tx_done(dev) 1 178#define tx_done(dev) 1
266 179
267/* 180/*
268 * Permit 'cs89x0_dma=N' in the kernel boot environment 181 * Permit 'cs89x0_dma=N' in the kernel boot environment
269 */ 182 */
270#if !defined(MODULE) && (ALLOW_DMA != 0) 183#if !defined(MODULE)
184#if ALLOW_DMA
271static int g_cs89x0_dma; 185static int g_cs89x0_dma;
272 186
273static int __init dma_fn(char *str) 187static int __init dma_fn(char *str)
274{ 188{
275 g_cs89x0_dma = simple_strtol(str,NULL,0); 189 g_cs89x0_dma = simple_strtol(str, NULL, 0);
276 return 1; 190 return 1;
277} 191}
278 192
279__setup("cs89x0_dma=", dma_fn); 193__setup("cs89x0_dma=", dma_fn);
280#endif /* !defined(MODULE) && (ALLOW_DMA != 0) */ 194#endif /* ALLOW_DMA */
281 195
282#ifndef MODULE
283static int g_cs89x0_media__force; 196static int g_cs89x0_media__force;
284 197
285static int __init media_fn(char *str) 198static int __init media_fn(char *str)
286{ 199{
287 if (!strcmp(str, "rj45")) g_cs89x0_media__force = FORCE_RJ45; 200 if (!strcmp(str, "rj45"))
288 else if (!strcmp(str, "aui")) g_cs89x0_media__force = FORCE_AUI; 201 g_cs89x0_media__force = FORCE_RJ45;
289 else if (!strcmp(str, "bnc")) g_cs89x0_media__force = FORCE_BNC; 202 else if (!strcmp(str, "aui"))
203 g_cs89x0_media__force = FORCE_AUI;
204 else if (!strcmp(str, "bnc"))
205 g_cs89x0_media__force = FORCE_BNC;
206
290 return 1; 207 return 1;
291} 208}
292 209
293__setup("cs89x0_media=", media_fn); 210__setup("cs89x0_media=", media_fn);
294
295
296#ifndef CONFIG_CS89x0_PLATFORM
297/* Check for a network adaptor of this type, and return '0' iff one exists.
298 If dev->base_addr == 0, probe all likely locations.
299 If dev->base_addr == 1, always return failure.
300 If dev->base_addr == 2, allocate space for the device and return success
301 (detachable devices only).
302 Return 0 on success.
303 */
304
305struct net_device * __init cs89x0_probe(int unit)
306{
307 struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
308 unsigned *port;
309 int err = 0;
310 int irq;
311 int io;
312
313 if (!dev)
314 return ERR_PTR(-ENODEV);
315
316 sprintf(dev->name, "eth%d", unit);
317 netdev_boot_setup_check(dev);
318 io = dev->base_addr;
319 irq = dev->irq;
320
321 if (net_debug)
322 printk("cs89x0:cs89x0_probe(0x%x)\n", io);
323
324 if (io > 0x1ff) { /* Check a single specified location. */
325 err = cs89x0_probe1(dev, io, 0);
326 } else if (io != 0) { /* Don't probe at all. */
327 err = -ENXIO;
328 } else {
329 for (port = netcard_portlist; *port; port++) {
330 if (cs89x0_probe1(dev, *port, 0) == 0)
331 break;
332 dev->irq = irq;
333 }
334 if (!*port)
335 err = -ENODEV;
336 }
337 if (err)
338 goto out;
339 return dev;
340out:
341 free_netdev(dev);
342 printk(KERN_WARNING "cs89x0: no cs8900 or cs8920 detected. Be sure to disable PnP with SETUP\n");
343 return ERR_PTR(err);
344}
345#endif
346#endif 211#endif
347 212
348#if defined(CONFIG_MACH_IXDP2351) 213#if defined(CONFIG_MACH_IXDP2351)
@@ -369,36 +234,22 @@ writeword(unsigned long base_addr, int portno, u16 value)
369{ 234{
370 __raw_writel(value, base_addr + (portno << 1)); 235 __raw_writel(value, base_addr + (portno << 1));
371} 236}
372#else
373static u16
374readword(unsigned long base_addr, int portno)
375{
376 return inw(base_addr + portno);
377}
378
379static void
380writeword(unsigned long base_addr, int portno, u16 value)
381{
382 outw(value, base_addr + portno);
383}
384#endif 237#endif
385 238
386static void 239static void readwords(struct net_local *lp, int portno, void *buf, int length)
387readwords(unsigned long base_addr, int portno, void *buf, int length)
388{ 240{
389 u8 *buf8 = (u8 *)buf; 241 u8 *buf8 = (u8 *)buf;
390 242
391 do { 243 do {
392 u16 tmp16; 244 u16 tmp16;
393 245
394 tmp16 = readword(base_addr, portno); 246 tmp16 = ioread16(lp->virt_addr + portno);
395 *buf8++ = (u8)tmp16; 247 *buf8++ = (u8)tmp16;
396 *buf8++ = (u8)(tmp16 >> 8); 248 *buf8++ = (u8)(tmp16 >> 8);
397 } while (--length); 249 } while (--length);
398} 250}
399 251
400static void 252static void writewords(struct net_local *lp, int portno, void *buf, int length)
401writewords(unsigned long base_addr, int portno, void *buf, int length)
402{ 253{
403 u8 *buf8 = (u8 *)buf; 254 u8 *buf8 = (u8 *)buf;
404 255
@@ -407,32 +258,37 @@ writewords(unsigned long base_addr, int portno, void *buf, int length)
407 258
408 tmp16 = *buf8++; 259 tmp16 = *buf8++;
409 tmp16 |= (*buf8++) << 8; 260 tmp16 |= (*buf8++) << 8;
410 writeword(base_addr, portno, tmp16); 261 iowrite16(tmp16, lp->virt_addr + portno);
411 } while (--length); 262 } while (--length);
412} 263}
413 264
414static u16 265static u16
415readreg(struct net_device *dev, u16 regno) 266readreg(struct net_device *dev, u16 regno)
416{ 267{
417 writeword(dev->base_addr, ADD_PORT, regno); 268 struct net_local *lp = netdev_priv(dev);
418 return readword(dev->base_addr, DATA_PORT); 269
270 iowrite16(regno, lp->virt_addr + ADD_PORT);
271 return ioread16(lp->virt_addr + DATA_PORT);
419} 272}
420 273
421static void 274static void
422writereg(struct net_device *dev, u16 regno, u16 value) 275writereg(struct net_device *dev, u16 regno, u16 value)
423{ 276{
424 writeword(dev->base_addr, ADD_PORT, regno); 277 struct net_local *lp = netdev_priv(dev);
425 writeword(dev->base_addr, DATA_PORT, value); 278
279 iowrite16(regno, lp->virt_addr + ADD_PORT);
280 iowrite16(value, lp->virt_addr + DATA_PORT);
426} 281}
427 282
428static int __init 283static int __init
429wait_eeprom_ready(struct net_device *dev) 284wait_eeprom_ready(struct net_device *dev)
430{ 285{
431 int timeout = jiffies; 286 int timeout = jiffies;
432 /* check to see if the EEPROM is ready, a timeout is used - 287 /* check to see if the EEPROM is ready,
433 just in case EEPROM is ready when SI_BUSY in the 288 * a timeout is used just in case EEPROM is ready when
434 PP_SelfST is clear */ 289 * SI_BUSY in the PP_SelfST is clear
435 while(readreg(dev, PP_SelfST) & SI_BUSY) 290 */
291 while (readreg(dev, PP_SelfST) & SI_BUSY)
436 if (jiffies - timeout >= 40) 292 if (jiffies - timeout >= 40)
437 return -1; 293 return -1;
438 return 0; 294 return 0;
@@ -443,17 +299,19 @@ get_eeprom_data(struct net_device *dev, int off, int len, int *buffer)
443{ 299{
444 int i; 300 int i;
445 301
446 if (net_debug > 3) printk("EEPROM data from %x for %x:\n",off,len); 302 cs89_dbg(3, info, "EEPROM data from %x for %x:", off, len);
447 for (i = 0; i < len; i++) { 303 for (i = 0; i < len; i++) {
448 if (wait_eeprom_ready(dev) < 0) return -1; 304 if (wait_eeprom_ready(dev) < 0)
305 return -1;
449 /* Now send the EEPROM read command and EEPROM location to read */ 306 /* Now send the EEPROM read command and EEPROM location to read */
450 writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD); 307 writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD);
451 if (wait_eeprom_ready(dev) < 0) return -1; 308 if (wait_eeprom_ready(dev) < 0)
309 return -1;
452 buffer[i] = readreg(dev, PP_EEData); 310 buffer[i] = readreg(dev, PP_EEData);
453 if (net_debug > 3) printk("%04x ", buffer[i]); 311 cs89_dbg(3, cont, " %04x", buffer[i]);
454 } 312 }
455 if (net_debug > 3) printk("\n"); 313 cs89_dbg(3, cont, "\n");
456 return 0; 314 return 0;
457} 315}
458 316
459static int __init 317static int __init
@@ -470,341 +328,52 @@ get_eeprom_cksum(int off, int len, int *buffer)
470 return -1; 328 return -1;
471} 329}
472 330
473#ifdef CONFIG_NET_POLL_CONTROLLER 331static void
474/* 332write_irq(struct net_device *dev, int chip_type, int irq)
475 * Polling receive - used by netconsole and other diagnostic tools
476 * to allow network i/o with interrupts disabled.
477 */
478static void net_poll_controller(struct net_device *dev)
479{
480 disable_irq(dev->irq);
481 net_interrupt(dev->irq, dev);
482 enable_irq(dev->irq);
483}
484#endif
485
486static const struct net_device_ops net_ops = {
487 .ndo_open = net_open,
488 .ndo_stop = net_close,
489 .ndo_tx_timeout = net_timeout,
490 .ndo_start_xmit = net_send_packet,
491 .ndo_get_stats = net_get_stats,
492 .ndo_set_rx_mode = set_multicast_list,
493 .ndo_set_mac_address = set_mac_address,
494#ifdef CONFIG_NET_POLL_CONTROLLER
495 .ndo_poll_controller = net_poll_controller,
496#endif
497 .ndo_change_mtu = eth_change_mtu,
498 .ndo_validate_addr = eth_validate_addr,
499};
500
501/* This is the real probe routine. Linux has a history of friendly device
502 probes on the ISA bus. A good device probes avoids doing writes, and
503 verifies that the correct device exists and functions.
504 Return 0 on success.
505 */
506
507static int __init
508cs89x0_probe1(struct net_device *dev, unsigned long ioaddr, int modular)
509{ 333{
510 struct net_local *lp = netdev_priv(dev);
511 static unsigned version_printed;
512 int i; 334 int i;
513 int tmp;
514 unsigned rev_type = 0;
515 int eeprom_buff[CHKSUM_LEN];
516 int retval;
517
518 /* Initialize the device structure. */
519 if (!modular) {
520 memset(lp, 0, sizeof(*lp));
521 spin_lock_init(&lp->lock);
522#ifndef MODULE
523#if ALLOW_DMA
524 if (g_cs89x0_dma) {
525 lp->use_dma = 1;
526 lp->dma = g_cs89x0_dma;
527 lp->dmasize = 16; /* Could make this an option... */
528 }
529#endif
530 lp->force = g_cs89x0_media__force;
531#endif
532
533 }
534
535 /* Grab the region so we can find another board if autoIRQ fails. */
536 /* WTF is going on here? */
537 if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) {
538 printk(KERN_ERR "%s: request_region(0x%lx, 0x%x) failed\n",
539 DRV_NAME, ioaddr, NETCARD_IO_EXTENT);
540 retval = -EBUSY;
541 goto out1;
542 }
543
544 /* if they give us an odd I/O address, then do ONE write to
545 the address port, to get it back to address zero, where we
546 expect to find the EISA signature word. An IO with a base of 0x3
547 will skip the test for the ADD_PORT. */
548 if (ioaddr & 1) {
549 if (net_debug > 1)
550 printk(KERN_INFO "%s: odd ioaddr 0x%lx\n", dev->name, ioaddr);
551 if ((ioaddr & 2) != 2)
552 if ((readword(ioaddr & ~3, ADD_PORT) & ADD_MASK) != ADD_SIG) {
553 printk(KERN_ERR "%s: bad signature 0x%x\n",
554 dev->name, readword(ioaddr & ~3, ADD_PORT));
555 retval = -ENODEV;
556 goto out2;
557 }
558 }
559
560 ioaddr &= ~3;
561 printk(KERN_DEBUG "PP_addr at %lx[%x]: 0x%x\n",
562 ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT));
563 writeword(ioaddr, ADD_PORT, PP_ChipID);
564
565 tmp = readword(ioaddr, DATA_PORT);
566 if (tmp != CHIP_EISA_ID_SIG) {
567 printk(KERN_DEBUG "%s: incorrect signature at %lx[%x]: 0x%x!="
568 CHIP_EISA_ID_SIG_STR "\n",
569 dev->name, ioaddr, DATA_PORT, tmp);
570 retval = -ENODEV;
571 goto out2;
572 }
573
574 /* Fill in the 'dev' fields. */
575 dev->base_addr = ioaddr;
576
577 /* get the chip type */
578 rev_type = readreg(dev, PRODUCT_ID_ADD);
579 lp->chip_type = rev_type &~ REVISON_BITS;
580 lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';
581
582 /* Check the chip type and revision in order to set the correct send command
583 CS8920 revision C and CS8900 revision F can use the faster send. */
584 lp->send_cmd = TX_AFTER_381;
585 if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
586 lp->send_cmd = TX_NOW;
587 if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
588 lp->send_cmd = TX_NOW;
589
590 if (net_debug && version_printed++ == 0)
591 printk(version);
592
593 printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#3lx ",
594 dev->name,
595 lp->chip_type==CS8900?'0':'2',
596 lp->chip_type==CS8920M?"M":"",
597 lp->chip_revision,
598 dev->base_addr);
599
600 reset_chip(dev);
601
602 /* Here we read the current configuration of the chip. If there
603 is no Extended EEPROM then the idea is to not disturb the chip
604 configuration, it should have been correctly setup by automatic
605 EEPROM read on reset. So, if the chip says it read the EEPROM
606 the driver will always do *something* instead of complain that
607 adapter_cnf is 0. */
608
609
610 if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
611 (EEPROM_OK|EEPROM_PRESENT)) {
612 /* Load the MAC. */
613 for (i=0; i < ETH_ALEN/2; i++) {
614 unsigned int Addr;
615 Addr = readreg(dev, PP_IA+i*2);
616 dev->dev_addr[i*2] = Addr & 0xFF;
617 dev->dev_addr[i*2+1] = Addr >> 8;
618 }
619
620 /* Load the Adapter Configuration.
621 Note: Barring any more specific information from some
622 other source (ie EEPROM+Schematics), we would not know
623 how to operate a 10Base2 interface on the AUI port.
624 However, since we do read the status of HCB1 and use
625 settings that always result in calls to control_dc_dc(dev,0)
626 a BNC interface should work if the enable pin
627 (dc/dc converter) is on HCB1. It will be called AUI
628 however. */
629
630 lp->adapter_cnf = 0;
631 i = readreg(dev, PP_LineCTL);
632 /* Preserve the setting of the HCB1 pin. */
633 if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL))
634 lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;
635 /* Save the sqelch bit */
636 if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH)
637 lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;
638 /* Check if the card is in 10Base-t only mode */
639 if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
640 lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T;
641 /* Check if the card is in AUI only mode */
642 if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
643 lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI;
644 /* Check if the card is in Auto mode. */
645 if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
646 lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T |
647 A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;
648
649 if (net_debug > 1)
650 printk(KERN_INFO "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
651 dev->name, i, lp->adapter_cnf);
652
653 /* IRQ. Other chips already probe, see below. */
654 if (lp->chip_type == CS8900)
655 lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;
656
657 printk( "[Cirrus EEPROM] ");
658 }
659
660 printk("\n");
661
662 /* First check to see if an EEPROM is attached. */
663
664 if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
665 printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n");
666 else if (get_eeprom_data(dev, START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
667 printk(KERN_WARNING "\ncs89x0: EEPROM read failed, relying on command line.\n");
668 } else if (get_eeprom_cksum(START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
669 /* Check if the chip was able to read its own configuration starting
670 at 0 in the EEPROM*/
671 if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
672 (EEPROM_OK|EEPROM_PRESENT))
673 printk(KERN_WARNING "cs89x0: Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");
674
675 } else {
676 /* This reads an extended EEPROM that is not documented
677 in the CS8900 datasheet. */
678
679 /* get transmission control word but keep the autonegotiation bits */
680 if (!lp->auto_neg_cnf) lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET/2];
681 /* Store adapter configuration */
682 if (!lp->adapter_cnf) lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET/2];
683 /* Store ISA configuration */
684 lp->isa_config = eeprom_buff[ISA_CNF_OFFSET/2];
685 dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET/2] << 8;
686
687 /* eeprom_buff has 32-bit ints, so we can't just memcpy it */
688 /* store the initial memory base address */
689 for (i = 0; i < ETH_ALEN/2; i++) {
690 dev->dev_addr[i*2] = eeprom_buff[i];
691 dev->dev_addr[i*2+1] = eeprom_buff[i] >> 8;
692 }
693 if (net_debug > 1)
694 printk(KERN_DEBUG "%s: new adapter_cnf: 0x%x\n",
695 dev->name, lp->adapter_cnf);
696 }
697
698 /* allow them to force multiple transceivers. If they force multiple, autosense */
699 {
700 int count = 0;
701 if (lp->force & FORCE_RJ45) {lp->adapter_cnf |= A_CNF_10B_T; count++; }
702 if (lp->force & FORCE_AUI) {lp->adapter_cnf |= A_CNF_AUI; count++; }
703 if (lp->force & FORCE_BNC) {lp->adapter_cnf |= A_CNF_10B_2; count++; }
704 if (count > 1) {lp->adapter_cnf |= A_CNF_MEDIA_AUTO; }
705 else if (lp->force & FORCE_RJ45){lp->adapter_cnf |= A_CNF_MEDIA_10B_T; }
706 else if (lp->force & FORCE_AUI) {lp->adapter_cnf |= A_CNF_MEDIA_AUI; }
707 else if (lp->force & FORCE_BNC) {lp->adapter_cnf |= A_CNF_MEDIA_10B_2; }
708 }
709
710 if (net_debug > 1)
711 printk(KERN_DEBUG "%s: after force 0x%x, adapter_cnf=0x%x\n",
712 dev->name, lp->force, lp->adapter_cnf);
713
714 /* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */
715
716 /* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */
717
718 /* FIXME: we don't set the Ethernet address on the command line. Use
719 ifconfig IFACE hw ether AABBCCDDEEFF */
720
721 printk(KERN_INFO "cs89x0 media %s%s%s",
722 (lp->adapter_cnf & A_CNF_10B_T)?"RJ-45,":"",
723 (lp->adapter_cnf & A_CNF_AUI)?"AUI,":"",
724 (lp->adapter_cnf & A_CNF_10B_2)?"BNC,":"");
725
726 lp->irq_map = 0xffff;
727 335
728 /* If this is a CS8900 then no pnp soft */ 336 if (chip_type == CS8900) {
729 if (lp->chip_type != CS8900 &&
730 /* Check if the ISA IRQ has been set */
731 (i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
732 (i != 0 && i < CS8920_NO_INTS))) {
733 if (!dev->irq)
734 dev->irq = i;
735 } else {
736 i = lp->isa_config & INT_NO_MASK;
737#ifndef CONFIG_CS89x0_PLATFORM 337#ifndef CONFIG_CS89x0_PLATFORM
738 if (lp->chip_type == CS8900) { 338 /* Search the mapping table for the corresponding IRQ pin. */
739#ifdef CS89x0_NONISA_IRQ 339 for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
740 i = cs8900_irq_map[0]; 340 if (cs8900_irq_map[i] == irq)
341 break;
342 /* Not found */
343 if (i == ARRAY_SIZE(cs8900_irq_map))
344 i = 3;
741#else 345#else
742 /* Translate the IRQ using the IRQ mapping table. */ 346 /* INTRQ0 pin is used for interrupt generation. */
743 if (i >= ARRAY_SIZE(cs8900_irq_map)) 347 i = 0;
744 printk("\ncs89x0: invalid ISA interrupt number %d\n", i);
745 else
746 i = cs8900_irq_map[i];
747
748 lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */
749 } else {
750 int irq_map_buff[IRQ_MAP_LEN/2];
751
752 if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
753 IRQ_MAP_LEN/2,
754 irq_map_buff) >= 0) {
755 if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
756 lp->irq_map = (irq_map_buff[0]>>8) | (irq_map_buff[1] << 8);
757 }
758#endif
759 }
760#endif
761 if (!dev->irq)
762 dev->irq = i;
763 }
764
765 printk(" IRQ %d", dev->irq);
766
767#if ALLOW_DMA
768 if (lp->use_dma) {
769 get_dma_channel(dev);
770 printk(", DMA %d", dev->dma);
771 }
772 else
773#endif 348#endif
774 { 349 writereg(dev, PP_CS8900_ISAINT, i);
775 printk(", programmed I/O"); 350 } else {
351 writereg(dev, PP_CS8920_ISAINT, irq);
776 } 352 }
777
778 /* print the ethernet address. */
779 printk(", MAC %pM", dev->dev_addr);
780
781 dev->netdev_ops = &net_ops;
782 dev->watchdog_timeo = HZ;
783
784 printk("\n");
785 if (net_debug)
786 printk("cs89x0_probe1() successful\n");
787
788 retval = register_netdev(dev);
789 if (retval)
790 goto out3;
791 return 0;
792out3:
793 writeword(dev->base_addr, ADD_PORT, PP_ChipID);
794out2:
795 release_region(ioaddr & ~3, NETCARD_IO_EXTENT);
796out1:
797 return retval;
798} 353}
799 354
355static void
356count_rx_errors(int status, struct net_device *dev)
357{
358 dev->stats.rx_errors++;
359 if (status & RX_RUNT)
360 dev->stats.rx_length_errors++;
361 if (status & RX_EXTRA_DATA)
362 dev->stats.rx_length_errors++;
363 if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA | RX_RUNT)))
364 /* per str 172 */
365 dev->stats.rx_crc_errors++;
366 if (status & RX_DRIBBLE)
367 dev->stats.rx_frame_errors++;
368}
800 369
801/********************************* 370/*********************************
802 * This page contains DMA routines 371 * This page contains DMA routines
803**********************************/ 372 *********************************/
804 373
805#if ALLOW_DMA 374#if ALLOW_DMA
806 375
807#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17) 376#define dma_page_eq(ptr1, ptr2) ((long)(ptr1) >> 17 == (long)(ptr2) >> 17)
808 377
809static void 378static void
810get_dma_channel(struct net_device *dev) 379get_dma_channel(struct net_device *dev)
@@ -833,11 +402,10 @@ write_dma(struct net_device *dev, int chip_type, int dma)
833 struct net_local *lp = netdev_priv(dev); 402 struct net_local *lp = netdev_priv(dev);
834 if ((lp->isa_config & ANY_ISA_DMA) == 0) 403 if ((lp->isa_config & ANY_ISA_DMA) == 0)
835 return; 404 return;
836 if (chip_type == CS8900) { 405 if (chip_type == CS8900)
837 writereg(dev, PP_CS8900_ISADMA, dma-5); 406 writereg(dev, PP_CS8900_ISADMA, dma - 5);
838 } else { 407 else
839 writereg(dev, PP_CS8920_ISADMA, dma); 408 writereg(dev, PP_CS8920_ISADMA, dma);
840 }
841} 409}
842 410
843static void 411static void
@@ -847,18 +415,15 @@ set_dma_cfg(struct net_device *dev)
847 415
848 if (lp->use_dma) { 416 if (lp->use_dma) {
849 if ((lp->isa_config & ANY_ISA_DMA) == 0) { 417 if ((lp->isa_config & ANY_ISA_DMA) == 0) {
850 if (net_debug > 3) 418 cs89_dbg(3, err, "set_dma_cfg(): no DMA\n");
851 printk("set_dma_cfg(): no DMA\n");
852 return; 419 return;
853 } 420 }
854 if (lp->isa_config & ISA_RxDMA) { 421 if (lp->isa_config & ISA_RxDMA) {
855 lp->curr_rx_cfg |= RX_DMA_ONLY; 422 lp->curr_rx_cfg |= RX_DMA_ONLY;
856 if (net_debug > 3) 423 cs89_dbg(3, info, "set_dma_cfg(): RX_DMA_ONLY\n");
857 printk("set_dma_cfg(): RX_DMA_ONLY\n");
858 } else { 424 } else {
859 lp->curr_rx_cfg |= AUTO_RX_DMA; /* not that we support it... */ 425 lp->curr_rx_cfg |= AUTO_RX_DMA; /* not that we support it... */
860 if (net_debug > 3) 426 cs89_dbg(3, info, "set_dma_cfg(): AUTO_RX_DMA\n");
861 printk("set_dma_cfg(): AUTO_RX_DMA\n");
862 } 427 }
863 } 428 }
864} 429}
@@ -868,7 +433,7 @@ dma_bufcfg(struct net_device *dev)
868{ 433{
869 struct net_local *lp = netdev_priv(dev); 434 struct net_local *lp = netdev_priv(dev);
870 if (lp->use_dma) 435 if (lp->use_dma)
871 return (lp->isa_config & ANY_ISA_DMA)? RX_DMA_ENBL : 0; 436 return (lp->isa_config & ANY_ISA_DMA) ? RX_DMA_ENBL : 0;
872 else 437 else
873 return 0; 438 return 0;
874} 439}
@@ -898,13 +463,13 @@ dma_rx(struct net_device *dev)
898 int status, length; 463 int status, length;
899 unsigned char *bp = lp->rx_dma_ptr; 464 unsigned char *bp = lp->rx_dma_ptr;
900 465
901 status = bp[0] + (bp[1]<<8); 466 status = bp[0] + (bp[1] << 8);
902 length = bp[2] + (bp[3]<<8); 467 length = bp[2] + (bp[3] << 8);
903 bp += 4; 468 bp += 4;
904 if (net_debug > 5) { 469
905 printk( "%s: receiving DMA packet at %lx, status %x, length %x\n", 470 cs89_dbg(5, debug, "%s: receiving DMA packet at %lx, status %x, length %x\n",
906 dev->name, (unsigned long)bp, status, length); 471 dev->name, (unsigned long)bp, status, length);
907 } 472
908 if ((status & RX_OK) == 0) { 473 if ((status & RX_OK) == 0) {
909 count_rx_errors(status, dev); 474 count_rx_errors(status, dev);
910 goto skip_this_frame; 475 goto skip_this_frame;
@@ -913,14 +478,16 @@ dma_rx(struct net_device *dev)
913 /* Malloc up new buffer. */ 478 /* Malloc up new buffer. */
914 skb = netdev_alloc_skb(dev, length + 2); 479 skb = netdev_alloc_skb(dev, length + 2);
915 if (skb == NULL) { 480 if (skb == NULL) {
916 if (net_debug) /* I don't think we want to do this to a stressed system */ 481 /* I don't think we want to do this to a stressed system */
917 printk("%s: Memory squeeze, dropping packet.\n", dev->name); 482 cs89_dbg(0, err, "%s: Memory squeeze, dropping packet\n",
483 dev->name);
918 dev->stats.rx_dropped++; 484 dev->stats.rx_dropped++;
919 485
920 /* AKPM: advance bp to the next frame */ 486 /* AKPM: advance bp to the next frame */
921skip_this_frame: 487skip_this_frame:
922 bp += (length + 3) & ~3; 488 bp += (length + 3) & ~3;
923 if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024; 489 if (bp >= lp->end_dma_buff)
490 bp -= lp->dmasize * 1024;
924 lp->rx_dma_ptr = bp; 491 lp->rx_dma_ptr = bp;
925 return; 492 return;
926 } 493 }
@@ -928,63 +495,38 @@ skip_this_frame:
928 495
929 if (bp + length > lp->end_dma_buff) { 496 if (bp + length > lp->end_dma_buff) {
930 int semi_cnt = lp->end_dma_buff - bp; 497 int semi_cnt = lp->end_dma_buff - bp;
931 memcpy(skb_put(skb,semi_cnt), bp, semi_cnt); 498 memcpy(skb_put(skb, semi_cnt), bp, semi_cnt);
932 memcpy(skb_put(skb,length - semi_cnt), lp->dma_buff, 499 memcpy(skb_put(skb, length - semi_cnt), lp->dma_buff,
933 length - semi_cnt); 500 length - semi_cnt);
934 } else { 501 } else {
935 memcpy(skb_put(skb,length), bp, length); 502 memcpy(skb_put(skb, length), bp, length);
936 } 503 }
937 bp += (length + 3) & ~3; 504 bp += (length + 3) & ~3;
938 if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024; 505 if (bp >= lp->end_dma_buff)
506 bp -= lp->dmasize*1024;
939 lp->rx_dma_ptr = bp; 507 lp->rx_dma_ptr = bp;
940 508
941 if (net_debug > 3) { 509 cs89_dbg(3, info, "%s: received %d byte DMA packet of type %x\n",
942 printk( "%s: received %d byte DMA packet of type %x\n", 510 dev->name, length,
943 dev->name, length, 511 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
944 (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]); 512 skb->data[ETH_ALEN + ETH_ALEN + 1]));
945 } 513
946 skb->protocol=eth_type_trans(skb,dev); 514 skb->protocol = eth_type_trans(skb, dev);
947 netif_rx(skb); 515 netif_rx(skb);
948 dev->stats.rx_packets++; 516 dev->stats.rx_packets++;
949 dev->stats.rx_bytes += length; 517 dev->stats.rx_bytes += length;
950} 518}
951 519
952#endif /* ALLOW_DMA */ 520static void release_dma_buff(struct net_local *lp)
953
954static void __init reset_chip(struct net_device *dev)
955{ 521{
956#if !defined(CONFIG_MACH_MX31ADS) 522 if (lp->dma_buff) {
957#if !defined(CS89x0_NONISA_IRQ) 523 free_pages((unsigned long)(lp->dma_buff),
958 struct net_local *lp = netdev_priv(dev); 524 get_order(lp->dmasize * 1024));
959 int ioaddr = dev->base_addr; 525 lp->dma_buff = NULL;
960#endif /* CS89x0_NONISA_IRQ */
961 int reset_start_time;
962
963 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
964
965 /* wait 30 ms */
966 msleep(30);
967
968#if !defined(CS89x0_NONISA_IRQ)
969 if (lp->chip_type != CS8900) {
970 /* Hardware problem requires PNP registers to be reconfigured after a reset */
971 writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
972 outb(dev->irq, ioaddr + DATA_PORT);
973 outb(0, ioaddr + DATA_PORT + 1);
974
975 writeword(ioaddr, ADD_PORT, PP_CS8920_ISAMemB);
976 outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
977 outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1);
978 } 526 }
979#endif /* CS89x0_NONISA_IRQ */
980
981 /* Wait until the chip is reset */
982 reset_start_time = jiffies;
983 while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
984 ;
985#endif /* !CONFIG_MACH_MX31ADS */
986} 527}
987 528
529#endif /* ALLOW_DMA */
988 530
989static void 531static void
990control_dc_dc(struct net_device *dev, int on_not_off) 532control_dc_dc(struct net_device *dev, int on_not_off)
@@ -993,8 +535,9 @@ control_dc_dc(struct net_device *dev, int on_not_off)
993 unsigned int selfcontrol; 535 unsigned int selfcontrol;
994 int timenow = jiffies; 536 int timenow = jiffies;
995 /* control the DC to DC convertor in the SelfControl register. 537 /* control the DC to DC convertor in the SelfControl register.
996 Note: This is hooked up to a general purpose pin, might not 538 * Note: This is hooked up to a general purpose pin, might not
997 always be a DC to DC convertor. */ 539 * always be a DC to DC convertor.
540 */
998 541
999 selfcontrol = HCB1_ENBL; /* Enable the HCB1 bit as an output */ 542 selfcontrol = HCB1_ENBL; /* Enable the HCB1 bit as an output */
1000 if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off) 543 if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off)
@@ -1008,6 +551,49 @@ control_dc_dc(struct net_device *dev, int on_not_off)
1008 ; 551 ;
1009} 552}
1010 553
554/* send a test packet - return true if carrier bits are ok */
555static int
556send_test_pkt(struct net_device *dev)
557{
558 struct net_local *lp = netdev_priv(dev);
559 char test_packet[] = {
560 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
561 0, 46, /* A 46 in network order */
562 0, 0, /* DSAP=0 & SSAP=0 fields */
563 0xf3, 0 /* Control (Test Req + P bit set) */
564 };
565 long timenow = jiffies;
566
567 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
568
569 memcpy(test_packet, dev->dev_addr, ETH_ALEN);
570 memcpy(test_packet + ETH_ALEN, dev->dev_addr, ETH_ALEN);
571
572 iowrite16(TX_AFTER_ALL, lp->virt_addr + TX_CMD_PORT);
573 iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT);
574
575 /* Test to see if the chip has allocated memory for the packet */
576 while (jiffies - timenow < 5)
577 if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
578 break;
579 if (jiffies - timenow >= 5)
580 return 0; /* this shouldn't happen */
581
582 /* Write the contents of the packet */
583 writewords(lp, TX_FRAME_PORT, test_packet, (ETH_ZLEN + 1) >> 1);
584
585 cs89_dbg(1, debug, "Sending test packet ");
586 /* wait a couple of jiffies for packet to be received */
587 for (timenow = jiffies; jiffies - timenow < 3;)
588 ;
589 if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
590 cs89_dbg(1, cont, "succeeded\n");
591 return 1;
592 }
593 cs89_dbg(1, cont, "failed\n");
594 return 0;
595}
596
1011#define DETECTED_NONE 0 597#define DETECTED_NONE 0
1012#define DETECTED_RJ45H 1 598#define DETECTED_RJ45H 1
1013#define DETECTED_RJ45F 2 599#define DETECTED_RJ45F 2
@@ -1021,40 +607,46 @@ detect_tp(struct net_device *dev)
1021 int timenow = jiffies; 607 int timenow = jiffies;
1022 int fdx; 608 int fdx;
1023 609
1024 if (net_debug > 1) printk("%s: Attempting TP\n", dev->name); 610 cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name);
1025 611
1026 /* If connected to another full duplex capable 10-Base-T card the link pulses 612 /* If connected to another full duplex capable 10-Base-T card
1027 seem to be lost when the auto detect bit in the LineCTL is set. 613 * the link pulses seem to be lost when the auto detect bit in
1028 To overcome this the auto detect bit will be cleared whilst testing the 614 * the LineCTL is set. To overcome this the auto detect bit will
1029 10-Base-T interface. This would not be necessary for the sparrow chip but 615 * be cleared whilst testing the 10-Base-T interface. This would
1030 is simpler to do it anyway. */ 616 * not be necessary for the sparrow chip but is simpler to do it
1031 writereg(dev, PP_LineCTL, lp->linectl &~ AUI_ONLY); 617 * anyway.
618 */
619 writereg(dev, PP_LineCTL, lp->linectl & ~AUI_ONLY);
1032 control_dc_dc(dev, 0); 620 control_dc_dc(dev, 0);
1033 621
1034 /* Delay for the hardware to work out if the TP cable is present - 150ms */ 622 /* Delay for the hardware to work out if the TP cable is present
1035 for (timenow = jiffies; jiffies - timenow < 15; ) 623 * - 150ms
1036 ; 624 */
625 for (timenow = jiffies; jiffies - timenow < 15;)
626 ;
1037 if ((readreg(dev, PP_LineST) & LINK_OK) == 0) 627 if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
1038 return DETECTED_NONE; 628 return DETECTED_NONE;
1039 629
1040 if (lp->chip_type == CS8900) { 630 if (lp->chip_type == CS8900) {
1041 switch (lp->force & 0xf0) { 631 switch (lp->force & 0xf0) {
1042#if 0 632#if 0
1043 case FORCE_AUTO: 633 case FORCE_AUTO:
1044 printk("%s: cs8900 doesn't autonegotiate\n",dev->name); 634 pr_info("%s: cs8900 doesn't autonegotiate\n",
1045 return DETECTED_NONE; 635 dev->name);
636 return DETECTED_NONE;
1046#endif 637#endif
1047 /* CS8900 doesn't support AUTO, change to HALF*/ 638 /* CS8900 doesn't support AUTO, change to HALF*/
1048 case FORCE_AUTO: 639 case FORCE_AUTO:
1049 lp->force &= ~FORCE_AUTO; 640 lp->force &= ~FORCE_AUTO;
1050 lp->force |= FORCE_HALF; 641 lp->force |= FORCE_HALF;
1051 break; 642 break;
1052 case FORCE_HALF: 643 case FORCE_HALF:
1053 break; 644 break;
1054 case FORCE_FULL: 645 case FORCE_FULL:
1055 writereg(dev, PP_TestCTL, readreg(dev, PP_TestCTL) | FDX_8900); 646 writereg(dev, PP_TestCTL,
647 readreg(dev, PP_TestCTL) | FDX_8900);
1056 break; 648 break;
1057 } 649 }
1058 fdx = readreg(dev, PP_TestCTL) & FDX_8900; 650 fdx = readreg(dev, PP_TestCTL) & FDX_8900;
1059 } else { 651 } else {
1060 switch (lp->force & 0xf0) { 652 switch (lp->force & 0xf0) {
@@ -1067,15 +659,15 @@ detect_tp(struct net_device *dev)
1067 case FORCE_FULL: 659 case FORCE_FULL:
1068 lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX; 660 lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX;
1069 break; 661 break;
1070 } 662 }
1071 663
1072 writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK); 664 writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK);
1073 665
1074 if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) { 666 if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
1075 printk(KERN_INFO "%s: negotiating duplex...\n",dev->name); 667 pr_info("%s: negotiating duplex...\n", dev->name);
1076 while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) { 668 while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
1077 if (jiffies - timenow > 4000) { 669 if (jiffies - timenow > 4000) {
1078 printk(KERN_ERR "**** Full / half duplex auto-negotiation timed out ****\n"); 670 pr_err("**** Full / half duplex auto-negotiation timed out ****\n");
1079 break; 671 break;
1080 } 672 }
1081 } 673 }
@@ -1088,56 +680,31 @@ detect_tp(struct net_device *dev)
1088 return DETECTED_RJ45H; 680 return DETECTED_RJ45H;
1089} 681}
1090 682
1091/* send a test packet - return true if carrier bits are ok */
1092static int 683static int
1093send_test_pkt(struct net_device *dev) 684detect_bnc(struct net_device *dev)
1094{ 685{
1095 char test_packet[] = { 0,0,0,0,0,0, 0,0,0,0,0,0, 686 struct net_local *lp = netdev_priv(dev);
1096 0, 46, /* A 46 in network order */
1097 0, 0, /* DSAP=0 & SSAP=0 fields */
1098 0xf3, 0 /* Control (Test Req + P bit set) */ };
1099 long timenow = jiffies;
1100
1101 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
1102
1103 memcpy(test_packet, dev->dev_addr, ETH_ALEN);
1104 memcpy(test_packet+ETH_ALEN, dev->dev_addr, ETH_ALEN);
1105
1106 writeword(dev->base_addr, TX_CMD_PORT, TX_AFTER_ALL);
1107 writeword(dev->base_addr, TX_LEN_PORT, ETH_ZLEN);
1108 687
1109 /* Test to see if the chip has allocated memory for the packet */ 688 cs89_dbg(1, debug, "%s: Attempting BNC\n", dev->name);
1110 while (jiffies - timenow < 5) 689 control_dc_dc(dev, 1);
1111 if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
1112 break;
1113 if (jiffies - timenow >= 5)
1114 return 0; /* this shouldn't happen */
1115 690
1116 /* Write the contents of the packet */ 691 writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY);
1117 writewords(dev->base_addr, TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1);
1118 692
1119 if (net_debug > 1) printk("Sending test packet "); 693 if (send_test_pkt(dev))
1120 /* wait a couple of jiffies for packet to be received */ 694 return DETECTED_BNC;
1121 for (timenow = jiffies; jiffies - timenow < 3; ) 695 else
1122 ; 696 return DETECTED_NONE;
1123 if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
1124 if (net_debug > 1) printk("succeeded\n");
1125 return 1;
1126 }
1127 if (net_debug > 1) printk("failed\n");
1128 return 0;
1129} 697}
1130 698
1131
1132static int 699static int
1133detect_aui(struct net_device *dev) 700detect_aui(struct net_device *dev)
1134{ 701{
1135 struct net_local *lp = netdev_priv(dev); 702 struct net_local *lp = netdev_priv(dev);
1136 703
1137 if (net_debug > 1) printk("%s: Attempting AUI\n", dev->name); 704 cs89_dbg(1, debug, "%s: Attempting AUI\n", dev->name);
1138 control_dc_dc(dev, 0); 705 control_dc_dc(dev, 0);
1139 706
1140 writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY); 707 writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY);
1141 708
1142 if (send_test_pkt(dev)) 709 if (send_test_pkt(dev))
1143 return DETECTED_AUI; 710 return DETECTED_AUI;
@@ -1145,45 +712,154 @@ detect_aui(struct net_device *dev)
1145 return DETECTED_NONE; 712 return DETECTED_NONE;
1146} 713}
1147 714
1148static int 715/* We have a good packet(s), get it/them out of the buffers. */
1149detect_bnc(struct net_device *dev) 716static void
717net_rx(struct net_device *dev)
1150{ 718{
1151 struct net_local *lp = netdev_priv(dev); 719 struct net_local *lp = netdev_priv(dev);
720 struct sk_buff *skb;
721 int status, length;
1152 722
1153 if (net_debug > 1) printk("%s: Attempting BNC\n", dev->name); 723 status = ioread16(lp->virt_addr + RX_FRAME_PORT);
1154 control_dc_dc(dev, 1); 724 length = ioread16(lp->virt_addr + RX_FRAME_PORT);
1155 725
1156 writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY); 726 if ((status & RX_OK) == 0) {
727 count_rx_errors(status, dev);
728 return;
729 }
1157 730
1158 if (send_test_pkt(dev)) 731 /* Malloc up new buffer. */
1159 return DETECTED_BNC; 732 skb = netdev_alloc_skb(dev, length + 2);
1160 else 733 if (skb == NULL) {
1161 return DETECTED_NONE; 734#if 0 /* Again, this seems a cruel thing to do */
735 pr_warn("%s: Memory squeeze, dropping packet\n", dev->name);
736#endif
737 dev->stats.rx_dropped++;
738 return;
739 }
740 skb_reserve(skb, 2); /* longword align L3 header */
741
742 readwords(lp, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
743 if (length & 1)
744 skb->data[length-1] = ioread16(lp->virt_addr + RX_FRAME_PORT);
745
746 cs89_dbg(3, debug, "%s: received %d byte packet of type %x\n",
747 dev->name, length,
748 (skb->data[ETH_ALEN + ETH_ALEN] << 8) |
749 skb->data[ETH_ALEN + ETH_ALEN + 1]);
750
751 skb->protocol = eth_type_trans(skb, dev);
752 netif_rx(skb);
753 dev->stats.rx_packets++;
754 dev->stats.rx_bytes += length;
1162} 755}
1163 756
757/* The typical workload of the driver:
758 * Handle the network interface interrupts.
759 */
1164 760
1165static void 761static irqreturn_t net_interrupt(int irq, void *dev_id)
1166write_irq(struct net_device *dev, int chip_type, int irq)
1167{ 762{
1168 int i; 763 struct net_device *dev = dev_id;
764 struct net_local *lp;
765 int status;
766 int handled = 0;
1169 767
1170 if (chip_type == CS8900) { 768 lp = netdev_priv(dev);
1171#ifndef CONFIG_CS89x0_PLATFORM 769
1172 /* Search the mapping table for the corresponding IRQ pin. */ 770 /* we MUST read all the events out of the ISQ, otherwise we'll never
1173 for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++) 771 * get interrupted again. As a consequence, we can't have any limit
1174 if (cs8900_irq_map[i] == irq) 772 * on the number of times we loop in the interrupt handler. The
1175 break; 773 * hardware guarantees that eventually we'll run out of events. Of
1176 /* Not found */ 774 * course, if you're on a slow machine, and packets are arriving
1177 if (i == ARRAY_SIZE(cs8900_irq_map)) 775 * faster than you can read them off, you're screwed. Hasta la
1178 i = 3; 776 * vista, baby!
1179#else 777 */
1180 /* INTRQ0 pin is used for interrupt generation. */ 778 while ((status = ioread16(lp->virt_addr + ISQ_PORT))) {
1181 i = 0; 779 cs89_dbg(4, debug, "%s: event=%04x\n", dev->name, status);
780 handled = 1;
781 switch (status & ISQ_EVENT_MASK) {
782 case ISQ_RECEIVER_EVENT:
783 /* Got a packet(s). */
784 net_rx(dev);
785 break;
786 case ISQ_TRANSMITTER_EVENT:
787 dev->stats.tx_packets++;
788 netif_wake_queue(dev); /* Inform upper layers. */
789 if ((status & (TX_OK |
790 TX_LOST_CRS |
791 TX_SQE_ERROR |
792 TX_LATE_COL |
793 TX_16_COL)) != TX_OK) {
794 if ((status & TX_OK) == 0)
795 dev->stats.tx_errors++;
796 if (status & TX_LOST_CRS)
797 dev->stats.tx_carrier_errors++;
798 if (status & TX_SQE_ERROR)
799 dev->stats.tx_heartbeat_errors++;
800 if (status & TX_LATE_COL)
801 dev->stats.tx_window_errors++;
802 if (status & TX_16_COL)
803 dev->stats.tx_aborted_errors++;
804 }
805 break;
806 case ISQ_BUFFER_EVENT:
807 if (status & READY_FOR_TX) {
808 /* we tried to transmit a packet earlier,
809 * but inexplicably ran out of buffers.
810 * That shouldn't happen since we only ever
811 * load one packet. Shrug. Do the right
812 * thing anyway.
813 */
814 netif_wake_queue(dev); /* Inform upper layers. */
815 }
816 if (status & TX_UNDERRUN) {
817 cs89_dbg(0, err, "%s: transmit underrun\n",
818 dev->name);
819 lp->send_underrun++;
820 if (lp->send_underrun == 3)
821 lp->send_cmd = TX_AFTER_381;
822 else if (lp->send_underrun == 6)
823 lp->send_cmd = TX_AFTER_ALL;
824 /* transmit cycle is done, although
825 * frame wasn't transmitted - this
826 * avoids having to wait for the upper
827 * layers to timeout on us, in the
828 * event of a tx underrun
829 */
830 netif_wake_queue(dev); /* Inform upper layers. */
831 }
832#if ALLOW_DMA
833 if (lp->use_dma && (status & RX_DMA)) {
834 int count = readreg(dev, PP_DmaFrameCnt);
835 while (count) {
836 cs89_dbg(5, debug,
837 "%s: receiving %d DMA frames\n",
838 dev->name, count);
839 if (count > 1)
840 cs89_dbg(2, debug,
841 "%s: receiving %d DMA frames\n",
842 dev->name, count);
843 dma_rx(dev);
844 if (--count == 0)
845 count = readreg(dev, PP_DmaFrameCnt);
846 if (count > 0)
847 cs89_dbg(2, debug,
848 "%s: continuing with %d DMA frames\n",
849 dev->name, count);
850 }
851 }
1182#endif 852#endif
1183 writereg(dev, PP_CS8900_ISAINT, i); 853 break;
1184 } else { 854 case ISQ_RX_MISS_EVENT:
1185 writereg(dev, PP_CS8920_ISAINT, irq); 855 dev->stats.rx_missed_errors += (status >> 6);
856 break;
857 case ISQ_TX_COL_EVENT:
858 dev->stats.collisions += (status >> 6);
859 break;
860 }
1186 } 861 }
862 return IRQ_RETVAL(handled);
1187} 863}
1188 864
1189/* Open/initialize the board. This is called (in the current kernel) 865/* Open/initialize the board. This is called (in the current kernel)
@@ -1192,7 +868,7 @@ write_irq(struct net_device *dev, int chip_type, int irq)
1192 This routine should set everything up anew at each open, even 868 This routine should set everything up anew at each open, even
1193 registers that "should" only need to be set once at boot, so that 869 registers that "should" only need to be set once at boot, so that
1194 there is non-reboot way to recover if something goes wrong. 870 there is non-reboot way to recover if something goes wrong.
1195 */ 871*/
1196 872
1197/* AKPM: do we need to do any locking here? */ 873/* AKPM: do we need to do any locking here? */
1198 874
@@ -1208,14 +884,15 @@ net_open(struct net_device *dev)
1208 /* Allow interrupts to be generated by the chip */ 884 /* Allow interrupts to be generated by the chip */
1209/* Cirrus' release had this: */ 885/* Cirrus' release had this: */
1210#if 0 886#if 0
1211 writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ ); 887 writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ);
1212#endif 888#endif
1213/* And 2.3.47 had this: */ 889/* And 2.3.47 had this: */
1214 writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON); 890 writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
1215 891
1216 for (i = 2; i < CS8920_NO_INTS; i++) { 892 for (i = 2; i < CS8920_NO_INTS; i++) {
1217 if ((1 << i) & lp->irq_map) { 893 if ((1 << i) & lp->irq_map) {
1218 if (request_irq(i, net_interrupt, 0, dev->name, dev) == 0) { 894 if (request_irq(i, net_interrupt, 0, dev->name,
895 dev) == 0) {
1219 dev->irq = i; 896 dev->irq = i;
1220 write_irq(dev, lp->chip_type, i); 897 write_irq(dev, lp->chip_type, i);
1221 /* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */ 898 /* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */
@@ -1226,23 +903,21 @@ net_open(struct net_device *dev)
1226 903
1227 if (i >= CS8920_NO_INTS) { 904 if (i >= CS8920_NO_INTS) {
1228 writereg(dev, PP_BusCTL, 0); /* disable interrupts. */ 905 writereg(dev, PP_BusCTL, 0); /* disable interrupts. */
1229 printk(KERN_ERR "cs89x0: can't get an interrupt\n"); 906 pr_err("can't get an interrupt\n");
1230 ret = -EAGAIN; 907 ret = -EAGAIN;
1231 goto bad_out; 908 goto bad_out;
1232 } 909 }
1233 } 910 } else {
1234 else
1235 {
1236#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM) 911#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM)
1237 if (((1 << dev->irq) & lp->irq_map) == 0) { 912 if (((1 << dev->irq) & lp->irq_map) == 0) {
1238 printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", 913 pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
1239 dev->name, dev->irq, lp->irq_map); 914 dev->name, dev->irq, lp->irq_map);
1240 ret = -EAGAIN; 915 ret = -EAGAIN;
1241 goto bad_out; 916 goto bad_out;
1242 } 917 }
1243#endif 918#endif
1244/* FIXME: Cirrus' release had this: */ 919/* FIXME: Cirrus' release had this: */
1245 writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ ); 920 writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ);
1246/* And 2.3.47 had this: */ 921/* And 2.3.47 had this: */
1247#if 0 922#if 0
1248 writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON); 923 writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
@@ -1250,147 +925,168 @@ net_open(struct net_device *dev)
1250 write_irq(dev, lp->chip_type, dev->irq); 925 write_irq(dev, lp->chip_type, dev->irq);
1251 ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev); 926 ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
1252 if (ret) { 927 if (ret) {
1253 printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq); 928 pr_err("request_irq(%d) failed\n", dev->irq);
1254 goto bad_out; 929 goto bad_out;
1255 } 930 }
1256 } 931 }
1257 932
1258#if ALLOW_DMA 933#if ALLOW_DMA
1259 if (lp->use_dma) { 934 if (lp->use_dma && (lp->isa_config & ANY_ISA_DMA)) {
1260 if (lp->isa_config & ANY_ISA_DMA) { 935 unsigned long flags;
1261 unsigned long flags; 936 lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
1262 lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL, 937 get_order(lp->dmasize * 1024));
1263 get_order(lp->dmasize * 1024)); 938 if (!lp->dma_buff) {
1264 939 pr_err("%s: cannot get %dK memory for DMA\n",
1265 if (!lp->dma_buff) { 940 dev->name, lp->dmasize);
1266 printk(KERN_ERR "%s: cannot get %dK memory for DMA\n", dev->name, lp->dmasize); 941 goto release_irq;
1267 goto release_irq; 942 }
1268 } 943 cs89_dbg(1, debug, "%s: dma %lx %lx\n",
1269 if (net_debug > 1) { 944 dev->name,
1270 printk( "%s: dma %lx %lx\n", 945 (unsigned long)lp->dma_buff,
1271 dev->name, 946 (unsigned long)isa_virt_to_bus(lp->dma_buff));
1272 (unsigned long)lp->dma_buff, 947 if ((unsigned long)lp->dma_buff >= MAX_DMA_ADDRESS ||
1273 (unsigned long)isa_virt_to_bus(lp->dma_buff)); 948 !dma_page_eq(lp->dma_buff,
1274 } 949 lp->dma_buff + lp->dmasize * 1024 - 1)) {
1275 if ((unsigned long) lp->dma_buff >= MAX_DMA_ADDRESS || 950 pr_err("%s: not usable as DMA buffer\n", dev->name);
1276 !dma_page_eq(lp->dma_buff, lp->dma_buff+lp->dmasize*1024-1)) { 951 goto release_irq;
1277 printk(KERN_ERR "%s: not usable as DMA buffer\n", dev->name);
1278 goto release_irq;
1279 }
1280 memset(lp->dma_buff, 0, lp->dmasize * 1024); /* Why? */
1281 if (request_dma(dev->dma, dev->name)) {
1282 printk(KERN_ERR "%s: cannot get dma channel %d\n", dev->name, dev->dma);
1283 goto release_irq;
1284 }
1285 write_dma(dev, lp->chip_type, dev->dma);
1286 lp->rx_dma_ptr = lp->dma_buff;
1287 lp->end_dma_buff = lp->dma_buff + lp->dmasize*1024;
1288 spin_lock_irqsave(&lp->lock, flags);
1289 disable_dma(dev->dma);
1290 clear_dma_ff(dev->dma);
1291 set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */
1292 set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
1293 set_dma_count(dev->dma, lp->dmasize*1024);
1294 enable_dma(dev->dma);
1295 spin_unlock_irqrestore(&lp->lock, flags);
1296 } 952 }
953 memset(lp->dma_buff, 0, lp->dmasize * 1024); /* Why? */
954 if (request_dma(dev->dma, dev->name)) {
955 pr_err("%s: cannot get dma channel %d\n",
956 dev->name, dev->dma);
957 goto release_irq;
958 }
959 write_dma(dev, lp->chip_type, dev->dma);
960 lp->rx_dma_ptr = lp->dma_buff;
961 lp->end_dma_buff = lp->dma_buff + lp->dmasize * 1024;
962 spin_lock_irqsave(&lp->lock, flags);
963 disable_dma(dev->dma);
964 clear_dma_ff(dev->dma);
965 set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */
966 set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
967 set_dma_count(dev->dma, lp->dmasize * 1024);
968 enable_dma(dev->dma);
969 spin_unlock_irqrestore(&lp->lock, flags);
1297 } 970 }
1298#endif /* ALLOW_DMA */ 971#endif /* ALLOW_DMA */
1299 972
1300 /* set the Ethernet address */ 973 /* set the Ethernet address */
1301 for (i=0; i < ETH_ALEN/2; i++) 974 for (i = 0; i < ETH_ALEN / 2; i++)
1302 writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8)); 975 writereg(dev, PP_IA + i * 2,
976 (dev->dev_addr[i * 2] |
977 (dev->dev_addr[i * 2 + 1] << 8)));
1303 978
1304 /* while we're testing the interface, leave interrupts disabled */ 979 /* while we're testing the interface, leave interrupts disabled */
1305 writereg(dev, PP_BusCTL, MEMORY_ON); 980 writereg(dev, PP_BusCTL, MEMORY_ON);
1306 981
1307 /* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */ 982 /* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */
1308 if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) && (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH)) 983 if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) &&
1309 lp->linectl = LOW_RX_SQUELCH; 984 (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH))
985 lp->linectl = LOW_RX_SQUELCH;
1310 else 986 else
1311 lp->linectl = 0; 987 lp->linectl = 0;
1312 988
1313 /* check to make sure that they have the "right" hardware available */ 989 /* check to make sure that they have the "right" hardware available */
1314 switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) { 990 switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
1315 case A_CNF_MEDIA_10B_T: result = lp->adapter_cnf & A_CNF_10B_T; break; 991 case A_CNF_MEDIA_10B_T:
1316 case A_CNF_MEDIA_AUI: result = lp->adapter_cnf & A_CNF_AUI; break; 992 result = lp->adapter_cnf & A_CNF_10B_T;
1317 case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break; 993 break;
1318 default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2); 994 case A_CNF_MEDIA_AUI:
1319 } 995 result = lp->adapter_cnf & A_CNF_AUI;
1320 if (!result) { 996 break;
1321 printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name); 997 case A_CNF_MEDIA_10B_2:
998 result = lp->adapter_cnf & A_CNF_10B_2;
999 break;
1000 default:
1001 result = lp->adapter_cnf & (A_CNF_10B_T |
1002 A_CNF_AUI |
1003 A_CNF_10B_2);
1004 }
1005 if (!result) {
1006 pr_err("%s: EEPROM is configured for unavailable media\n",
1007 dev->name);
1322release_dma: 1008release_dma:
1323#if ALLOW_DMA 1009#if ALLOW_DMA
1324 free_dma(dev->dma); 1010 free_dma(dev->dma);
1325release_irq: 1011release_irq:
1326 release_dma_buff(lp); 1012 release_dma_buff(lp);
1327#endif 1013#endif
1328 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON)); 1014 writereg(dev, PP_LineCTL,
1329 free_irq(dev->irq, dev); 1015 readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
1016 free_irq(dev->irq, dev);
1330 ret = -EAGAIN; 1017 ret = -EAGAIN;
1331 goto bad_out; 1018 goto bad_out;
1332 } 1019 }
1333 1020
1334 /* set the hardware to the configured choice */ 1021 /* set the hardware to the configured choice */
1335 switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) { 1022 switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
1336 case A_CNF_MEDIA_10B_T: 1023 case A_CNF_MEDIA_10B_T:
1337 result = detect_tp(dev); 1024 result = detect_tp(dev);
1338 if (result==DETECTED_NONE) { 1025 if (result == DETECTED_NONE) {
1339 printk(KERN_WARNING "%s: 10Base-T (RJ-45) has no cable\n", dev->name); 1026 pr_warn("%s: 10Base-T (RJ-45) has no cable\n",
1340 if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ 1027 dev->name);
1341 result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */ 1028 if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
1342 } 1029 result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */
1030 }
1343 break; 1031 break;
1344 case A_CNF_MEDIA_AUI: 1032 case A_CNF_MEDIA_AUI:
1345 result = detect_aui(dev); 1033 result = detect_aui(dev);
1346 if (result==DETECTED_NONE) { 1034 if (result == DETECTED_NONE) {
1347 printk(KERN_WARNING "%s: 10Base-5 (AUI) has no cable\n", dev->name); 1035 pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name);
1348 if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ 1036 if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
1349 result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */ 1037 result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */
1350 } 1038 }
1351 break; 1039 break;
1352 case A_CNF_MEDIA_10B_2: 1040 case A_CNF_MEDIA_10B_2:
1353 result = detect_bnc(dev); 1041 result = detect_bnc(dev);
1354 if (result==DETECTED_NONE) { 1042 if (result == DETECTED_NONE) {
1355 printk(KERN_WARNING "%s: 10Base-2 (BNC) has no cable\n", dev->name); 1043 pr_warn("%s: 10Base-2 (BNC) has no cable\n", dev->name);
1356 if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ 1044 if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
1357 result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */ 1045 result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */
1358 } 1046 }
1359 break; 1047 break;
1360 case A_CNF_MEDIA_AUTO: 1048 case A_CNF_MEDIA_AUTO:
1361 writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET); 1049 writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET);
1362 if (lp->adapter_cnf & A_CNF_10B_T) 1050 if (lp->adapter_cnf & A_CNF_10B_T) {
1363 if ((result = detect_tp(dev)) != DETECTED_NONE) 1051 result = detect_tp(dev);
1052 if (result != DETECTED_NONE)
1364 break; 1053 break;
1365 if (lp->adapter_cnf & A_CNF_AUI) 1054 }
1366 if ((result = detect_aui(dev)) != DETECTED_NONE) 1055 if (lp->adapter_cnf & A_CNF_AUI) {
1056 result = detect_aui(dev);
1057 if (result != DETECTED_NONE)
1367 break; 1058 break;
1368 if (lp->adapter_cnf & A_CNF_10B_2) 1059 }
1369 if ((result = detect_bnc(dev)) != DETECTED_NONE) 1060 if (lp->adapter_cnf & A_CNF_10B_2) {
1061 result = detect_bnc(dev);
1062 if (result != DETECTED_NONE)
1370 break; 1063 break;
1371 printk(KERN_ERR "%s: no media detected\n", dev->name); 1064 }
1065 pr_err("%s: no media detected\n", dev->name);
1372 goto release_dma; 1066 goto release_dma;
1373 } 1067 }
1374 switch(result) { 1068 switch (result) {
1375 case DETECTED_NONE: 1069 case DETECTED_NONE:
1376 printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name); 1070 pr_err("%s: no network cable attached to configured media\n",
1071 dev->name);
1377 goto release_dma; 1072 goto release_dma;
1378 case DETECTED_RJ45H: 1073 case DETECTED_RJ45H:
1379 printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name); 1074 pr_info("%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
1380 break; 1075 break;
1381 case DETECTED_RJ45F: 1076 case DETECTED_RJ45F:
1382 printk(KERN_INFO "%s: using full-duplex 10Base-T (RJ-45)\n", dev->name); 1077 pr_info("%s: using full-duplex 10Base-T (RJ-45)\n", dev->name);
1383 break; 1078 break;
1384 case DETECTED_AUI: 1079 case DETECTED_AUI:
1385 printk(KERN_INFO "%s: using 10Base-5 (AUI)\n", dev->name); 1080 pr_info("%s: using 10Base-5 (AUI)\n", dev->name);
1386 break; 1081 break;
1387 case DETECTED_BNC: 1082 case DETECTED_BNC:
1388 printk(KERN_INFO "%s: using 10Base-2 (BNC)\n", dev->name); 1083 pr_info("%s: using 10Base-2 (BNC)\n", dev->name);
1389 break; 1084 break;
1390 } 1085 }
1391 1086
1392 /* Turn on both receive and transmit operations */ 1087 /* Turn on both receive and transmit operations */
1393 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON); 1088 writereg(dev, PP_LineCTL,
1089 readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);
1394 1090
1395 /* Receive only error free packets addressed to this card */ 1091 /* Receive only error free packets addressed to this card */
1396 lp->rx_mode = 0; 1092 lp->rx_mode = 0;
@@ -1405,358 +1101,653 @@ release_irq:
1405#endif 1101#endif
1406 writereg(dev, PP_RxCFG, lp->curr_rx_cfg); 1102 writereg(dev, PP_RxCFG, lp->curr_rx_cfg);
1407 1103
1408 writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL | 1104 writereg(dev, PP_TxCFG, (TX_LOST_CRS_ENBL |
1409 TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL); 1105 TX_SQE_ERROR_ENBL |
1106 TX_OK_ENBL |
1107 TX_LATE_COL_ENBL |
1108 TX_JBR_ENBL |
1109 TX_ANY_COL_ENBL |
1110 TX_16_COL_ENBL));
1410 1111
1411 writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL | 1112 writereg(dev, PP_BufCFG, (READY_FOR_TX_ENBL |
1113 RX_MISS_COUNT_OVRFLOW_ENBL |
1412#if ALLOW_DMA 1114#if ALLOW_DMA
1413 dma_bufcfg(dev) | 1115 dma_bufcfg(dev) |
1414#endif 1116#endif
1415 TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL); 1117 TX_COL_COUNT_OVRFLOW_ENBL |
1118 TX_UNDERRUN_ENBL));
1416 1119
1417 /* now that we've got our act together, enable everything */ 1120 /* now that we've got our act together, enable everything */
1418 writereg(dev, PP_BusCTL, ENABLE_IRQ 1121 writereg(dev, PP_BusCTL, (ENABLE_IRQ
1419 | (dev->mem_start?MEMORY_ON : 0) /* turn memory on */ 1122 | (dev->mem_start ? MEMORY_ON : 0) /* turn memory on */
1420#if ALLOW_DMA 1123#if ALLOW_DMA
1421 | dma_busctl(dev) 1124 | dma_busctl(dev)
1422#endif 1125#endif
1423 ); 1126 ));
1424 netif_start_queue(dev); 1127 netif_start_queue(dev);
1425 if (net_debug > 1) 1128 cs89_dbg(1, debug, "net_open() succeeded\n");
1426 printk("cs89x0: net_open() succeeded\n");
1427 return 0; 1129 return 0;
1428bad_out: 1130bad_out:
1429 return ret; 1131 return ret;
1430} 1132}
1431 1133
1134/* The inverse routine to net_open(). */
1135static int
1136net_close(struct net_device *dev)
1137{
1138#if ALLOW_DMA
1139 struct net_local *lp = netdev_priv(dev);
1140#endif
1141
1142 netif_stop_queue(dev);
1143
1144 writereg(dev, PP_RxCFG, 0);
1145 writereg(dev, PP_TxCFG, 0);
1146 writereg(dev, PP_BufCFG, 0);
1147 writereg(dev, PP_BusCTL, 0);
1148
1149 free_irq(dev->irq, dev);
1150
1151#if ALLOW_DMA
1152 if (lp->use_dma && lp->dma) {
1153 free_dma(dev->dma);
1154 release_dma_buff(lp);
1155 }
1156#endif
1157
1158 /* Update the statistics here. */
1159 return 0;
1160}
1161
1162/* Get the current statistics.
1163 * This may be called with the card open or closed.
1164 */
1165static struct net_device_stats *
1166net_get_stats(struct net_device *dev)
1167{
1168 struct net_local *lp = netdev_priv(dev);
1169 unsigned long flags;
1170
1171 spin_lock_irqsave(&lp->lock, flags);
1172 /* Update the statistics from the device registers. */
1173 dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
1174 dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
1175 spin_unlock_irqrestore(&lp->lock, flags);
1176
1177 return &dev->stats;
1178}
1179
1432static void net_timeout(struct net_device *dev) 1180static void net_timeout(struct net_device *dev)
1433{ 1181{
1434 /* If we get here, some higher level has decided we are broken. 1182 /* If we get here, some higher level has decided we are broken.
1435 There should really be a "kick me" function call instead. */ 1183 There should really be a "kick me" function call instead. */
1436 if (net_debug > 0) printk("%s: transmit timed out, %s?\n", dev->name, 1184 cs89_dbg(0, err, "%s: transmit timed out, %s?\n",
1437 tx_done(dev) ? "IRQ conflict ?" : "network cable problem"); 1185 dev->name,
1186 tx_done(dev) ? "IRQ conflict" : "network cable problem");
1438 /* Try to restart the adaptor. */ 1187 /* Try to restart the adaptor. */
1439 netif_wake_queue(dev); 1188 netif_wake_queue(dev);
1440} 1189}
1441 1190
1442static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev) 1191static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
1443{ 1192{
1444 struct net_local *lp = netdev_priv(dev); 1193 struct net_local *lp = netdev_priv(dev);
1445 unsigned long flags; 1194 unsigned long flags;
1446 1195
1447 if (net_debug > 3) { 1196 cs89_dbg(3, debug, "%s: sent %d byte packet of type %x\n",
1448 printk("%s: sent %d byte packet of type %x\n", 1197 dev->name, skb->len,
1449 dev->name, skb->len, 1198 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
1450 (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]); 1199 skb->data[ETH_ALEN + ETH_ALEN + 1]));
1451 }
1452 1200
1453 /* keep the upload from being interrupted, since we 1201 /* keep the upload from being interrupted, since we
1454 ask the chip to start transmitting before the 1202 * ask the chip to start transmitting before the
1455 whole packet has been completely uploaded. */ 1203 * whole packet has been completely uploaded.
1204 */
1456 1205
1457 spin_lock_irqsave(&lp->lock, flags); 1206 spin_lock_irqsave(&lp->lock, flags);
1458 netif_stop_queue(dev); 1207 netif_stop_queue(dev);
1459 1208
1460 /* initiate a transmit sequence */ 1209 /* initiate a transmit sequence */
1461 writeword(dev->base_addr, TX_CMD_PORT, lp->send_cmd); 1210 iowrite16(lp->send_cmd, lp->virt_addr + TX_CMD_PORT);
1462 writeword(dev->base_addr, TX_LEN_PORT, skb->len); 1211 iowrite16(skb->len, lp->virt_addr + TX_LEN_PORT);
1463 1212
1464 /* Test to see if the chip has allocated memory for the packet */ 1213 /* Test to see if the chip has allocated memory for the packet */
1465 if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) { 1214 if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
1466 /* 1215 /* Gasp! It hasn't. But that shouldn't happen since
1467 * Gasp! It hasn't. But that shouldn't happen since
1468 * we're waiting for TxOk, so return 1 and requeue this packet. 1216 * we're waiting for TxOk, so return 1 and requeue this packet.
1469 */ 1217 */
1470 1218
1471 spin_unlock_irqrestore(&lp->lock, flags); 1219 spin_unlock_irqrestore(&lp->lock, flags);
1472 if (net_debug) printk("cs89x0: Tx buffer not free!\n"); 1220 cs89_dbg(0, err, "Tx buffer not free!\n");
1473 return NETDEV_TX_BUSY; 1221 return NETDEV_TX_BUSY;
1474 } 1222 }
1475 /* Write the contents of the packet */ 1223 /* Write the contents of the packet */
1476 writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1); 1224 writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
1477 spin_unlock_irqrestore(&lp->lock, flags); 1225 spin_unlock_irqrestore(&lp->lock, flags);
1478 dev->stats.tx_bytes += skb->len; 1226 dev->stats.tx_bytes += skb->len;
1479 dev_kfree_skb (skb); 1227 dev_kfree_skb(skb);
1480 1228
1481 /* 1229 /* We DO NOT call netif_wake_queue() here.
1482 * We DO NOT call netif_wake_queue() here.
1483 * We also DO NOT call netif_start_queue(). 1230 * We also DO NOT call netif_start_queue().
1484 * 1231 *
1485 * Either of these would cause another bottom half run through 1232 * Either of these would cause another bottom half run through
1486 * net_send_packet() before this packet has fully gone out. That causes 1233 * net_send_packet() before this packet has fully gone out.
1487 * us to hit the "Gasp!" above and the send is rescheduled. it runs like 1234 * That causes us to hit the "Gasp!" above and the send is rescheduled.
1488 * a dog. We just return and wait for the Tx completion interrupt handler 1235 * it runs like a dog. We just return and wait for the Tx completion
1489 * to restart the netdevice layer 1236 * interrupt handler to restart the netdevice layer
1490 */ 1237 */
1491 1238
1492 return NETDEV_TX_OK; 1239 return NETDEV_TX_OK;
1493} 1240}
1494 1241
1495/* The typical workload of the driver: 1242static void set_multicast_list(struct net_device *dev)
1496 Handle the network interface interrupts. */ 1243{
1244 struct net_local *lp = netdev_priv(dev);
1245 unsigned long flags;
1497 1246
1498static irqreturn_t net_interrupt(int irq, void *dev_id) 1247 spin_lock_irqsave(&lp->lock, flags);
1248 if (dev->flags & IFF_PROMISC)
1249 lp->rx_mode = RX_ALL_ACCEPT;
1250 else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
1251 /* The multicast-accept list is initialized to accept-all,
1252 * and we rely on higher-level filtering for now.
1253 */
1254 lp->rx_mode = RX_MULTCAST_ACCEPT;
1255 else
1256 lp->rx_mode = 0;
1257
1258 writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);
1259
1260 /* in promiscuous mode, we accept errored packets,
1261 * so we have to enable interrupts on them also
1262 */
1263 writereg(dev, PP_RxCFG,
1264 (lp->curr_rx_cfg |
1265 (lp->rx_mode == RX_ALL_ACCEPT)
1266 ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL)
1267 : 0));
1268 spin_unlock_irqrestore(&lp->lock, flags);
1269}
1270
1271static int set_mac_address(struct net_device *dev, void *p)
1499{ 1272{
1500 struct net_device *dev = dev_id; 1273 int i;
1501 struct net_local *lp; 1274 struct sockaddr *addr = p;
1502 int ioaddr, status;
1503 int handled = 0;
1504 1275
1505 ioaddr = dev->base_addr; 1276 if (netif_running(dev))
1506 lp = netdev_priv(dev); 1277 return -EBUSY;
1507 1278
1508 /* we MUST read all the events out of the ISQ, otherwise we'll never 1279 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1509 get interrupted again. As a consequence, we can't have any limit 1280
1510 on the number of times we loop in the interrupt handler. The 1281 cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n",
1511 hardware guarantees that eventually we'll run out of events. Of 1282 dev->name, dev->dev_addr);
1512 course, if you're on a slow machine, and packets are arriving 1283
1513 faster than you can read them off, you're screwed. Hasta la 1284 /* set the Ethernet address */
1514 vista, baby! */ 1285 for (i = 0; i < ETH_ALEN / 2; i++)
1515 while ((status = readword(dev->base_addr, ISQ_PORT))) { 1286 writereg(dev, PP_IA + i * 2,
1516 if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status); 1287 (dev->dev_addr[i * 2] |
1517 handled = 1; 1288 (dev->dev_addr[i * 2 + 1] << 8)));
1518 switch(status & ISQ_EVENT_MASK) { 1289
1519 case ISQ_RECEIVER_EVENT: 1290 return 0;
1520 /* Got a packet(s). */
1521 net_rx(dev);
1522 break;
1523 case ISQ_TRANSMITTER_EVENT:
1524 dev->stats.tx_packets++;
1525 netif_wake_queue(dev); /* Inform upper layers. */
1526 if ((status & ( TX_OK |
1527 TX_LOST_CRS |
1528 TX_SQE_ERROR |
1529 TX_LATE_COL |
1530 TX_16_COL)) != TX_OK) {
1531 if ((status & TX_OK) == 0)
1532 dev->stats.tx_errors++;
1533 if (status & TX_LOST_CRS)
1534 dev->stats.tx_carrier_errors++;
1535 if (status & TX_SQE_ERROR)
1536 dev->stats.tx_heartbeat_errors++;
1537 if (status & TX_LATE_COL)
1538 dev->stats.tx_window_errors++;
1539 if (status & TX_16_COL)
1540 dev->stats.tx_aborted_errors++;
1541 }
1542 break;
1543 case ISQ_BUFFER_EVENT:
1544 if (status & READY_FOR_TX) {
1545 /* we tried to transmit a packet earlier,
1546 but inexplicably ran out of buffers.
1547 That shouldn't happen since we only ever
1548 load one packet. Shrug. Do the right
1549 thing anyway. */
1550 netif_wake_queue(dev); /* Inform upper layers. */
1551 }
1552 if (status & TX_UNDERRUN) {
1553 if (net_debug > 0) printk("%s: transmit underrun\n", dev->name);
1554 lp->send_underrun++;
1555 if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381;
1556 else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL;
1557 /* transmit cycle is done, although
1558 frame wasn't transmitted - this
1559 avoids having to wait for the upper
1560 layers to timeout on us, in the
1561 event of a tx underrun */
1562 netif_wake_queue(dev); /* Inform upper layers. */
1563 }
1564#if ALLOW_DMA
1565 if (lp->use_dma && (status & RX_DMA)) {
1566 int count = readreg(dev, PP_DmaFrameCnt);
1567 while(count) {
1568 if (net_debug > 5)
1569 printk("%s: receiving %d DMA frames\n", dev->name, count);
1570 if (net_debug > 2 && count >1)
1571 printk("%s: receiving %d DMA frames\n", dev->name, count);
1572 dma_rx(dev);
1573 if (--count == 0)
1574 count = readreg(dev, PP_DmaFrameCnt);
1575 if (net_debug > 2 && count > 0)
1576 printk("%s: continuing with %d DMA frames\n", dev->name, count);
1577 }
1578 }
1579#endif
1580 break;
1581 case ISQ_RX_MISS_EVENT:
1582 dev->stats.rx_missed_errors += (status >> 6);
1583 break;
1584 case ISQ_TX_COL_EVENT:
1585 dev->stats.collisions += (status >> 6);
1586 break;
1587 }
1588 }
1589 return IRQ_RETVAL(handled);
1590} 1291}
1591 1292
1592static void 1293#ifdef CONFIG_NET_POLL_CONTROLLER
1593count_rx_errors(int status, struct net_device *dev) 1294/*
1295 * Polling receive - used by netconsole and other diagnostic tools
1296 * to allow network i/o with interrupts disabled.
1297 */
1298static void net_poll_controller(struct net_device *dev)
1594{ 1299{
1595 dev->stats.rx_errors++; 1300 disable_irq(dev->irq);
1596 if (status & RX_RUNT) 1301 net_interrupt(dev->irq, dev);
1597 dev->stats.rx_length_errors++; 1302 enable_irq(dev->irq);
1598 if (status & RX_EXTRA_DATA)
1599 dev->stats.rx_length_errors++;
1600 if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA|RX_RUNT)))
1601 /* per str 172 */
1602 dev->stats.rx_crc_errors++;
1603 if (status & RX_DRIBBLE)
1604 dev->stats.rx_frame_errors++;
1605} 1303}
1304#endif
1606 1305
1607/* We have a good packet(s), get it/them out of the buffers. */ 1306static const struct net_device_ops net_ops = {
1608static void 1307 .ndo_open = net_open,
1609net_rx(struct net_device *dev) 1308 .ndo_stop = net_close,
1309 .ndo_tx_timeout = net_timeout,
1310 .ndo_start_xmit = net_send_packet,
1311 .ndo_get_stats = net_get_stats,
1312 .ndo_set_rx_mode = set_multicast_list,
1313 .ndo_set_mac_address = set_mac_address,
1314#ifdef CONFIG_NET_POLL_CONTROLLER
1315 .ndo_poll_controller = net_poll_controller,
1316#endif
1317 .ndo_change_mtu = eth_change_mtu,
1318 .ndo_validate_addr = eth_validate_addr,
1319};
1320
1321static void __init reset_chip(struct net_device *dev)
1610{ 1322{
1611 struct sk_buff *skb; 1323#if !defined(CONFIG_MACH_MX31ADS)
1612 int status, length; 1324#if !defined(CS89x0_NONISA_IRQ)
1325 struct net_local *lp = netdev_priv(dev);
1326#endif /* CS89x0_NONISA_IRQ */
1327 int reset_start_time;
1613 1328
1614 int ioaddr = dev->base_addr; 1329 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
1615 status = readword(ioaddr, RX_FRAME_PORT);
1616 length = readword(ioaddr, RX_FRAME_PORT);
1617 1330
1618 if ((status & RX_OK) == 0) { 1331 /* wait 30 ms */
1619 count_rx_errors(status, dev); 1332 msleep(30);
1620 return; 1333
1334#if !defined(CS89x0_NONISA_IRQ)
1335 if (lp->chip_type != CS8900) {
1336 /* Hardware problem requires PNP registers to be reconfigured after a reset */
1337 iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT);
1338 iowrite8(dev->irq, lp->virt_addr + DATA_PORT);
1339 iowrite8(0, lp->virt_addr + DATA_PORT + 1);
1340
1341 iowrite16(PP_CS8920_ISAMemB, lp->virt_addr + ADD_PORT);
1342 iowrite8((dev->mem_start >> 16) & 0xff,
1343 lp->virt_addr + DATA_PORT);
1344 iowrite8((dev->mem_start >> 8) & 0xff,
1345 lp->virt_addr + DATA_PORT + 1);
1621 } 1346 }
1347#endif /* CS89x0_NONISA_IRQ */
1622 1348
1623 /* Malloc up new buffer. */ 1349 /* Wait until the chip is reset */
1624 skb = netdev_alloc_skb(dev, length + 2); 1350 reset_start_time = jiffies;
1625 if (skb == NULL) { 1351 while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 &&
1626#if 0 /* Again, this seems a cruel thing to do */ 1352 jiffies - reset_start_time < 2)
1627 printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); 1353 ;
1354#endif /* !CONFIG_MACH_MX31ADS */
1355}
1356
1357/* This is the real probe routine.
1358 * Linux has a history of friendly device probes on the ISA bus.
1359 * A good device probes avoids doing writes, and
1360 * verifies that the correct device exists and functions.
1361 * Return 0 on success.
1362 */
1363static int __init
1364cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
1365{
1366 struct net_local *lp = netdev_priv(dev);
1367 int i;
1368 int tmp;
1369 unsigned rev_type = 0;
1370 int eeprom_buff[CHKSUM_LEN];
1371 int retval;
1372
1373 /* Initialize the device structure. */
1374 if (!modular) {
1375 memset(lp, 0, sizeof(*lp));
1376 spin_lock_init(&lp->lock);
1377#ifndef MODULE
1378#if ALLOW_DMA
1379 if (g_cs89x0_dma) {
1380 lp->use_dma = 1;
1381 lp->dma = g_cs89x0_dma;
1382 lp->dmasize = 16; /* Could make this an option... */
1383 }
1384#endif
1385 lp->force = g_cs89x0_media__force;
1628#endif 1386#endif
1629 dev->stats.rx_dropped++;
1630 return;
1631 } 1387 }
1632 skb_reserve(skb, 2); /* longword align L3 header */
1633 1388
1634 readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1); 1389 pr_debug("PP_addr at %p[%x]: 0x%x\n",
1635 if (length & 1) 1390 ioaddr, ADD_PORT, ioread16(ioaddr + ADD_PORT));
1636 skb->data[length-1] = readword(ioaddr, RX_FRAME_PORT); 1391 iowrite16(PP_ChipID, ioaddr + ADD_PORT);
1637 1392
1638 if (net_debug > 3) { 1393 tmp = ioread16(ioaddr + DATA_PORT);
1639 printk( "%s: received %d byte packet of type %x\n", 1394 if (tmp != CHIP_EISA_ID_SIG) {
1640 dev->name, length, 1395 pr_debug("%s: incorrect signature at %p[%x]: 0x%x!="
1641 (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]); 1396 CHIP_EISA_ID_SIG_STR "\n",
1397 dev->name, ioaddr, DATA_PORT, tmp);
1398 retval = -ENODEV;
1399 goto out1;
1642 } 1400 }
1643 1401
1644 skb->protocol=eth_type_trans(skb,dev); 1402 lp->virt_addr = ioaddr;
1645 netif_rx(skb);
1646 dev->stats.rx_packets++;
1647 dev->stats.rx_bytes += length;
1648}
1649 1403
1650#if ALLOW_DMA 1404 /* get the chip type */
1651static void release_dma_buff(struct net_local *lp) 1405 rev_type = readreg(dev, PRODUCT_ID_ADD);
1652{ 1406 lp->chip_type = rev_type & ~REVISON_BITS;
1653 if (lp->dma_buff) { 1407 lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';
1654 free_pages((unsigned long)(lp->dma_buff), get_order(lp->dmasize * 1024)); 1408
1655 lp->dma_buff = NULL; 1409 /* Check the chip type and revision in order to set the correct
1410 * send command. CS8920 revision C and CS8900 revision F can use
1411 * the faster send.
1412 */
1413 lp->send_cmd = TX_AFTER_381;
1414 if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
1415 lp->send_cmd = TX_NOW;
1416 if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
1417 lp->send_cmd = TX_NOW;
1418
1419 pr_info_once("%s\n", version);
1420
1421 pr_info("%s: cs89%c0%s rev %c found at %p ",
1422 dev->name,
1423 lp->chip_type == CS8900 ? '0' : '2',
1424 lp->chip_type == CS8920M ? "M" : "",
1425 lp->chip_revision,
1426 lp->virt_addr);
1427
1428 reset_chip(dev);
1429
1430 /* Here we read the current configuration of the chip.
1431 * If there is no Extended EEPROM then the idea is to not disturb
1432 * the chip configuration, it should have been correctly setup by
1433 * automatic EEPROM read on reset. So, if the chip says it read
1434 * the EEPROM the driver will always do *something* instead of
1435 * complain that adapter_cnf is 0.
1436 */
1437
1438 if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
1439 (EEPROM_OK | EEPROM_PRESENT)) {
1440 /* Load the MAC. */
1441 for (i = 0; i < ETH_ALEN / 2; i++) {
1442 unsigned int Addr;
1443 Addr = readreg(dev, PP_IA + i * 2);
1444 dev->dev_addr[i * 2] = Addr & 0xFF;
1445 dev->dev_addr[i * 2 + 1] = Addr >> 8;
1446 }
1447
1448 /* Load the Adapter Configuration.
1449 * Note: Barring any more specific information from some
1450 * other source (ie EEPROM+Schematics), we would not know
1451 * how to operate a 10Base2 interface on the AUI port.
1452 * However, since we do read the status of HCB1 and use
1453 * settings that always result in calls to control_dc_dc(dev,0)
1454 * a BNC interface should work if the enable pin
1455 * (dc/dc converter) is on HCB1.
1456 * It will be called AUI however.
1457 */
1458
1459 lp->adapter_cnf = 0;
1460 i = readreg(dev, PP_LineCTL);
1461 /* Preserve the setting of the HCB1 pin. */
1462 if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL))
1463 lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;
1464 /* Save the sqelch bit */
1465 if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH)
1466 lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;
1467 /* Check if the card is in 10Base-t only mode */
1468 if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
1469 lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T;
1470 /* Check if the card is in AUI only mode */
1471 if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
1472 lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI;
1473 /* Check if the card is in Auto mode. */
1474 if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
1475 lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T |
1476 A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;
1477
1478 cs89_dbg(1, info, "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
1479 dev->name, i, lp->adapter_cnf);
1480
1481 /* IRQ. Other chips already probe, see below. */
1482 if (lp->chip_type == CS8900)
1483 lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;
1484
1485 pr_cont("[Cirrus EEPROM] ");
1656 } 1486 }
1657}
1658#endif
1659 1487
1660/* The inverse routine to net_open(). */ 1488 pr_cont("\n");
1661static int
1662net_close(struct net_device *dev)
1663{
1664#if ALLOW_DMA
1665 struct net_local *lp = netdev_priv(dev);
1666#endif
1667 1489
1668 netif_stop_queue(dev); 1490 /* First check to see if an EEPROM is attached. */
1669 1491
1670 writereg(dev, PP_RxCFG, 0); 1492 if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
1671 writereg(dev, PP_TxCFG, 0); 1493 pr_warn("No EEPROM, relying on command line....\n");
1672 writereg(dev, PP_BufCFG, 0); 1494 else if (get_eeprom_data(dev, START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) {
1673 writereg(dev, PP_BusCTL, 0); 1495 pr_warn("EEPROM read failed, relying on command line\n");
1496 } else if (get_eeprom_cksum(START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) {
1497 /* Check if the chip was able to read its own configuration starting
1498 at 0 in the EEPROM*/
1499 if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
1500 (EEPROM_OK | EEPROM_PRESENT))
1501 pr_warn("Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");
1674 1502
1675 free_irq(dev->irq, dev); 1503 } else {
1504 /* This reads an extended EEPROM that is not documented
1505 * in the CS8900 datasheet.
1506 */
1676 1507
1677#if ALLOW_DMA 1508 /* get transmission control word but keep the autonegotiation bits */
1678 if (lp->use_dma && lp->dma) { 1509 if (!lp->auto_neg_cnf)
1679 free_dma(dev->dma); 1510 lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET / 2];
1680 release_dma_buff(lp); 1511 /* Store adapter configuration */
1512 if (!lp->adapter_cnf)
1513 lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET / 2];
1514 /* Store ISA configuration */
1515 lp->isa_config = eeprom_buff[ISA_CNF_OFFSET / 2];
1516 dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET / 2] << 8;
1517
1518 /* eeprom_buff has 32-bit ints, so we can't just memcpy it */
1519 /* store the initial memory base address */
1520 for (i = 0; i < ETH_ALEN / 2; i++) {
1521 dev->dev_addr[i * 2] = eeprom_buff[i];
1522 dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8;
1523 }
1524 cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n",
1525 dev->name, lp->adapter_cnf);
1681 } 1526 }
1527
1528 /* allow them to force multiple transceivers. If they force multiple, autosense */
1529 {
1530 int count = 0;
1531 if (lp->force & FORCE_RJ45) {
1532 lp->adapter_cnf |= A_CNF_10B_T;
1533 count++;
1534 }
1535 if (lp->force & FORCE_AUI) {
1536 lp->adapter_cnf |= A_CNF_AUI;
1537 count++;
1538 }
1539 if (lp->force & FORCE_BNC) {
1540 lp->adapter_cnf |= A_CNF_10B_2;
1541 count++;
1542 }
1543 if (count > 1)
1544 lp->adapter_cnf |= A_CNF_MEDIA_AUTO;
1545 else if (lp->force & FORCE_RJ45)
1546 lp->adapter_cnf |= A_CNF_MEDIA_10B_T;
1547 else if (lp->force & FORCE_AUI)
1548 lp->adapter_cnf |= A_CNF_MEDIA_AUI;
1549 else if (lp->force & FORCE_BNC)
1550 lp->adapter_cnf |= A_CNF_MEDIA_10B_2;
1551 }
1552
1553 cs89_dbg(1, debug, "%s: after force 0x%x, adapter_cnf=0x%x\n",
1554 dev->name, lp->force, lp->adapter_cnf);
1555
1556 /* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */
1557
1558 /* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */
1559
1560 /* FIXME: we don't set the Ethernet address on the command line. Use
1561 * ifconfig IFACE hw ether AABBCCDDEEFF
1562 */
1563
1564 pr_info("media %s%s%s",
1565 (lp->adapter_cnf & A_CNF_10B_T) ? "RJ-45," : "",
1566 (lp->adapter_cnf & A_CNF_AUI) ? "AUI," : "",
1567 (lp->adapter_cnf & A_CNF_10B_2) ? "BNC," : "");
1568
1569 lp->irq_map = 0xffff;
1570
1571 /* If this is a CS8900 then no pnp soft */
1572 if (lp->chip_type != CS8900 &&
1573 /* Check if the ISA IRQ has been set */
1574 (i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
1575 (i != 0 && i < CS8920_NO_INTS))) {
1576 if (!dev->irq)
1577 dev->irq = i;
1578 } else {
1579 i = lp->isa_config & INT_NO_MASK;
1580#ifndef CONFIG_CS89x0_PLATFORM
1581 if (lp->chip_type == CS8900) {
1582#ifdef CS89x0_NONISA_IRQ
1583 i = cs8900_irq_map[0];
1584#else
1585 /* Translate the IRQ using the IRQ mapping table. */
1586 if (i >= ARRAY_SIZE(cs8900_irq_map))
1587 pr_err("invalid ISA interrupt number %d\n", i);
1588 else
1589 i = cs8900_irq_map[i];
1590
1591 lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */
1592 } else {
1593 int irq_map_buff[IRQ_MAP_LEN/2];
1594
1595 if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
1596 IRQ_MAP_LEN / 2,
1597 irq_map_buff) >= 0) {
1598 if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
1599 lp->irq_map = ((irq_map_buff[0] >> 8) |
1600 (irq_map_buff[1] << 8));
1601 }
1682#endif 1602#endif
1603 }
1604#endif
1605 if (!dev->irq)
1606 dev->irq = i;
1607 }
1683 1608
1684 /* Update the statistics here. */ 1609 pr_cont(" IRQ %d", dev->irq);
1685 return 0;
1686}
1687 1610
1688/* Get the current statistics. This may be called with the card open or 1611#if ALLOW_DMA
1689 closed. */ 1612 if (lp->use_dma) {
1690static struct net_device_stats * 1613 get_dma_channel(dev);
1691net_get_stats(struct net_device *dev) 1614 pr_cont(", DMA %d", dev->dma);
1692{ 1615 } else
1693 struct net_local *lp = netdev_priv(dev); 1616#endif
1694 unsigned long flags; 1617 pr_cont(", programmed I/O");
1695 1618
1696 spin_lock_irqsave(&lp->lock, flags); 1619 /* print the ethernet address. */
1697 /* Update the statistics from the device registers. */ 1620 pr_cont(", MAC %pM\n", dev->dev_addr);
1698 dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
1699 dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
1700 spin_unlock_irqrestore(&lp->lock, flags);
1701 1621
1702 return &dev->stats; 1622 dev->netdev_ops = &net_ops;
1623 dev->watchdog_timeo = HZ;
1624
1625 cs89_dbg(0, info, "cs89x0_probe1() successful\n");
1626
1627 retval = register_netdev(dev);
1628 if (retval)
1629 goto out2;
1630 return 0;
1631out2:
1632 iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT);
1633out1:
1634 return retval;
1703} 1635}
1704 1636
1705static void set_multicast_list(struct net_device *dev) 1637#ifndef CONFIG_CS89x0_PLATFORM
1638/*
1639 * This function converts the I/O port addres used by the cs89x0_probe() and
1640 * init_module() functions to the I/O memory address used by the
1641 * cs89x0_probe1() function.
1642 */
1643static int __init
1644cs89x0_ioport_probe(struct net_device *dev, unsigned long ioport, int modular)
1706{ 1645{
1707 struct net_local *lp = netdev_priv(dev); 1646 struct net_local *lp = netdev_priv(dev);
1708 unsigned long flags; 1647 int ret;
1648 void __iomem *io_mem;
1709 1649
1710 spin_lock_irqsave(&lp->lock, flags); 1650 if (!lp)
1711 if(dev->flags&IFF_PROMISC) 1651 return -ENOMEM;
1712 { 1652
1713 lp->rx_mode = RX_ALL_ACCEPT; 1653 dev->base_addr = ioport;
1654
1655 if (!request_region(ioport, NETCARD_IO_EXTENT, DRV_NAME)) {
1656 ret = -EBUSY;
1657 goto out;
1714 } 1658 }
1715 else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) 1659
1716 { 1660 io_mem = ioport_map(ioport & ~3, NETCARD_IO_EXTENT);
1717 /* The multicast-accept list is initialized to accept-all, and we 1661 if (!io_mem) {
1718 rely on higher-level filtering for now. */ 1662 ret = -ENOMEM;
1719 lp->rx_mode = RX_MULTCAST_ACCEPT; 1663 goto release;
1720 } 1664 }
1721 else
1722 lp->rx_mode = 0;
1723 1665
1724 writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode); 1666 /* if they give us an odd I/O address, then do ONE write to
1667 * the address port, to get it back to address zero, where we
1668 * expect to find the EISA signature word. An IO with a base of 0x3
1669 * will skip the test for the ADD_PORT.
1670 */
1671 if (ioport & 1) {
1672 cs89_dbg(1, info, "%s: odd ioaddr 0x%lx\n", dev->name, ioport);
1673 if ((ioport & 2) != 2) {
1674 if ((ioread16(io_mem + ADD_PORT) & ADD_MASK) !=
1675 ADD_SIG) {
1676 pr_err("%s: bad signature 0x%x\n",
1677 dev->name, ioread16(io_mem + ADD_PORT));
1678 ret = -ENODEV;
1679 goto unmap;
1680 }
1681 }
1682 }
1725 1683
1726 /* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */ 1684 ret = cs89x0_probe1(dev, io_mem, modular);
1727 writereg(dev, PP_RxCFG, lp->curr_rx_cfg | 1685 if (!ret)
1728 (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0)); 1686 goto out;
1729 spin_unlock_irqrestore(&lp->lock, flags); 1687unmap:
1688 ioport_unmap(io_mem);
1689release:
1690 release_region(ioport, NETCARD_IO_EXTENT);
1691out:
1692 return ret;
1730} 1693}
1731 1694
1695#ifndef MODULE
1696/* Check for a network adaptor of this type, and return '0' iff one exists.
1697 * If dev->base_addr == 0, probe all likely locations.
1698 * If dev->base_addr == 1, always return failure.
1699 * If dev->base_addr == 2, allocate space for the device and return success
1700 * (detachable devices only).
1701 * Return 0 on success.
1702 */
1732 1703
1733static int set_mac_address(struct net_device *dev, void *p) 1704struct net_device * __init cs89x0_probe(int unit)
1734{ 1705{
1735 int i; 1706 struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
1736 struct sockaddr *addr = p; 1707 unsigned *port;
1737 1708 int err = 0;
1738 if (netif_running(dev)) 1709 int irq;
1739 return -EBUSY; 1710 int io;
1740 1711
1741 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1712 if (!dev)
1713 return ERR_PTR(-ENODEV);
1742 1714
1743 if (net_debug) 1715 sprintf(dev->name, "eth%d", unit);
1744 printk("%s: Setting MAC address to %pM.\n", 1716 netdev_boot_setup_check(dev);
1745 dev->name, dev->dev_addr); 1717 io = dev->base_addr;
1718 irq = dev->irq;
1746 1719
1747 /* set the Ethernet address */ 1720 cs89_dbg(0, info, "cs89x0_probe(0x%x)\n", io);
1748 for (i=0; i < ETH_ALEN/2; i++)
1749 writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
1750 1721
1751 return 0; 1722 if (io > 0x1ff) { /* Check a single specified location. */
1723 err = cs89x0_ioport_probe(dev, io, 0);
1724 } else if (io != 0) { /* Don't probe at all. */
1725 err = -ENXIO;
1726 } else {
1727 for (port = netcard_portlist; *port; port++) {
1728 if (cs89x0_ioport_probe(dev, *port, 0) == 0)
1729 break;
1730 dev->irq = irq;
1731 }
1732 if (!*port)
1733 err = -ENODEV;
1734 }
1735 if (err)
1736 goto out;
1737 return dev;
1738out:
1739 free_netdev(dev);
1740 pr_warn("no cs8900 or cs8920 detected. Be sure to disable PnP with SETUP\n");
1741 return ERR_PTR(err);
1752} 1742}
1743#endif
1744#endif
1753 1745
1754#if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM) 1746#if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM)
1755 1747
1756static struct net_device *dev_cs89x0; 1748static struct net_device *dev_cs89x0;
1757 1749
1758/* 1750/* Support the 'debug' module parm even if we're compiled for non-debug to
1759 * Support the 'debug' module parm even if we're compiled for non-debug to
1760 * avoid breaking someone's startup scripts 1751 * avoid breaking someone's startup scripts
1761 */ 1752 */
1762 1753
@@ -1764,11 +1755,11 @@ static int io;
1764static int irq; 1755static int irq;
1765static int debug; 1756static int debug;
1766static char media[8]; 1757static char media[8];
1767static int duplex=-1; 1758static int duplex = -1;
1768 1759
1769static int use_dma; /* These generate unused var warnings if ALLOW_DMA = 0 */ 1760static int use_dma; /* These generate unused var warnings if ALLOW_DMA = 0 */
1770static int dma; 1761static int dma;
1771static int dmasize=16; /* or 64 */ 1762static int dmasize = 16; /* or 64 */
1772 1763
1773module_param(io, int, 0); 1764module_param(io, int, 0);
1774module_param(irq, int, 0); 1765module_param(irq, int, 0);
@@ -1801,32 +1792,28 @@ MODULE_PARM_DESC(use_dma , "(ignored)");
1801MODULE_AUTHOR("Mike Cruse, Russwll Nelson <nelson@crynwr.com>, Andrew Morton"); 1792MODULE_AUTHOR("Mike Cruse, Russwll Nelson <nelson@crynwr.com>, Andrew Morton");
1802MODULE_LICENSE("GPL"); 1793MODULE_LICENSE("GPL");
1803 1794
1804
1805/* 1795/*
1806* media=t - specify media type 1796 * media=t - specify media type
1807 or media=2 1797 * or media=2
1808 or media=aui 1798 * or media=aui
1809 or medai=auto 1799 * or medai=auto
1810* duplex=0 - specify forced half/full/autonegotiate duplex 1800 * duplex=0 - specify forced half/full/autonegotiate duplex
1811* debug=# - debug level 1801 * debug=# - debug level
1812 1802 *
1813 1803 * Default Chip Configuration:
1814* Default Chip Configuration: 1804 * DMA Burst = enabled
1815 * DMA Burst = enabled 1805 * IOCHRDY Enabled = enabled
1816 * IOCHRDY Enabled = enabled 1806 * UseSA = enabled
1817 * UseSA = enabled 1807 * CS8900 defaults to half-duplex if not specified on command-line
1818 * CS8900 defaults to half-duplex if not specified on command-line 1808 * CS8920 defaults to autoneg if not specified on command-line
1819 * CS8920 defaults to autoneg if not specified on command-line 1809 * Use reset defaults for other config parameters
1820 * Use reset defaults for other config parameters 1810 *
1821 1811 * Assumptions:
1822* Assumptions: 1812 * media type specified is supported (circuitry is present)
1823 * media type specified is supported (circuitry is present) 1813 * if memory address is > 1MB, then required mem decode hw is present
1824 * if memory address is > 1MB, then required mem decode hw is present 1814 * if 10B-2, then agent other than driver will enable DC/DC converter
1825 * if 10B-2, then agent other than driver will enable DC/DC converter 1815 * (hw or software util)
1826 (hw or software util) 1816 */
1827
1828
1829*/
1830 1817
1831int __init init_module(void) 1818int __init init_module(void)
1832{ 1819{
@@ -1856,8 +1843,8 @@ int __init init_module(void)
1856 1843
1857 spin_lock_init(&lp->lock); 1844 spin_lock_init(&lp->lock);
1858 1845
1859 /* boy, they'd better get these right */ 1846 /* boy, they'd better get these right */
1860 if (!strcmp(media, "rj45")) 1847 if (!strcmp(media, "rj45"))
1861 lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T; 1848 lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
1862 else if (!strcmp(media, "aui")) 1849 else if (!strcmp(media, "aui"))
1863 lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI; 1850 lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI;
@@ -1866,27 +1853,28 @@ int __init init_module(void)
1866 else 1853 else
1867 lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T; 1854 lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
1868 1855
1869 if (duplex==-1) 1856 if (duplex == -1)
1870 lp->auto_neg_cnf = AUTO_NEG_ENABLE; 1857 lp->auto_neg_cnf = AUTO_NEG_ENABLE;
1871 1858
1872 if (io == 0) { 1859 if (io == 0) {
1873 printk(KERN_ERR "cs89x0.c: Module autoprobing not allowed.\n"); 1860 pr_err("Module autoprobing not allowed\n");
1874 printk(KERN_ERR "cs89x0.c: Append io=0xNNN\n"); 1861 pr_err("Append io=0xNNN\n");
1875 ret = -EPERM; 1862 ret = -EPERM;
1876 goto out; 1863 goto out;
1877 } else if (io <= 0x1ff) { 1864 } else if (io <= 0x1ff) {
1878 ret = -ENXIO; 1865 ret = -ENXIO;
1879 goto out; 1866 goto out;
1880 } 1867 }
1881 1868
1882#if ALLOW_DMA 1869#if ALLOW_DMA
1883 if (use_dma && dmasize != 16 && dmasize != 64) { 1870 if (use_dma && dmasize != 16 && dmasize != 64) {
1884 printk(KERN_ERR "cs89x0.c: dma size must be either 16K or 64K, not %dK\n", dmasize); 1871 pr_err("dma size must be either 16K or 64K, not %dK\n",
1872 dmasize);
1885 ret = -EPERM; 1873 ret = -EPERM;
1886 goto out; 1874 goto out;
1887 } 1875 }
1888#endif 1876#endif
1889 ret = cs89x0_probe1(dev, io, 1); 1877 ret = cs89x0_ioport_probe(dev, io, 1);
1890 if (ret) 1878 if (ret)
1891 goto out; 1879 goto out;
1892 1880
@@ -1900,8 +1888,11 @@ out:
1900void __exit 1888void __exit
1901cleanup_module(void) 1889cleanup_module(void)
1902{ 1890{
1891 struct net_local *lp = netdev_priv(dev_cs89x0);
1892
1903 unregister_netdev(dev_cs89x0); 1893 unregister_netdev(dev_cs89x0);
1904 writeword(dev_cs89x0->base_addr, ADD_PORT, PP_ChipID); 1894 iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT);
1895 ioport_unmap(lp->virt_addr);
1905 release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT); 1896 release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
1906 free_netdev(dev_cs89x0); 1897 free_netdev(dev_cs89x0);
1907} 1898}
@@ -1913,6 +1904,7 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev)
1913 struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); 1904 struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
1914 struct net_local *lp; 1905 struct net_local *lp;
1915 struct resource *mem_res; 1906 struct resource *mem_res;
1907 void __iomem *virt_addr;
1916 int err; 1908 int err;
1917 1909
1918 if (!dev) 1910 if (!dev)
@@ -1923,29 +1915,28 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev)
1923 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1915 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1924 dev->irq = platform_get_irq(pdev, 0); 1916 dev->irq = platform_get_irq(pdev, 0);
1925 if (mem_res == NULL || dev->irq <= 0) { 1917 if (mem_res == NULL || dev->irq <= 0) {
1926 dev_warn(&dev->dev, "memory/interrupt resource missing.\n"); 1918 dev_warn(&dev->dev, "memory/interrupt resource missing\n");
1927 err = -ENXIO; 1919 err = -ENXIO;
1928 goto free; 1920 goto free;
1929 } 1921 }
1930 1922
1931 lp->phys_addr = mem_res->start;
1932 lp->size = resource_size(mem_res); 1923 lp->size = resource_size(mem_res);
1933 if (!request_mem_region(lp->phys_addr, lp->size, DRV_NAME)) { 1924 if (!request_mem_region(mem_res->start, lp->size, DRV_NAME)) {
1934 dev_warn(&dev->dev, "request_mem_region() failed.\n"); 1925 dev_warn(&dev->dev, "request_mem_region() failed\n");
1935 err = -EBUSY; 1926 err = -EBUSY;
1936 goto free; 1927 goto free;
1937 } 1928 }
1938 1929
1939 lp->virt_addr = ioremap(lp->phys_addr, lp->size); 1930 virt_addr = ioremap(mem_res->start, lp->size);
1940 if (!lp->virt_addr) { 1931 if (!virt_addr) {
1941 dev_warn(&dev->dev, "ioremap() failed.\n"); 1932 dev_warn(&dev->dev, "ioremap() failed\n");
1942 err = -ENOMEM; 1933 err = -ENOMEM;
1943 goto release; 1934 goto release;
1944 } 1935 }
1945 1936
1946 err = cs89x0_probe1(dev, (unsigned long)lp->virt_addr, 0); 1937 err = cs89x0_probe1(dev, virt_addr, 0);
1947 if (err) { 1938 if (err) {
1948 dev_warn(&dev->dev, "no cs8900 or cs8920 detected.\n"); 1939 dev_warn(&dev->dev, "no cs8900 or cs8920 detected\n");
1949 goto unmap; 1940 goto unmap;
1950 } 1941 }
1951 1942
@@ -1953,9 +1944,9 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev)
1953 return 0; 1944 return 0;
1954 1945
1955unmap: 1946unmap:
1956 iounmap(lp->virt_addr); 1947 iounmap(virt_addr);
1957release: 1948release:
1958 release_mem_region(lp->phys_addr, lp->size); 1949 release_mem_region(mem_res->start, lp->size);
1959free: 1950free:
1960 free_netdev(dev); 1951 free_netdev(dev);
1961 return err; 1952 return err;
@@ -1965,10 +1956,16 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
1965{ 1956{
1966 struct net_device *dev = platform_get_drvdata(pdev); 1957 struct net_device *dev = platform_get_drvdata(pdev);
1967 struct net_local *lp = netdev_priv(dev); 1958 struct net_local *lp = netdev_priv(dev);
1959 struct resource *mem_res;
1968 1960
1961 /* This platform_get_resource() call will not return NULL, because
1962 * the same call in cs89x0_platform_probe() has returned a non NULL
1963 * value.
1964 */
1965 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1969 unregister_netdev(dev); 1966 unregister_netdev(dev);
1970 iounmap(lp->virt_addr); 1967 iounmap(lp->virt_addr);
1971 release_mem_region(lp->phys_addr, lp->size); 1968 release_mem_region(mem_res->start, lp->size);
1972 free_netdev(dev); 1969 free_netdev(dev);
1973 return 0; 1970 return 0;
1974} 1971}
@@ -1996,13 +1993,3 @@ static void __exit cs89x0_cleanup(void)
1996module_exit(cs89x0_cleanup); 1993module_exit(cs89x0_cleanup);
1997 1994
1998#endif /* CONFIG_CS89x0_PLATFORM */ 1995#endif /* CONFIG_CS89x0_PLATFORM */
1999
2000/*
2001 * Local variables:
2002 * version-control: t
2003 * kept-new-versions: 5
2004 * c-indent-level: 8
2005 * tab-width: 8
2006 * End:
2007 *
2008 */
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 77b4e873f91c..8132c785cea8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -944,8 +944,7 @@ static void enic_update_multicast_addr_list(struct enic *enic)
944 944
945 for (i = 0; i < enic->mc_count; i++) { 945 for (i = 0; i < enic->mc_count; i++) {
946 for (j = 0; j < mc_count; j++) 946 for (j = 0; j < mc_count; j++)
947 if (compare_ether_addr(enic->mc_addr[i], 947 if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
948 mc_addr[j]) == 0)
949 break; 948 break;
950 if (j == mc_count) 949 if (j == mc_count)
951 enic_dev_del_addr(enic, enic->mc_addr[i]); 950 enic_dev_del_addr(enic, enic->mc_addr[i]);
@@ -953,8 +952,7 @@ static void enic_update_multicast_addr_list(struct enic *enic)
953 952
954 for (i = 0; i < mc_count; i++) { 953 for (i = 0; i < mc_count; i++) {
955 for (j = 0; j < enic->mc_count; j++) 954 for (j = 0; j < enic->mc_count; j++)
956 if (compare_ether_addr(mc_addr[i], 955 if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
957 enic->mc_addr[j]) == 0)
958 break; 956 break;
959 if (j == enic->mc_count) 957 if (j == enic->mc_count)
960 enic_dev_add_addr(enic, mc_addr[i]); 958 enic_dev_add_addr(enic, mc_addr[i]);
@@ -999,8 +997,7 @@ static void enic_update_unicast_addr_list(struct enic *enic)
999 997
1000 for (i = 0; i < enic->uc_count; i++) { 998 for (i = 0; i < enic->uc_count; i++) {
1001 for (j = 0; j < uc_count; j++) 999 for (j = 0; j < uc_count; j++)
1002 if (compare_ether_addr(enic->uc_addr[i], 1000 if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
1003 uc_addr[j]) == 0)
1004 break; 1001 break;
1005 if (j == uc_count) 1002 if (j == uc_count)
1006 enic_dev_del_addr(enic, enic->uc_addr[i]); 1003 enic_dev_del_addr(enic, enic->uc_addr[i]);
@@ -1008,8 +1005,7 @@ static void enic_update_unicast_addr_list(struct enic *enic)
1008 1005
1009 for (i = 0; i < uc_count; i++) { 1006 for (i = 0; i < uc_count; i++) {
1010 for (j = 0; j < enic->uc_count; j++) 1007 for (j = 0; j < enic->uc_count; j++)
1011 if (compare_ether_addr(uc_addr[i], 1008 if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
1012 enic->uc_addr[j]) == 0)
1013 break; 1009 break;
1014 if (j == enic->uc_count) 1010 if (j == enic->uc_count)
1015 enic_dev_add_addr(enic, uc_addr[i]); 1011 enic_dev_add_addr(enic, uc_addr[i]);
@@ -1193,18 +1189,16 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1193 if (err) 1189 if (err)
1194 return err; 1190 return err;
1195 1191
1196 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request); 1192 if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
1197 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); 1193 nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
1198 if (pp->set & ENIC_SET_NAME) 1194 ((pp->set & ENIC_SET_NAME) &&
1199 NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, 1195 nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
1200 pp->name); 1196 ((pp->set & ENIC_SET_INSTANCE) &&
1201 if (pp->set & ENIC_SET_INSTANCE) 1197 nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
1202 NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, 1198 pp->instance_uuid)) ||
1203 pp->instance_uuid); 1199 ((pp->set & ENIC_SET_HOST) &&
1204 if (pp->set & ENIC_SET_HOST) 1200 nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
1205 NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, 1201 goto nla_put_failure;
1206 pp->host_uuid);
1207
1208 return 0; 1202 return 0;
1209 1203
1210nla_put_failure: 1204nla_put_failure:
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index dafea1ecb7b1..43464f0a4f99 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -184,7 +184,7 @@ static int (*enic_pp_handlers[])(struct enic *enic, int vf,
184}; 184};
185 185
186static const int enic_pp_handlers_count = 186static const int enic_pp_handlers_count =
187 sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers); 187 ARRAY_SIZE(enic_pp_handlers);
188 188
189static int enic_pp_preassociate(struct enic *enic, int vf, 189static int enic_pp_preassociate(struct enic *enic, int vf,
190 struct enic_port_profile *prev_pp, int *restore_pp) 190 struct enic_port_profile *prev_pp, int *restore_pp)
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 972b62b31837..9745fe5e8039 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -4,7 +4,7 @@
4 4
5config DM9000 5config DM9000
6 tristate "DM9000 support" 6 tristate "DM9000 support"
7 depends on ARM || BLACKFIN || MIPS 7 depends on ARM || BLACKFIN || MIPS || COLDFIRE
8 select CRC32 8 select CRC32
9 select NET_CORE 9 select NET_CORE
10 select MII 10 select MII
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
index 1879f84a25a3..17ae8c619680 100644
--- a/drivers/net/ethernet/dec/ewrk3.c
+++ b/drivers/net/ethernet/dec/ewrk3.c
@@ -1016,7 +1016,8 @@ static int ewrk3_rx(struct net_device *dev)
1016 } else { 1016 } else {
1017 lp->pktStats.multicast++; 1017 lp->pktStats.multicast++;
1018 } 1018 }
1019 } else if (compare_ether_addr(p, dev->dev_addr) == 0) { 1019 } else if (ether_addr_equal(p,
1020 dev->dev_addr)) {
1020 lp->pktStats.unicast++; 1021 lp->pktStats.unicast++;
1021 } 1022 }
1022 lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ 1023 lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 68f1c39184df..61cc09342865 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1380,6 +1380,7 @@ static void de_free_rings (struct de_private *de)
1380static int de_open (struct net_device *dev) 1380static int de_open (struct net_device *dev)
1381{ 1381{
1382 struct de_private *de = netdev_priv(dev); 1382 struct de_private *de = netdev_priv(dev);
1383 const int irq = de->pdev->irq;
1383 int rc; 1384 int rc;
1384 1385
1385 netif_dbg(de, ifup, dev, "enabling interface\n"); 1386 netif_dbg(de, ifup, dev, "enabling interface\n");
@@ -1394,10 +1395,9 @@ static int de_open (struct net_device *dev)
1394 1395
1395 dw32(IntrMask, 0); 1396 dw32(IntrMask, 0);
1396 1397
1397 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev); 1398 rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1398 if (rc) { 1399 if (rc) {
1399 netdev_err(dev, "IRQ %d request failure, err=%d\n", 1400 netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
1400 dev->irq, rc);
1401 goto err_out_free; 1401 goto err_out_free;
1402 } 1402 }
1403 1403
@@ -1413,7 +1413,7 @@ static int de_open (struct net_device *dev)
1413 return 0; 1413 return 0;
1414 1414
1415err_out_free_irq: 1415err_out_free_irq:
1416 free_irq(dev->irq, dev); 1416 free_irq(irq, dev);
1417err_out_free: 1417err_out_free:
1418 de_free_rings(de); 1418 de_free_rings(de);
1419 return rc; 1419 return rc;
@@ -1434,7 +1434,7 @@ static int de_close (struct net_device *dev)
1434 netif_carrier_off(dev); 1434 netif_carrier_off(dev);
1435 spin_unlock_irqrestore(&de->lock, flags); 1435 spin_unlock_irqrestore(&de->lock, flags);
1436 1436
1437 free_irq(dev->irq, dev); 1437 free_irq(de->pdev->irq, dev);
1438 1438
1439 de_free_rings(de); 1439 de_free_rings(de);
1440 de_adapter_sleep(de); 1440 de_adapter_sleep(de);
@@ -1444,6 +1444,7 @@ static int de_close (struct net_device *dev)
1444static void de_tx_timeout (struct net_device *dev) 1444static void de_tx_timeout (struct net_device *dev)
1445{ 1445{
1446 struct de_private *de = netdev_priv(dev); 1446 struct de_private *de = netdev_priv(dev);
1447 const int irq = de->pdev->irq;
1447 1448
1448 netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n", 1449 netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1449 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), 1450 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
@@ -1451,7 +1452,7 @@ static void de_tx_timeout (struct net_device *dev)
1451 1452
1452 del_timer_sync(&de->media_timer); 1453 del_timer_sync(&de->media_timer);
1453 1454
1454 disable_irq(dev->irq); 1455 disable_irq(irq);
1455 spin_lock_irq(&de->lock); 1456 spin_lock_irq(&de->lock);
1456 1457
1457 de_stop_hw(de); 1458 de_stop_hw(de);
@@ -1459,12 +1460,12 @@ static void de_tx_timeout (struct net_device *dev)
1459 netif_carrier_off(dev); 1460 netif_carrier_off(dev);
1460 1461
1461 spin_unlock_irq(&de->lock); 1462 spin_unlock_irq(&de->lock);
1462 enable_irq(dev->irq); 1463 enable_irq(irq);
1463 1464
1464 /* Update the error counts. */ 1465 /* Update the error counts. */
1465 __de_get_stats(de); 1466 __de_get_stats(de);
1466 1467
1467 synchronize_irq(dev->irq); 1468 synchronize_irq(irq);
1468 de_clean_rings(de); 1469 de_clean_rings(de);
1469 1470
1470 de_init_rings(de); 1471 de_init_rings(de);
@@ -2024,8 +2025,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2024 goto err_out_res; 2025 goto err_out_res;
2025 } 2026 }
2026 2027
2027 dev->irq = pdev->irq;
2028
2029 /* obtain and check validity of PCI I/O address */ 2028 /* obtain and check validity of PCI I/O address */
2030 pciaddr = pci_resource_start(pdev, 1); 2029 pciaddr = pci_resource_start(pdev, 1);
2031 if (!pciaddr) { 2030 if (!pciaddr) {
@@ -2050,7 +2049,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2050 pciaddr, pci_name(pdev)); 2049 pciaddr, pci_name(pdev));
2051 goto err_out_res; 2050 goto err_out_res;
2052 } 2051 }
2053 dev->base_addr = (unsigned long) regs;
2054 de->regs = regs; 2052 de->regs = regs;
2055 2053
2056 de_adapter_wake(de); 2054 de_adapter_wake(de);
@@ -2078,11 +2076,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2078 goto err_out_iomap; 2076 goto err_out_iomap;
2079 2077
2080 /* print info about board and interface just registered */ 2078 /* print info about board and interface just registered */
2081 netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", 2079 netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
2082 de->de21040 ? "21040" : "21041", 2080 de->de21040 ? "21040" : "21041",
2083 dev->base_addr, 2081 regs, dev->dev_addr, pdev->irq);
2084 dev->dev_addr,
2085 dev->irq);
2086 2082
2087 pci_set_drvdata(pdev, dev); 2083 pci_set_drvdata(pdev, dev);
2088 2084
@@ -2130,9 +2126,11 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2130 2126
2131 rtnl_lock(); 2127 rtnl_lock();
2132 if (netif_running (dev)) { 2128 if (netif_running (dev)) {
2129 const int irq = pdev->irq;
2130
2133 del_timer_sync(&de->media_timer); 2131 del_timer_sync(&de->media_timer);
2134 2132
2135 disable_irq(dev->irq); 2133 disable_irq(irq);
2136 spin_lock_irq(&de->lock); 2134 spin_lock_irq(&de->lock);
2137 2135
2138 de_stop_hw(de); 2136 de_stop_hw(de);
@@ -2141,12 +2139,12 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2141 netif_carrier_off(dev); 2139 netif_carrier_off(dev);
2142 2140
2143 spin_unlock_irq(&de->lock); 2141 spin_unlock_irq(&de->lock);
2144 enable_irq(dev->irq); 2142 enable_irq(irq);
2145 2143
2146 /* Update the error counts. */ 2144 /* Update the error counts. */
2147 __de_get_stats(de); 2145 __de_get_stats(de);
2148 2146
2149 synchronize_irq(dev->irq); 2147 synchronize_irq(irq);
2150 de_clean_rings(de); 2148 de_clean_rings(de);
2151 2149
2152 de_adapter_sleep(de); 2150 de_adapter_sleep(de);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 18b106cc6d2b..d3cd489d11a2 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1874,7 +1874,7 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1874 } else { 1874 } else {
1875 lp->pktStats.multicast++; 1875 lp->pktStats.multicast++;
1876 } 1876 }
1877 } else if (compare_ether_addr(buf, dev->dev_addr) == 0) { 1877 } else if (ether_addr_equal(buf, dev->dev_addr)) {
1878 lp->pktStats.unicast++; 1878 lp->pktStats.unicast++;
1879 } 1879 }
1880 1880
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 1eccf4945485..4d6fe604fa64 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -150,6 +150,12 @@
150#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ 150#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
151#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ 151#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
152 152
153#define dw32(reg, val) iowrite32(val, ioaddr + (reg))
154#define dw16(reg, val) iowrite16(val, ioaddr + (reg))
155#define dr32(reg) ioread32(ioaddr + (reg))
156#define dr16(reg) ioread16(ioaddr + (reg))
157#define dr8(reg) ioread8(ioaddr + (reg))
158
153#define DMFE_DBUG(dbug_now, msg, value) \ 159#define DMFE_DBUG(dbug_now, msg, value) \
154 do { \ 160 do { \
155 if (dmfe_debug || (dbug_now)) \ 161 if (dmfe_debug || (dbug_now)) \
@@ -178,14 +184,6 @@
178 184
179#define SROM_V41_CODE 0x14 185#define SROM_V41_CODE 0x14
180 186
181#define SROM_CLK_WRITE(data, ioaddr) \
182 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
183 udelay(5); \
184 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
185 udelay(5); \
186 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
187 udelay(5);
188
189#define __CHK_IO_SIZE(pci_id, dev_rev) \ 187#define __CHK_IO_SIZE(pci_id, dev_rev) \
190 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \ 188 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
191 DM9102A_IO_SIZE: DM9102_IO_SIZE) 189 DM9102A_IO_SIZE: DM9102_IO_SIZE)
@@ -213,11 +211,11 @@ struct rx_desc {
213struct dmfe_board_info { 211struct dmfe_board_info {
214 u32 chip_id; /* Chip vendor/Device ID */ 212 u32 chip_id; /* Chip vendor/Device ID */
215 u8 chip_revision; /* Chip revision */ 213 u8 chip_revision; /* Chip revision */
216 struct DEVICE *next_dev; /* next device */ 214 struct net_device *next_dev; /* next device */
217 struct pci_dev *pdev; /* PCI device */ 215 struct pci_dev *pdev; /* PCI device */
218 spinlock_t lock; 216 spinlock_t lock;
219 217
220 long ioaddr; /* I/O base address */ 218 void __iomem *ioaddr; /* I/O base address */
221 u32 cr0_data; 219 u32 cr0_data;
222 u32 cr5_data; 220 u32 cr5_data;
223 u32 cr6_data; 221 u32 cr6_data;
@@ -320,20 +318,20 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
320static int dmfe_stop(struct DEVICE *); 318static int dmfe_stop(struct DEVICE *);
321static void dmfe_set_filter_mode(struct DEVICE *); 319static void dmfe_set_filter_mode(struct DEVICE *);
322static const struct ethtool_ops netdev_ethtool_ops; 320static const struct ethtool_ops netdev_ethtool_ops;
323static u16 read_srom_word(long ,int); 321static u16 read_srom_word(void __iomem *, int);
324static irqreturn_t dmfe_interrupt(int , void *); 322static irqreturn_t dmfe_interrupt(int , void *);
325#ifdef CONFIG_NET_POLL_CONTROLLER 323#ifdef CONFIG_NET_POLL_CONTROLLER
326static void poll_dmfe (struct net_device *dev); 324static void poll_dmfe (struct net_device *dev);
327#endif 325#endif
328static void dmfe_descriptor_init(struct net_device *, unsigned long); 326static void dmfe_descriptor_init(struct net_device *);
329static void allocate_rx_buffer(struct net_device *); 327static void allocate_rx_buffer(struct net_device *);
330static void update_cr6(u32, unsigned long); 328static void update_cr6(u32, void __iomem *);
331static void send_filter_frame(struct DEVICE *); 329static void send_filter_frame(struct DEVICE *);
332static void dm9132_id_table(struct DEVICE *); 330static void dm9132_id_table(struct DEVICE *);
333static u16 phy_read(unsigned long, u8, u8, u32); 331static u16 phy_read(void __iomem *, u8, u8, u32);
334static void phy_write(unsigned long, u8, u8, u16, u32); 332static void phy_write(void __iomem *, u8, u8, u16, u32);
335static void phy_write_1bit(unsigned long, u32); 333static void phy_write_1bit(void __iomem *, u32);
336static u16 phy_read_1bit(unsigned long); 334static u16 phy_read_1bit(void __iomem *);
337static u8 dmfe_sense_speed(struct dmfe_board_info *); 335static u8 dmfe_sense_speed(struct dmfe_board_info *);
338static void dmfe_process_mode(struct dmfe_board_info *); 336static void dmfe_process_mode(struct dmfe_board_info *);
339static void dmfe_timer(unsigned long); 337static void dmfe_timer(unsigned long);
@@ -462,14 +460,16 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
462 db->buf_pool_dma_start = db->buf_pool_dma_ptr; 460 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
463 461
464 db->chip_id = ent->driver_data; 462 db->chip_id = ent->driver_data;
465 db->ioaddr = pci_resource_start(pdev, 0); 463 /* IO type range. */
464 db->ioaddr = pci_iomap(pdev, 0, 0);
465 if (!db->ioaddr)
466 goto err_out_free_buf;
467
466 db->chip_revision = pdev->revision; 468 db->chip_revision = pdev->revision;
467 db->wol_mode = 0; 469 db->wol_mode = 0;
468 470
469 db->pdev = pdev; 471 db->pdev = pdev;
470 472
471 dev->base_addr = db->ioaddr;
472 dev->irq = pdev->irq;
473 pci_set_drvdata(pdev, dev); 473 pci_set_drvdata(pdev, dev);
474 dev->netdev_ops = &netdev_ops; 474 dev->netdev_ops = &netdev_ops;
475 dev->ethtool_ops = &netdev_ethtool_ops; 475 dev->ethtool_ops = &netdev_ethtool_ops;
@@ -484,9 +484,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
484 db->chip_type = 0; 484 db->chip_type = 0;
485 485
486 /* read 64 word srom data */ 486 /* read 64 word srom data */
487 for (i = 0; i < 64; i++) 487 for (i = 0; i < 64; i++) {
488 ((__le16 *) db->srom)[i] = 488 ((__le16 *) db->srom)[i] =
489 cpu_to_le16(read_srom_word(db->ioaddr, i)); 489 cpu_to_le16(read_srom_word(db->ioaddr, i));
490 }
490 491
491 /* Set Node address */ 492 /* Set Node address */
492 for (i = 0; i < 6; i++) 493 for (i = 0; i < 6; i++)
@@ -494,16 +495,18 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
494 495
495 err = register_netdev (dev); 496 err = register_netdev (dev);
496 if (err) 497 if (err)
497 goto err_out_free_buf; 498 goto err_out_unmap;
498 499
499 dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n", 500 dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
500 ent->driver_data >> 16, 501 ent->driver_data >> 16,
501 pci_name(pdev), dev->dev_addr, dev->irq); 502 pci_name(pdev), dev->dev_addr, pdev->irq);
502 503
503 pci_set_master(pdev); 504 pci_set_master(pdev);
504 505
505 return 0; 506 return 0;
506 507
508err_out_unmap:
509 pci_iounmap(pdev, db->ioaddr);
507err_out_free_buf: 510err_out_free_buf:
508 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, 511 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
509 db->buf_pool_ptr, db->buf_pool_dma_ptr); 512 db->buf_pool_ptr, db->buf_pool_dma_ptr);
@@ -532,7 +535,7 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev)
532 if (dev) { 535 if (dev) {
533 536
534 unregister_netdev(dev); 537 unregister_netdev(dev);
535 538 pci_iounmap(db->pdev, db->ioaddr);
536 pci_free_consistent(db->pdev, sizeof(struct tx_desc) * 539 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
537 DESC_ALL_CNT + 0x20, db->desc_pool_ptr, 540 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
538 db->desc_pool_dma_ptr); 541 db->desc_pool_dma_ptr);
@@ -555,13 +558,13 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev)
555 558
556static int dmfe_open(struct DEVICE *dev) 559static int dmfe_open(struct DEVICE *dev)
557{ 560{
558 int ret;
559 struct dmfe_board_info *db = netdev_priv(dev); 561 struct dmfe_board_info *db = netdev_priv(dev);
562 const int irq = db->pdev->irq;
563 int ret;
560 564
561 DMFE_DBUG(0, "dmfe_open", 0); 565 DMFE_DBUG(0, "dmfe_open", 0);
562 566
563 ret = request_irq(dev->irq, dmfe_interrupt, 567 ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
564 IRQF_SHARED, dev->name, dev);
565 if (ret) 568 if (ret)
566 return ret; 569 return ret;
567 570
@@ -615,14 +618,14 @@ static int dmfe_open(struct DEVICE *dev)
615static void dmfe_init_dm910x(struct DEVICE *dev) 618static void dmfe_init_dm910x(struct DEVICE *dev)
616{ 619{
617 struct dmfe_board_info *db = netdev_priv(dev); 620 struct dmfe_board_info *db = netdev_priv(dev);
618 unsigned long ioaddr = db->ioaddr; 621 void __iomem *ioaddr = db->ioaddr;
619 622
620 DMFE_DBUG(0, "dmfe_init_dm910x()", 0); 623 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
621 624
622 /* Reset DM910x MAC controller */ 625 /* Reset DM910x MAC controller */
623 outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */ 626 dw32(DCR0, DM910X_RESET); /* RESET MAC */
624 udelay(100); 627 udelay(100);
625 outl(db->cr0_data, ioaddr + DCR0); 628 dw32(DCR0, db->cr0_data);
626 udelay(5); 629 udelay(5);
627 630
628 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */ 631 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
@@ -633,12 +636,12 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
633 db->media_mode = dmfe_media_mode; 636 db->media_mode = dmfe_media_mode;
634 637
635 /* RESET Phyxcer Chip by GPR port bit 7 */ 638 /* RESET Phyxcer Chip by GPR port bit 7 */
636 outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */ 639 dw32(DCR12, 0x180); /* Let bit 7 output port */
637 if (db->chip_id == PCI_DM9009_ID) { 640 if (db->chip_id == PCI_DM9009_ID) {
638 outl(0x80, ioaddr + DCR12); /* Issue RESET signal */ 641 dw32(DCR12, 0x80); /* Issue RESET signal */
639 mdelay(300); /* Delay 300 ms */ 642 mdelay(300); /* Delay 300 ms */
640 } 643 }
641 outl(0x0, ioaddr + DCR12); /* Clear RESET signal */ 644 dw32(DCR12, 0x0); /* Clear RESET signal */
642 645
643 /* Process Phyxcer Media Mode */ 646 /* Process Phyxcer Media Mode */
644 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */ 647 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
@@ -649,7 +652,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
649 db->op_mode = db->media_mode; /* Force Mode */ 652 db->op_mode = db->media_mode; /* Force Mode */
650 653
651 /* Initialize Transmit/Receive decriptor and CR3/4 */ 654 /* Initialize Transmit/Receive decriptor and CR3/4 */
652 dmfe_descriptor_init(dev, ioaddr); 655 dmfe_descriptor_init(dev);
653 656
654 /* Init CR6 to program DM910x operation */ 657 /* Init CR6 to program DM910x operation */
655 update_cr6(db->cr6_data, ioaddr); 658 update_cr6(db->cr6_data, ioaddr);
@@ -662,10 +665,10 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
662 665
663 /* Init CR7, interrupt active bit */ 666 /* Init CR7, interrupt active bit */
664 db->cr7_data = CR7_DEFAULT; 667 db->cr7_data = CR7_DEFAULT;
665 outl(db->cr7_data, ioaddr + DCR7); 668 dw32(DCR7, db->cr7_data);
666 669
667 /* Init CR15, Tx jabber and Rx watchdog timer */ 670 /* Init CR15, Tx jabber and Rx watchdog timer */
668 outl(db->cr15_data, ioaddr + DCR15); 671 dw32(DCR15, db->cr15_data);
669 672
670 /* Enable DM910X Tx/Rx function */ 673 /* Enable DM910X Tx/Rx function */
671 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000; 674 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
@@ -682,6 +685,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
682 struct DEVICE *dev) 685 struct DEVICE *dev)
683{ 686{
684 struct dmfe_board_info *db = netdev_priv(dev); 687 struct dmfe_board_info *db = netdev_priv(dev);
688 void __iomem *ioaddr = db->ioaddr;
685 struct tx_desc *txptr; 689 struct tx_desc *txptr;
686 unsigned long flags; 690 unsigned long flags;
687 691
@@ -707,7 +711,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
707 } 711 }
708 712
709 /* Disable NIC interrupt */ 713 /* Disable NIC interrupt */
710 outl(0, dev->base_addr + DCR7); 714 dw32(DCR7, 0);
711 715
712 /* transmit this packet */ 716 /* transmit this packet */
713 txptr = db->tx_insert_ptr; 717 txptr = db->tx_insert_ptr;
@@ -721,11 +725,11 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
721 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) { 725 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
722 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ 726 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
723 db->tx_packet_cnt++; /* Ready to send */ 727 db->tx_packet_cnt++; /* Ready to send */
724 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 728 dw32(DCR1, 0x1); /* Issue Tx polling */
725 dev->trans_start = jiffies; /* saved time stamp */ 729 dev->trans_start = jiffies; /* saved time stamp */
726 } else { 730 } else {
727 db->tx_queue_cnt++; /* queue TX packet */ 731 db->tx_queue_cnt++; /* queue TX packet */
728 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 732 dw32(DCR1, 0x1); /* Issue Tx polling */
729 } 733 }
730 734
731 /* Tx resource check */ 735 /* Tx resource check */
@@ -734,7 +738,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
734 738
735 /* Restore CR7 to enable interrupt */ 739 /* Restore CR7 to enable interrupt */
736 spin_unlock_irqrestore(&db->lock, flags); 740 spin_unlock_irqrestore(&db->lock, flags);
737 outl(db->cr7_data, dev->base_addr + DCR7); 741 dw32(DCR7, db->cr7_data);
738 742
739 /* free this SKB */ 743 /* free this SKB */
740 dev_kfree_skb(skb); 744 dev_kfree_skb(skb);
@@ -751,7 +755,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
751static int dmfe_stop(struct DEVICE *dev) 755static int dmfe_stop(struct DEVICE *dev)
752{ 756{
753 struct dmfe_board_info *db = netdev_priv(dev); 757 struct dmfe_board_info *db = netdev_priv(dev);
754 unsigned long ioaddr = dev->base_addr; 758 void __iomem *ioaddr = db->ioaddr;
755 759
756 DMFE_DBUG(0, "dmfe_stop", 0); 760 DMFE_DBUG(0, "dmfe_stop", 0);
757 761
@@ -762,12 +766,12 @@ static int dmfe_stop(struct DEVICE *dev)
762 del_timer_sync(&db->timer); 766 del_timer_sync(&db->timer);
763 767
764 /* Reset & stop DM910X board */ 768 /* Reset & stop DM910X board */
765 outl(DM910X_RESET, ioaddr + DCR0); 769 dw32(DCR0, DM910X_RESET);
766 udelay(5); 770 udelay(100);
767 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); 771 phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
768 772
769 /* free interrupt */ 773 /* free interrupt */
770 free_irq(dev->irq, dev); 774 free_irq(db->pdev->irq, dev);
771 775
772 /* free allocated rx buffer */ 776 /* free allocated rx buffer */
773 dmfe_free_rxbuffer(db); 777 dmfe_free_rxbuffer(db);
@@ -794,7 +798,7 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
794{ 798{
795 struct DEVICE *dev = dev_id; 799 struct DEVICE *dev = dev_id;
796 struct dmfe_board_info *db = netdev_priv(dev); 800 struct dmfe_board_info *db = netdev_priv(dev);
797 unsigned long ioaddr = dev->base_addr; 801 void __iomem *ioaddr = db->ioaddr;
798 unsigned long flags; 802 unsigned long flags;
799 803
800 DMFE_DBUG(0, "dmfe_interrupt()", 0); 804 DMFE_DBUG(0, "dmfe_interrupt()", 0);
@@ -802,15 +806,15 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
802 spin_lock_irqsave(&db->lock, flags); 806 spin_lock_irqsave(&db->lock, flags);
803 807
804 /* Got DM910X status */ 808 /* Got DM910X status */
805 db->cr5_data = inl(ioaddr + DCR5); 809 db->cr5_data = dr32(DCR5);
806 outl(db->cr5_data, ioaddr + DCR5); 810 dw32(DCR5, db->cr5_data);
807 if ( !(db->cr5_data & 0xc1) ) { 811 if ( !(db->cr5_data & 0xc1) ) {
808 spin_unlock_irqrestore(&db->lock, flags); 812 spin_unlock_irqrestore(&db->lock, flags);
809 return IRQ_HANDLED; 813 return IRQ_HANDLED;
810 } 814 }
811 815
812 /* Disable all interrupt in CR7 to solve the interrupt edge problem */ 816 /* Disable all interrupt in CR7 to solve the interrupt edge problem */
813 outl(0, ioaddr + DCR7); 817 dw32(DCR7, 0);
814 818
815 /* Check system status */ 819 /* Check system status */
816 if (db->cr5_data & 0x2000) { 820 if (db->cr5_data & 0x2000) {
@@ -838,11 +842,11 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
838 if (db->dm910x_chk_mode & 0x2) { 842 if (db->dm910x_chk_mode & 0x2) {
839 db->dm910x_chk_mode = 0x4; 843 db->dm910x_chk_mode = 0x4;
840 db->cr6_data |= 0x100; 844 db->cr6_data |= 0x100;
841 update_cr6(db->cr6_data, db->ioaddr); 845 update_cr6(db->cr6_data, ioaddr);
842 } 846 }
843 847
844 /* Restore CR7 to enable interrupt mask */ 848 /* Restore CR7 to enable interrupt mask */
845 outl(db->cr7_data, ioaddr + DCR7); 849 dw32(DCR7, db->cr7_data);
846 850
847 spin_unlock_irqrestore(&db->lock, flags); 851 spin_unlock_irqrestore(&db->lock, flags);
848 return IRQ_HANDLED; 852 return IRQ_HANDLED;
@@ -858,11 +862,14 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
858 862
859static void poll_dmfe (struct net_device *dev) 863static void poll_dmfe (struct net_device *dev)
860{ 864{
865 struct dmfe_board_info *db = netdev_priv(dev);
866 const int irq = db->pdev->irq;
867
861 /* disable_irq here is not very nice, but with the lockless 868 /* disable_irq here is not very nice, but with the lockless
862 interrupt handler we have no other choice. */ 869 interrupt handler we have no other choice. */
863 disable_irq(dev->irq); 870 disable_irq(irq);
864 dmfe_interrupt (dev->irq, dev); 871 dmfe_interrupt (irq, dev);
865 enable_irq(dev->irq); 872 enable_irq(irq);
866} 873}
867#endif 874#endif
868 875
@@ -873,7 +880,7 @@ static void poll_dmfe (struct net_device *dev)
873static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) 880static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
874{ 881{
875 struct tx_desc *txptr; 882 struct tx_desc *txptr;
876 unsigned long ioaddr = dev->base_addr; 883 void __iomem *ioaddr = db->ioaddr;
877 u32 tdes0; 884 u32 tdes0;
878 885
879 txptr = db->tx_remove_ptr; 886 txptr = db->tx_remove_ptr;
@@ -897,7 +904,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
897 db->tx_fifo_underrun++; 904 db->tx_fifo_underrun++;
898 if ( !(db->cr6_data & CR6_SFT) ) { 905 if ( !(db->cr6_data & CR6_SFT) ) {
899 db->cr6_data = db->cr6_data | CR6_SFT; 906 db->cr6_data = db->cr6_data | CR6_SFT;
900 update_cr6(db->cr6_data, db->ioaddr); 907 update_cr6(db->cr6_data, ioaddr);
901 } 908 }
902 } 909 }
903 if (tdes0 & 0x0100) 910 if (tdes0 & 0x0100)
@@ -924,7 +931,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
924 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ 931 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
925 db->tx_packet_cnt++; /* Ready to send */ 932 db->tx_packet_cnt++; /* Ready to send */
926 db->tx_queue_cnt--; 933 db->tx_queue_cnt--;
927 outl(0x1, ioaddr + DCR1); /* Issue Tx polling */ 934 dw32(DCR1, 0x1); /* Issue Tx polling */
928 dev->trans_start = jiffies; /* saved time stamp */ 935 dev->trans_start = jiffies; /* saved time stamp */
929 } 936 }
930 937
@@ -1087,12 +1094,7 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1087 1094
1088 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 1095 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1089 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1096 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1090 if (np->pdev) 1097 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
1091 strlcpy(info->bus_info, pci_name(np->pdev),
1092 sizeof(info->bus_info));
1093 else
1094 sprintf(info->bus_info, "EISA 0x%lx %d",
1095 dev->base_addr, dev->irq);
1096} 1098}
1097 1099
1098static int dmfe_ethtool_set_wol(struct net_device *dev, 1100static int dmfe_ethtool_set_wol(struct net_device *dev,
@@ -1132,10 +1134,11 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1132 1134
1133static void dmfe_timer(unsigned long data) 1135static void dmfe_timer(unsigned long data)
1134{ 1136{
1137 struct net_device *dev = (struct net_device *)data;
1138 struct dmfe_board_info *db = netdev_priv(dev);
1139 void __iomem *ioaddr = db->ioaddr;
1135 u32 tmp_cr8; 1140 u32 tmp_cr8;
1136 unsigned char tmp_cr12; 1141 unsigned char tmp_cr12;
1137 struct DEVICE *dev = (struct DEVICE *) data;
1138 struct dmfe_board_info *db = netdev_priv(dev);
1139 unsigned long flags; 1142 unsigned long flags;
1140 1143
1141 int link_ok, link_ok_phy; 1144 int link_ok, link_ok_phy;
@@ -1148,11 +1151,10 @@ static void dmfe_timer(unsigned long data)
1148 db->first_in_callback = 1; 1151 db->first_in_callback = 1;
1149 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { 1152 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1150 db->cr6_data &= ~0x40000; 1153 db->cr6_data &= ~0x40000;
1151 update_cr6(db->cr6_data, db->ioaddr); 1154 update_cr6(db->cr6_data, ioaddr);
1152 phy_write(db->ioaddr, 1155 phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1153 db->phy_addr, 0, 0x1000, db->chip_id);
1154 db->cr6_data |= 0x40000; 1156 db->cr6_data |= 0x40000;
1155 update_cr6(db->cr6_data, db->ioaddr); 1157 update_cr6(db->cr6_data, ioaddr);
1156 db->timer.expires = DMFE_TIMER_WUT + HZ * 2; 1158 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1157 add_timer(&db->timer); 1159 add_timer(&db->timer);
1158 spin_unlock_irqrestore(&db->lock, flags); 1160 spin_unlock_irqrestore(&db->lock, flags);
@@ -1167,7 +1169,7 @@ static void dmfe_timer(unsigned long data)
1167 db->dm910x_chk_mode = 0x4; 1169 db->dm910x_chk_mode = 0x4;
1168 1170
1169 /* Dynamic reset DM910X : system error or transmit time-out */ 1171 /* Dynamic reset DM910X : system error or transmit time-out */
1170 tmp_cr8 = inl(db->ioaddr + DCR8); 1172 tmp_cr8 = dr32(DCR8);
1171 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { 1173 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1172 db->reset_cr8++; 1174 db->reset_cr8++;
1173 db->wait_reset = 1; 1175 db->wait_reset = 1;
@@ -1177,7 +1179,7 @@ static void dmfe_timer(unsigned long data)
1177 /* TX polling kick monitor */ 1179 /* TX polling kick monitor */
1178 if ( db->tx_packet_cnt && 1180 if ( db->tx_packet_cnt &&
1179 time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) { 1181 time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1180 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */ 1182 dw32(DCR1, 0x1); /* Tx polling again */
1181 1183
1182 /* TX Timeout */ 1184 /* TX Timeout */
1183 if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) { 1185 if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
@@ -1200,9 +1202,9 @@ static void dmfe_timer(unsigned long data)
1200 1202
1201 /* Link status check, Dynamic media type change */ 1203 /* Link status check, Dynamic media type change */
1202 if (db->chip_id == PCI_DM9132_ID) 1204 if (db->chip_id == PCI_DM9132_ID)
1203 tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */ 1205 tmp_cr12 = dr8(DCR9 + 3); /* DM9132 */
1204 else 1206 else
1205 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */ 1207 tmp_cr12 = dr8(DCR12); /* DM9102/DM9102A */
1206 1208
1207 if ( ((db->chip_id == PCI_DM9102_ID) && 1209 if ( ((db->chip_id == PCI_DM9102_ID) &&
1208 (db->chip_revision == 0x30)) || 1210 (db->chip_revision == 0x30)) ||
@@ -1251,7 +1253,7 @@ static void dmfe_timer(unsigned long data)
1251 /* 10/100M link failed, used 1M Home-Net */ 1253 /* 10/100M link failed, used 1M Home-Net */
1252 db->cr6_data|=0x00040000; /* bit18=1, MII */ 1254 db->cr6_data|=0x00040000; /* bit18=1, MII */
1253 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ 1255 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1254 update_cr6(db->cr6_data, db->ioaddr); 1256 update_cr6(db->cr6_data, ioaddr);
1255 } 1257 }
1256 } else if (!netif_carrier_ok(dev)) { 1258 } else if (!netif_carrier_ok(dev)) {
1257 1259
@@ -1288,17 +1290,18 @@ static void dmfe_timer(unsigned long data)
1288 * Re-initialize DM910X board 1290 * Re-initialize DM910X board
1289 */ 1291 */
1290 1292
1291static void dmfe_dynamic_reset(struct DEVICE *dev) 1293static void dmfe_dynamic_reset(struct net_device *dev)
1292{ 1294{
1293 struct dmfe_board_info *db = netdev_priv(dev); 1295 struct dmfe_board_info *db = netdev_priv(dev);
1296 void __iomem *ioaddr = db->ioaddr;
1294 1297
1295 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0); 1298 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1296 1299
1297 /* Sopt MAC controller */ 1300 /* Sopt MAC controller */
1298 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ 1301 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1299 update_cr6(db->cr6_data, dev->base_addr); 1302 update_cr6(db->cr6_data, ioaddr);
1300 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ 1303 dw32(DCR7, 0); /* Disable Interrupt */
1301 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); 1304 dw32(DCR5, dr32(DCR5));
1302 1305
1303 /* Disable upper layer interface */ 1306 /* Disable upper layer interface */
1304 netif_stop_queue(dev); 1307 netif_stop_queue(dev);
@@ -1364,9 +1367,10 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1364 * Using Chain structure, and allocate Tx/Rx buffer 1367 * Using Chain structure, and allocate Tx/Rx buffer
1365 */ 1368 */
1366 1369
1367static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr) 1370static void dmfe_descriptor_init(struct net_device *dev)
1368{ 1371{
1369 struct dmfe_board_info *db = netdev_priv(dev); 1372 struct dmfe_board_info *db = netdev_priv(dev);
1373 void __iomem *ioaddr = db->ioaddr;
1370 struct tx_desc *tmp_tx; 1374 struct tx_desc *tmp_tx;
1371 struct rx_desc *tmp_rx; 1375 struct rx_desc *tmp_rx;
1372 unsigned char *tmp_buf; 1376 unsigned char *tmp_buf;
@@ -1379,7 +1383,7 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
1379 /* tx descriptor start pointer */ 1383 /* tx descriptor start pointer */
1380 db->tx_insert_ptr = db->first_tx_desc; 1384 db->tx_insert_ptr = db->first_tx_desc;
1381 db->tx_remove_ptr = db->first_tx_desc; 1385 db->tx_remove_ptr = db->first_tx_desc;
1382 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ 1386 dw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */
1383 1387
1384 /* rx descriptor start pointer */ 1388 /* rx descriptor start pointer */
1385 db->first_rx_desc = (void *)db->first_tx_desc + 1389 db->first_rx_desc = (void *)db->first_tx_desc +
@@ -1389,7 +1393,7 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
1389 sizeof(struct tx_desc) * TX_DESC_CNT; 1393 sizeof(struct tx_desc) * TX_DESC_CNT;
1390 db->rx_insert_ptr = db->first_rx_desc; 1394 db->rx_insert_ptr = db->first_rx_desc;
1391 db->rx_ready_ptr = db->first_rx_desc; 1395 db->rx_ready_ptr = db->first_rx_desc;
1392 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ 1396 dw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */
1393 1397
1394 /* Init Transmit chain */ 1398 /* Init Transmit chain */
1395 tmp_buf = db->buf_pool_start; 1399 tmp_buf = db->buf_pool_start;
@@ -1431,14 +1435,14 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
1431 * Firstly stop DM910X , then written value and start 1435 * Firstly stop DM910X , then written value and start
1432 */ 1436 */
1433 1437
1434static void update_cr6(u32 cr6_data, unsigned long ioaddr) 1438static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1435{ 1439{
1436 u32 cr6_tmp; 1440 u32 cr6_tmp;
1437 1441
1438 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */ 1442 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1439 outl(cr6_tmp, ioaddr + DCR6); 1443 dw32(DCR6, cr6_tmp);
1440 udelay(5); 1444 udelay(5);
1441 outl(cr6_data, ioaddr + DCR6); 1445 dw32(DCR6, cr6_data);
1442 udelay(5); 1446 udelay(5);
1443} 1447}
1444 1448
@@ -1448,24 +1452,19 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1448 * This setup frame initialize DM910X address filter mode 1452 * This setup frame initialize DM910X address filter mode
1449*/ 1453*/
1450 1454
1451static void dm9132_id_table(struct DEVICE *dev) 1455static void dm9132_id_table(struct net_device *dev)
1452{ 1456{
1457 struct dmfe_board_info *db = netdev_priv(dev);
1458 void __iomem *ioaddr = db->ioaddr + 0xc0;
1459 u16 *addrptr = (u16 *)dev->dev_addr;
1453 struct netdev_hw_addr *ha; 1460 struct netdev_hw_addr *ha;
1454 u16 * addrptr;
1455 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1456 u32 hash_val;
1457 u16 i, hash_table[4]; 1461 u16 i, hash_table[4];
1458 1462
1459 DMFE_DBUG(0, "dm9132_id_table()", 0);
1460
1461 /* Node address */ 1463 /* Node address */
1462 addrptr = (u16 *) dev->dev_addr; 1464 for (i = 0; i < 3; i++) {
1463 outw(addrptr[0], ioaddr); 1465 dw16(0, addrptr[i]);
1464 ioaddr += 4; 1466 ioaddr += 4;
1465 outw(addrptr[1], ioaddr); 1467 }
1466 ioaddr += 4;
1467 outw(addrptr[2], ioaddr);
1468 ioaddr += 4;
1469 1468
1470 /* Clear Hash Table */ 1469 /* Clear Hash Table */
1471 memset(hash_table, 0, sizeof(hash_table)); 1470 memset(hash_table, 0, sizeof(hash_table));
@@ -1475,13 +1474,14 @@ static void dm9132_id_table(struct DEVICE *dev)
1475 1474
1476 /* the multicast address in Hash Table : 64 bits */ 1475 /* the multicast address in Hash Table : 64 bits */
1477 netdev_for_each_mc_addr(ha, dev) { 1476 netdev_for_each_mc_addr(ha, dev) {
1478 hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f; 1477 u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
1478
1479 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 1479 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1480 } 1480 }
1481 1481
1482 /* Write the hash table to MAC MD table */ 1482 /* Write the hash table to MAC MD table */
1483 for (i = 0; i < 4; i++, ioaddr += 4) 1483 for (i = 0; i < 4; i++, ioaddr += 4)
1484 outw(hash_table[i], ioaddr); 1484 dw16(0, hash_table[i]);
1485} 1485}
1486 1486
1487 1487
@@ -1490,7 +1490,7 @@ static void dm9132_id_table(struct DEVICE *dev)
1490 * This setup frame initialize DM910X address filter mode 1490 * This setup frame initialize DM910X address filter mode
1491 */ 1491 */
1492 1492
1493static void send_filter_frame(struct DEVICE *dev) 1493static void send_filter_frame(struct net_device *dev)
1494{ 1494{
1495 struct dmfe_board_info *db = netdev_priv(dev); 1495 struct dmfe_board_info *db = netdev_priv(dev);
1496 struct netdev_hw_addr *ha; 1496 struct netdev_hw_addr *ha;
@@ -1535,12 +1535,14 @@ static void send_filter_frame(struct DEVICE *dev)
1535 1535
1536 /* Resource Check and Send the setup packet */ 1536 /* Resource Check and Send the setup packet */
1537 if (!db->tx_packet_cnt) { 1537 if (!db->tx_packet_cnt) {
1538 void __iomem *ioaddr = db->ioaddr;
1539
1538 /* Resource Empty */ 1540 /* Resource Empty */
1539 db->tx_packet_cnt++; 1541 db->tx_packet_cnt++;
1540 txptr->tdes0 = cpu_to_le32(0x80000000); 1542 txptr->tdes0 = cpu_to_le32(0x80000000);
1541 update_cr6(db->cr6_data | 0x2000, dev->base_addr); 1543 update_cr6(db->cr6_data | 0x2000, ioaddr);
1542 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 1544 dw32(DCR1, 0x1); /* Issue Tx polling */
1543 update_cr6(db->cr6_data, dev->base_addr); 1545 update_cr6(db->cr6_data, ioaddr);
1544 dev->trans_start = jiffies; 1546 dev->trans_start = jiffies;
1545 } else 1547 } else
1546 db->tx_queue_cnt++; /* Put in TX queue */ 1548 db->tx_queue_cnt++; /* Put in TX queue */
@@ -1575,43 +1577,59 @@ static void allocate_rx_buffer(struct net_device *dev)
1575 db->rx_insert_ptr = rxptr; 1577 db->rx_insert_ptr = rxptr;
1576} 1578}
1577 1579
1580static void srom_clk_write(void __iomem *ioaddr, u32 data)
1581{
1582 static const u32 cmd[] = {
1583 CR9_SROM_READ | CR9_SRCS,
1584 CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
1585 CR9_SROM_READ | CR9_SRCS
1586 };
1587 int i;
1588
1589 for (i = 0; i < ARRAY_SIZE(cmd); i++) {
1590 dw32(DCR9, data | cmd[i]);
1591 udelay(5);
1592 }
1593}
1578 1594
1579/* 1595/*
1580 * Read one word data from the serial ROM 1596 * Read one word data from the serial ROM
1581 */ 1597 */
1582 1598static u16 read_srom_word(void __iomem *ioaddr, int offset)
1583static u16 read_srom_word(long ioaddr, int offset)
1584{ 1599{
1600 u16 srom_data;
1585 int i; 1601 int i;
1586 u16 srom_data = 0;
1587 long cr9_ioaddr = ioaddr + DCR9;
1588 1602
1589 outl(CR9_SROM_READ, cr9_ioaddr); 1603 dw32(DCR9, CR9_SROM_READ);
1590 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1604 udelay(5);
1605 dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1606 udelay(5);
1591 1607
1592 /* Send the Read Command 110b */ 1608 /* Send the Read Command 110b */
1593 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1609 srom_clk_write(ioaddr, SROM_DATA_1);
1594 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1610 srom_clk_write(ioaddr, SROM_DATA_1);
1595 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); 1611 srom_clk_write(ioaddr, SROM_DATA_0);
1596 1612
1597 /* Send the offset */ 1613 /* Send the offset */
1598 for (i = 5; i >= 0; i--) { 1614 for (i = 5; i >= 0; i--) {
1599 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; 1615 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1600 SROM_CLK_WRITE(srom_data, cr9_ioaddr); 1616 srom_clk_write(ioaddr, srom_data);
1601 } 1617 }
1602 1618
1603 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1619 dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1620 udelay(5);
1604 1621
1605 for (i = 16; i > 0; i--) { 1622 for (i = 16; i > 0; i--) {
1606 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); 1623 dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1607 udelay(5); 1624 udelay(5);
1608 srom_data = (srom_data << 1) | 1625 srom_data = (srom_data << 1) |
1609 ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); 1626 ((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1610 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1627 dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1611 udelay(5); 1628 udelay(5);
1612 } 1629 }
1613 1630
1614 outl(CR9_SROM_READ, cr9_ioaddr); 1631 dw32(DCR9, CR9_SROM_READ);
1632 udelay(5);
1615 return srom_data; 1633 return srom_data;
1616} 1634}
1617 1635
@@ -1620,13 +1638,14 @@ static u16 read_srom_word(long ioaddr, int offset)
1620 * Auto sense the media mode 1638 * Auto sense the media mode
1621 */ 1639 */
1622 1640
1623static u8 dmfe_sense_speed(struct dmfe_board_info * db) 1641static u8 dmfe_sense_speed(struct dmfe_board_info *db)
1624{ 1642{
1643 void __iomem *ioaddr = db->ioaddr;
1625 u8 ErrFlag = 0; 1644 u8 ErrFlag = 0;
1626 u16 phy_mode; 1645 u16 phy_mode;
1627 1646
1628 /* CR6 bit18=0, select 10/100M */ 1647 /* CR6 bit18=0, select 10/100M */
1629 update_cr6( (db->cr6_data & ~0x40000), db->ioaddr); 1648 update_cr6(db->cr6_data & ~0x40000, ioaddr);
1630 1649
1631 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1650 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1632 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1651 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
@@ -1665,11 +1684,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1665 1684
1666static void dmfe_set_phyxcer(struct dmfe_board_info *db) 1685static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1667{ 1686{
1687 void __iomem *ioaddr = db->ioaddr;
1668 u16 phy_reg; 1688 u16 phy_reg;
1669 1689
1670 /* Select 10/100M phyxcer */ 1690 /* Select 10/100M phyxcer */
1671 db->cr6_data &= ~0x40000; 1691 db->cr6_data &= ~0x40000;
1672 update_cr6(db->cr6_data, db->ioaddr); 1692 update_cr6(db->cr6_data, ioaddr);
1673 1693
1674 /* DM9009 Chip: Phyxcer reg18 bit12=0 */ 1694 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1675 if (db->chip_id == PCI_DM9009_ID) { 1695 if (db->chip_id == PCI_DM9009_ID) {
@@ -1765,18 +1785,15 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
1765 * Write a word to Phy register 1785 * Write a word to Phy register
1766 */ 1786 */
1767 1787
1768static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, 1788static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1769 u16 phy_data, u32 chip_id) 1789 u16 phy_data, u32 chip_id)
1770{ 1790{
1771 u16 i; 1791 u16 i;
1772 unsigned long ioaddr;
1773 1792
1774 if (chip_id == PCI_DM9132_ID) { 1793 if (chip_id == PCI_DM9132_ID) {
1775 ioaddr = iobase + 0x80 + offset * 4; 1794 dw16(0x80 + offset * 4, phy_data);
1776 outw(phy_data, ioaddr);
1777 } else { 1795 } else {
1778 /* DM9102/DM9102A Chip */ 1796 /* DM9102/DM9102A Chip */
1779 ioaddr = iobase + DCR9;
1780 1797
1781 /* Send 33 synchronization clock to Phy controller */ 1798 /* Send 33 synchronization clock to Phy controller */
1782 for (i = 0; i < 35; i++) 1799 for (i = 0; i < 35; i++)
@@ -1816,19 +1833,16 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
1816 * Read a word data from phy register 1833 * Read a word data from phy register
1817 */ 1834 */
1818 1835
1819static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) 1836static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1820{ 1837{
1821 int i; 1838 int i;
1822 u16 phy_data; 1839 u16 phy_data;
1823 unsigned long ioaddr;
1824 1840
1825 if (chip_id == PCI_DM9132_ID) { 1841 if (chip_id == PCI_DM9132_ID) {
1826 /* DM9132 Chip */ 1842 /* DM9132 Chip */
1827 ioaddr = iobase + 0x80 + offset * 4; 1843 phy_data = dr16(0x80 + offset * 4);
1828 phy_data = inw(ioaddr);
1829 } else { 1844 } else {
1830 /* DM9102/DM9102A Chip */ 1845 /* DM9102/DM9102A Chip */
1831 ioaddr = iobase + DCR9;
1832 1846
1833 /* Send 33 synchronization clock to Phy controller */ 1847 /* Send 33 synchronization clock to Phy controller */
1834 for (i = 0; i < 35; i++) 1848 for (i = 0; i < 35; i++)
@@ -1870,13 +1884,13 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1870 * Write one bit data to Phy Controller 1884 * Write one bit data to Phy Controller
1871 */ 1885 */
1872 1886
1873static void phy_write_1bit(unsigned long ioaddr, u32 phy_data) 1887static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1874{ 1888{
1875 outl(phy_data, ioaddr); /* MII Clock Low */ 1889 dw32(DCR9, phy_data); /* MII Clock Low */
1876 udelay(1); 1890 udelay(1);
1877 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ 1891 dw32(DCR9, phy_data | MDCLKH); /* MII Clock High */
1878 udelay(1); 1892 udelay(1);
1879 outl(phy_data, ioaddr); /* MII Clock Low */ 1893 dw32(DCR9, phy_data); /* MII Clock Low */
1880 udelay(1); 1894 udelay(1);
1881} 1895}
1882 1896
@@ -1885,14 +1899,14 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1885 * Read one bit phy data from PHY controller 1899 * Read one bit phy data from PHY controller
1886 */ 1900 */
1887 1901
1888static u16 phy_read_1bit(unsigned long ioaddr) 1902static u16 phy_read_1bit(void __iomem *ioaddr)
1889{ 1903{
1890 u16 phy_data; 1904 u16 phy_data;
1891 1905
1892 outl(0x50000, ioaddr); 1906 dw32(DCR9, 0x50000);
1893 udelay(1); 1907 udelay(1);
1894 phy_data = ( inl(ioaddr) >> 19 ) & 0x1; 1908 phy_data = (dr32(DCR9) >> 19) & 0x1;
1895 outl(0x40000, ioaddr); 1909 dw32(DCR9, 0x40000);
1896 udelay(1); 1910 udelay(1);
1897 1911
1898 return phy_data; 1912 return phy_data;
@@ -1978,7 +1992,7 @@ static void dmfe_parse_srom(struct dmfe_board_info * db)
1978 1992
1979 /* Check DM9801 or DM9802 present or not */ 1993 /* Check DM9801 or DM9802 present or not */
1980 db->HPNA_present = 0; 1994 db->HPNA_present = 0;
1981 update_cr6(db->cr6_data|0x40000, db->ioaddr); 1995 update_cr6(db->cr6_data | 0x40000, db->ioaddr);
1982 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); 1996 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1983 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) { 1997 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1984 /* DM9801 or DM9802 present */ 1998 /* DM9801 or DM9802 present */
@@ -2095,6 +2109,7 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2095{ 2109{
2096 struct net_device *dev = pci_get_drvdata(pci_dev); 2110 struct net_device *dev = pci_get_drvdata(pci_dev);
2097 struct dmfe_board_info *db = netdev_priv(dev); 2111 struct dmfe_board_info *db = netdev_priv(dev);
2112 void __iomem *ioaddr = db->ioaddr;
2098 u32 tmp; 2113 u32 tmp;
2099 2114
2100 /* Disable upper layer interface */ 2115 /* Disable upper layer interface */
@@ -2102,11 +2117,11 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2102 2117
2103 /* Disable Tx/Rx */ 2118 /* Disable Tx/Rx */
2104 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); 2119 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2105 update_cr6(db->cr6_data, dev->base_addr); 2120 update_cr6(db->cr6_data, ioaddr);
2106 2121
2107 /* Disable Interrupt */ 2122 /* Disable Interrupt */
2108 outl(0, dev->base_addr + DCR7); 2123 dw32(DCR7, 0);
2109 outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5); 2124 dw32(DCR5, dr32(DCR5));
2110 2125
2111 /* Fre RX buffers */ 2126 /* Fre RX buffers */
2112 dmfe_free_rxbuffer(db); 2127 dmfe_free_rxbuffer(db);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fea3641d9398..c4f37aca2269 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -328,7 +328,7 @@ static void tulip_up(struct net_device *dev)
328 udelay(100); 328 udelay(100);
329 329
330 if (tulip_debug > 1) 330 if (tulip_debug > 1)
331 netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq); 331 netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
332 332
333 iowrite32(tp->rx_ring_dma, ioaddr + CSR3); 333 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
334 iowrite32(tp->tx_ring_dma, ioaddr + CSR4); 334 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -515,11 +515,13 @@ media_picked:
515static int 515static int
516tulip_open(struct net_device *dev) 516tulip_open(struct net_device *dev)
517{ 517{
518 struct tulip_private *tp = netdev_priv(dev);
518 int retval; 519 int retval;
519 520
520 tulip_init_ring (dev); 521 tulip_init_ring (dev);
521 522
522 retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev); 523 retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
524 dev->name, dev);
523 if (retval) 525 if (retval)
524 goto free_ring; 526 goto free_ring;
525 527
@@ -841,7 +843,7 @@ static int tulip_close (struct net_device *dev)
841 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n", 843 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
842 ioread32 (ioaddr + CSR5)); 844 ioread32 (ioaddr + CSR5));
843 845
844 free_irq (dev->irq, dev); 846 free_irq (tp->pdev->irq, dev);
845 847
846 tulip_free_ring (dev); 848 tulip_free_ring (dev);
847 849
@@ -1489,8 +1491,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1489 1491
1490 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); 1492 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1491 1493
1492 dev->base_addr = (unsigned long)ioaddr;
1493
1494#ifdef CONFIG_TULIP_MWI 1494#ifdef CONFIG_TULIP_MWI
1495 if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) 1495 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1496 tulip_mwi_config (pdev, dev); 1496 tulip_mwi_config (pdev, dev);
@@ -1650,7 +1650,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1650 for (i = 0; i < 6; i++) 1650 for (i = 0; i < 6; i++)
1651 last_phys_addr[i] = dev->dev_addr[i]; 1651 last_phys_addr[i] = dev->dev_addr[i];
1652 last_irq = irq; 1652 last_irq = irq;
1653 dev->irq = irq;
1654 1653
1655 /* The lower four bits are the media type. */ 1654 /* The lower four bits are the media type. */
1656 if (board_idx >= 0 && board_idx < MAX_UNITS) { 1655 if (board_idx >= 0 && board_idx < MAX_UNITS) {
@@ -1858,7 +1857,8 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1858 tulip_down(dev); 1857 tulip_down(dev);
1859 1858
1860 netif_device_detach(dev); 1859 netif_device_detach(dev);
1861 free_irq(dev->irq, dev); 1860 /* FIXME: it needlessly adds an error path. */
1861 free_irq(tp->pdev->irq, dev);
1862 1862
1863save_state: 1863save_state:
1864 pci_save_state(pdev); 1864 pci_save_state(pdev);
@@ -1900,7 +1900,9 @@ static int tulip_resume(struct pci_dev *pdev)
1900 return retval; 1900 return retval;
1901 } 1901 }
1902 1902
1903 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) { 1903 retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1904 dev->name, dev);
1905 if (retval) {
1904 pr_err("request_irq failed in resume\n"); 1906 pr_err("request_irq failed in resume\n");
1905 return retval; 1907 return retval;
1906 } 1908 }
@@ -1960,11 +1962,14 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev)
1960 1962
1961static void poll_tulip (struct net_device *dev) 1963static void poll_tulip (struct net_device *dev)
1962{ 1964{
1965 struct tulip_private *tp = netdev_priv(dev);
1966 const int irq = tp->pdev->irq;
1967
1963 /* disable_irq here is not very nice, but with the lockless 1968 /* disable_irq here is not very nice, but with the lockless
1964 interrupt handler we have no other choice. */ 1969 interrupt handler we have no other choice. */
1965 disable_irq(dev->irq); 1970 disable_irq(irq);
1966 tulip_interrupt (dev->irq, dev); 1971 tulip_interrupt (irq, dev);
1967 enable_irq(dev->irq); 1972 enable_irq(irq);
1968} 1973}
1969#endif 1974#endif
1970 1975
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index fc4001f6a5e4..75d45f8a37dc 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -42,6 +42,8 @@
42#include <asm/dma.h> 42#include <asm/dma.h>
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44 44
45#define uw32(reg, val) iowrite32(val, ioaddr + (reg))
46#define ur32(reg) ioread32(ioaddr + (reg))
45 47
46/* Board/System/Debug information/definition ---------------- */ 48/* Board/System/Debug information/definition ---------------- */
47#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/ 49#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/
@@ -110,14 +112,6 @@ do { \
110 112
111#define SROM_V41_CODE 0x14 113#define SROM_V41_CODE 0x14
112 114
113#define SROM_CLK_WRITE(data, ioaddr) \
114 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
115 udelay(5); \
116 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
117 udelay(5); \
118 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
119 udelay(5);
120
121/* Structure/enum declaration ------------------------------- */ 115/* Structure/enum declaration ------------------------------- */
122struct tx_desc { 116struct tx_desc {
123 __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ 117 __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -132,12 +126,15 @@ struct rx_desc {
132} __attribute__(( aligned(32) )); 126} __attribute__(( aligned(32) ));
133 127
134struct uli526x_board_info { 128struct uli526x_board_info {
135 u32 chip_id; /* Chip vendor/Device ID */ 129 struct uli_phy_ops {
130 void (*write)(struct uli526x_board_info *, u8, u8, u16);
131 u16 (*read)(struct uli526x_board_info *, u8, u8);
132 } phy;
136 struct net_device *next_dev; /* next device */ 133 struct net_device *next_dev; /* next device */
137 struct pci_dev *pdev; /* PCI device */ 134 struct pci_dev *pdev; /* PCI device */
138 spinlock_t lock; 135 spinlock_t lock;
139 136
140 long ioaddr; /* I/O base address */ 137 void __iomem *ioaddr; /* I/O base address */
141 u32 cr0_data; 138 u32 cr0_data;
142 u32 cr5_data; 139 u32 cr5_data;
143 u32 cr6_data; 140 u32 cr6_data;
@@ -227,21 +224,21 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *,
227static int uli526x_stop(struct net_device *); 224static int uli526x_stop(struct net_device *);
228static void uli526x_set_filter_mode(struct net_device *); 225static void uli526x_set_filter_mode(struct net_device *);
229static const struct ethtool_ops netdev_ethtool_ops; 226static const struct ethtool_ops netdev_ethtool_ops;
230static u16 read_srom_word(long, int); 227static u16 read_srom_word(struct uli526x_board_info *, int);
231static irqreturn_t uli526x_interrupt(int, void *); 228static irqreturn_t uli526x_interrupt(int, void *);
232#ifdef CONFIG_NET_POLL_CONTROLLER 229#ifdef CONFIG_NET_POLL_CONTROLLER
233static void uli526x_poll(struct net_device *dev); 230static void uli526x_poll(struct net_device *dev);
234#endif 231#endif
235static void uli526x_descriptor_init(struct net_device *, unsigned long); 232static void uli526x_descriptor_init(struct net_device *, void __iomem *);
236static void allocate_rx_buffer(struct net_device *); 233static void allocate_rx_buffer(struct net_device *);
237static void update_cr6(u32, unsigned long); 234static void update_cr6(u32, void __iomem *);
238static void send_filter_frame(struct net_device *, int); 235static void send_filter_frame(struct net_device *, int);
239static u16 phy_read(unsigned long, u8, u8, u32); 236static u16 phy_readby_cr9(struct uli526x_board_info *, u8, u8);
240static u16 phy_readby_cr10(unsigned long, u8, u8); 237static u16 phy_readby_cr10(struct uli526x_board_info *, u8, u8);
241static void phy_write(unsigned long, u8, u8, u16, u32); 238static void phy_writeby_cr9(struct uli526x_board_info *, u8, u8, u16);
242static void phy_writeby_cr10(unsigned long, u8, u8, u16); 239static void phy_writeby_cr10(struct uli526x_board_info *, u8, u8, u16);
243static void phy_write_1bit(unsigned long, u32, u32); 240static void phy_write_1bit(struct uli526x_board_info *db, u32);
244static u16 phy_read_1bit(unsigned long, u32); 241static u16 phy_read_1bit(struct uli526x_board_info *db);
245static u8 uli526x_sense_speed(struct uli526x_board_info *); 242static u8 uli526x_sense_speed(struct uli526x_board_info *);
246static void uli526x_process_mode(struct uli526x_board_info *); 243static void uli526x_process_mode(struct uli526x_board_info *);
247static void uli526x_timer(unsigned long); 244static void uli526x_timer(unsigned long);
@@ -253,6 +250,18 @@ static void uli526x_free_rxbuffer(struct uli526x_board_info *);
253static void uli526x_init(struct net_device *); 250static void uli526x_init(struct net_device *);
254static void uli526x_set_phyxcer(struct uli526x_board_info *); 251static void uli526x_set_phyxcer(struct uli526x_board_info *);
255 252
253static void srom_clk_write(struct uli526x_board_info *db, u32 data)
254{
255 void __iomem *ioaddr = db->ioaddr;
256
257 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
258 udelay(5);
259 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
260 udelay(5);
261 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
262 udelay(5);
263}
264
256/* ULI526X network board routine ---------------------------- */ 265/* ULI526X network board routine ---------------------------- */
257 266
258static const struct net_device_ops netdev_ops = { 267static const struct net_device_ops netdev_ops = {
@@ -277,6 +286,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
277{ 286{
278 struct uli526x_board_info *db; /* board information structure */ 287 struct uli526x_board_info *db; /* board information structure */
279 struct net_device *dev; 288 struct net_device *dev;
289 void __iomem *ioaddr;
280 int i, err; 290 int i, err;
281 291
282 ULI526X_DBUG(0, "uli526x_init_one()", 0); 292 ULI526X_DBUG(0, "uli526x_init_one()", 0);
@@ -313,9 +323,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
313 goto err_out_disable; 323 goto err_out_disable;
314 } 324 }
315 325
316 if (pci_request_regions(pdev, DRV_NAME)) { 326 err = pci_request_regions(pdev, DRV_NAME);
327 if (err < 0) {
317 pr_err("Failed to request PCI regions\n"); 328 pr_err("Failed to request PCI regions\n");
318 err = -ENODEV;
319 goto err_out_disable; 329 goto err_out_disable;
320 } 330 }
321 331
@@ -323,32 +333,41 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
323 db = netdev_priv(dev); 333 db = netdev_priv(dev);
324 334
325 /* Allocate Tx/Rx descriptor memory */ 335 /* Allocate Tx/Rx descriptor memory */
336 err = -ENOMEM;
337
326 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); 338 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
327 if(db->desc_pool_ptr == NULL) 339 if (!db->desc_pool_ptr)
328 { 340 goto err_out_release;
329 err = -ENOMEM; 341
330 goto err_out_nomem;
331 }
332 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); 342 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
333 if(db->buf_pool_ptr == NULL) 343 if (!db->buf_pool_ptr)
334 { 344 goto err_out_free_tx_desc;
335 err = -ENOMEM;
336 goto err_out_nomem;
337 }
338 345
339 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; 346 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
340 db->first_tx_desc_dma = db->desc_pool_dma_ptr; 347 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
341 db->buf_pool_start = db->buf_pool_ptr; 348 db->buf_pool_start = db->buf_pool_ptr;
342 db->buf_pool_dma_start = db->buf_pool_dma_ptr; 349 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
343 350
344 db->chip_id = ent->driver_data; 351 switch (ent->driver_data) {
345 db->ioaddr = pci_resource_start(pdev, 0); 352 case PCI_ULI5263_ID:
353 db->phy.write = phy_writeby_cr10;
354 db->phy.read = phy_readby_cr10;
355 break;
356 default:
357 db->phy.write = phy_writeby_cr9;
358 db->phy.read = phy_readby_cr9;
359 break;
360 }
361
362 /* IO region. */
363 ioaddr = pci_iomap(pdev, 0, 0);
364 if (!ioaddr)
365 goto err_out_free_tx_buf;
346 366
367 db->ioaddr = ioaddr;
347 db->pdev = pdev; 368 db->pdev = pdev;
348 db->init = 1; 369 db->init = 1;
349 370
350 dev->base_addr = db->ioaddr;
351 dev->irq = pdev->irq;
352 pci_set_drvdata(pdev, dev); 371 pci_set_drvdata(pdev, dev);
353 372
354 /* Register some necessary functions */ 373 /* Register some necessary functions */
@@ -360,24 +379,24 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
360 379
361 /* read 64 word srom data */ 380 /* read 64 word srom data */
362 for (i = 0; i < 64; i++) 381 for (i = 0; i < 64; i++)
363 ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); 382 ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db, i));
364 383
365 /* Set Node address */ 384 /* Set Node address */
366 if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */ 385 if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */
367 { 386 {
368 outl(0x10000, db->ioaddr + DCR0); //Diagnosis mode 387 uw32(DCR0, 0x10000); //Diagnosis mode
369 outl(0x1c0, db->ioaddr + DCR13); //Reset dianostic pointer port 388 uw32(DCR13, 0x1c0); //Reset dianostic pointer port
370 outl(0, db->ioaddr + DCR14); //Clear reset port 389 uw32(DCR14, 0); //Clear reset port
371 outl(0x10, db->ioaddr + DCR14); //Reset ID Table pointer 390 uw32(DCR14, 0x10); //Reset ID Table pointer
372 outl(0, db->ioaddr + DCR14); //Clear reset port 391 uw32(DCR14, 0); //Clear reset port
373 outl(0, db->ioaddr + DCR13); //Clear CR13 392 uw32(DCR13, 0); //Clear CR13
374 outl(0x1b0, db->ioaddr + DCR13); //Select ID Table access port 393 uw32(DCR13, 0x1b0); //Select ID Table access port
375 //Read MAC address from CR14 394 //Read MAC address from CR14
376 for (i = 0; i < 6; i++) 395 for (i = 0; i < 6; i++)
377 dev->dev_addr[i] = inl(db->ioaddr + DCR14); 396 dev->dev_addr[i] = ur32(DCR14);
378 //Read end 397 //Read end
379 outl(0, db->ioaddr + DCR13); //Clear CR13 398 uw32(DCR13, 0); //Clear CR13
380 outl(0, db->ioaddr + DCR0); //Clear CR0 399 uw32(DCR0, 0); //Clear CR0
381 udelay(10); 400 udelay(10);
382 } 401 }
383 else /*Exist SROM*/ 402 else /*Exist SROM*/
@@ -387,26 +406,26 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
387 } 406 }
388 err = register_netdev (dev); 407 err = register_netdev (dev);
389 if (err) 408 if (err)
390 goto err_out_res; 409 goto err_out_unmap;
391 410
392 netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n", 411 netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
393 ent->driver_data >> 16, pci_name(pdev), 412 ent->driver_data >> 16, pci_name(pdev),
394 dev->dev_addr, dev->irq); 413 dev->dev_addr, pdev->irq);
395 414
396 pci_set_master(pdev); 415 pci_set_master(pdev);
397 416
398 return 0; 417 return 0;
399 418
400err_out_res: 419err_out_unmap:
420 pci_iounmap(pdev, db->ioaddr);
421err_out_free_tx_buf:
422 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
423 db->buf_pool_ptr, db->buf_pool_dma_ptr);
424err_out_free_tx_desc:
425 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
426 db->desc_pool_ptr, db->desc_pool_dma_ptr);
427err_out_release:
401 pci_release_regions(pdev); 428 pci_release_regions(pdev);
402err_out_nomem:
403 if(db->desc_pool_ptr)
404 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
405 db->desc_pool_ptr, db->desc_pool_dma_ptr);
406
407 if(db->buf_pool_ptr != NULL)
408 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
409 db->buf_pool_ptr, db->buf_pool_dma_ptr);
410err_out_disable: 429err_out_disable:
411 pci_disable_device(pdev); 430 pci_disable_device(pdev);
412err_out_free: 431err_out_free:
@@ -422,19 +441,17 @@ static void __devexit uli526x_remove_one (struct pci_dev *pdev)
422 struct net_device *dev = pci_get_drvdata(pdev); 441 struct net_device *dev = pci_get_drvdata(pdev);
423 struct uli526x_board_info *db = netdev_priv(dev); 442 struct uli526x_board_info *db = netdev_priv(dev);
424 443
425 ULI526X_DBUG(0, "uli526x_remove_one()", 0); 444 unregister_netdev(dev);
426 445 pci_iounmap(pdev, db->ioaddr);
427 pci_free_consistent(db->pdev, sizeof(struct tx_desc) * 446 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
428 DESC_ALL_CNT + 0x20, db->desc_pool_ptr, 447 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
429 db->desc_pool_dma_ptr); 448 db->desc_pool_dma_ptr);
430 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, 449 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
431 db->buf_pool_ptr, db->buf_pool_dma_ptr); 450 db->buf_pool_ptr, db->buf_pool_dma_ptr);
432 unregister_netdev(dev);
433 pci_release_regions(pdev); 451 pci_release_regions(pdev);
434 free_netdev(dev); /* free board information */
435 pci_set_drvdata(pdev, NULL);
436 pci_disable_device(pdev); 452 pci_disable_device(pdev);
437 ULI526X_DBUG(0, "uli526x_remove_one() exit", 0); 453 pci_set_drvdata(pdev, NULL);
454 free_netdev(dev);
438} 455}
439 456
440 457
@@ -468,7 +485,8 @@ static int uli526x_open(struct net_device *dev)
468 /* Initialize ULI526X board */ 485 /* Initialize ULI526X board */
469 uli526x_init(dev); 486 uli526x_init(dev);
470 487
471 ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev); 488 ret = request_irq(db->pdev->irq, uli526x_interrupt, IRQF_SHARED,
489 dev->name, dev);
472 if (ret) 490 if (ret)
473 return ret; 491 return ret;
474 492
@@ -496,57 +514,57 @@ static int uli526x_open(struct net_device *dev)
496static void uli526x_init(struct net_device *dev) 514static void uli526x_init(struct net_device *dev)
497{ 515{
498 struct uli526x_board_info *db = netdev_priv(dev); 516 struct uli526x_board_info *db = netdev_priv(dev);
499 unsigned long ioaddr = db->ioaddr; 517 struct uli_phy_ops *phy = &db->phy;
518 void __iomem *ioaddr = db->ioaddr;
500 u8 phy_tmp; 519 u8 phy_tmp;
501 u8 timeout; 520 u8 timeout;
502 u16 phy_value;
503 u16 phy_reg_reset; 521 u16 phy_reg_reset;
504 522
505 523
506 ULI526X_DBUG(0, "uli526x_init()", 0); 524 ULI526X_DBUG(0, "uli526x_init()", 0);
507 525
508 /* Reset M526x MAC controller */ 526 /* Reset M526x MAC controller */
509 outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */ 527 uw32(DCR0, ULI526X_RESET); /* RESET MAC */
510 udelay(100); 528 udelay(100);
511 outl(db->cr0_data, ioaddr + DCR0); 529 uw32(DCR0, db->cr0_data);
512 udelay(5); 530 udelay(5);
513 531
514 /* Phy addr : In some boards,M5261/M5263 phy address != 1 */ 532 /* Phy addr : In some boards,M5261/M5263 phy address != 1 */
515 db->phy_addr = 1; 533 db->phy_addr = 1;
516 for(phy_tmp=0;phy_tmp<32;phy_tmp++) 534 for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) {
517 { 535 u16 phy_value;
518 phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add 536
519 if(phy_value != 0xffff&&phy_value!=0) 537 phy_value = phy->read(db, phy_tmp, 3); //peer add
520 { 538 if (phy_value != 0xffff && phy_value != 0) {
521 db->phy_addr = phy_tmp; 539 db->phy_addr = phy_tmp;
522 break; 540 break;
523 } 541 }
524 } 542 }
525 if(phy_tmp == 32) 543
544 if (phy_tmp == 32)
526 pr_warn("Can not find the phy address!!!\n"); 545 pr_warn("Can not find the phy address!!!\n");
527 /* Parser SROM and media mode */ 546 /* Parser SROM and media mode */
528 db->media_mode = uli526x_media_mode; 547 db->media_mode = uli526x_media_mode;
529 548
530 /* phyxcer capability setting */ 549 /* phyxcer capability setting */
531 phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id); 550 phy_reg_reset = phy->read(db, db->phy_addr, 0);
532 phy_reg_reset = (phy_reg_reset | 0x8000); 551 phy_reg_reset = (phy_reg_reset | 0x8000);
533 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id); 552 phy->write(db, db->phy_addr, 0, phy_reg_reset);
534 553
535 /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management 554 /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
536 * functions") or phy data sheet for details on phy reset 555 * functions") or phy data sheet for details on phy reset
537 */ 556 */
538 udelay(500); 557 udelay(500);
539 timeout = 10; 558 timeout = 10;
540 while (timeout-- && 559 while (timeout-- && phy->read(db, db->phy_addr, 0) & 0x8000)
541 phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000) 560 udelay(100);
542 udelay(100);
543 561
544 /* Process Phyxcer Media Mode */ 562 /* Process Phyxcer Media Mode */
545 uli526x_set_phyxcer(db); 563 uli526x_set_phyxcer(db);
546 564
547 /* Media Mode Process */ 565 /* Media Mode Process */
548 if ( !(db->media_mode & ULI526X_AUTO) ) 566 if ( !(db->media_mode & ULI526X_AUTO) )
549 db->op_mode = db->media_mode; /* Force Mode */ 567 db->op_mode = db->media_mode; /* Force Mode */
550 568
551 /* Initialize Transmit/Receive decriptor and CR3/4 */ 569 /* Initialize Transmit/Receive decriptor and CR3/4 */
552 uli526x_descriptor_init(dev, ioaddr); 570 uli526x_descriptor_init(dev, ioaddr);
@@ -559,10 +577,10 @@ static void uli526x_init(struct net_device *dev)
559 577
560 /* Init CR7, interrupt active bit */ 578 /* Init CR7, interrupt active bit */
561 db->cr7_data = CR7_DEFAULT; 579 db->cr7_data = CR7_DEFAULT;
562 outl(db->cr7_data, ioaddr + DCR7); 580 uw32(DCR7, db->cr7_data);
563 581
564 /* Init CR15, Tx jabber and Rx watchdog timer */ 582 /* Init CR15, Tx jabber and Rx watchdog timer */
565 outl(db->cr15_data, ioaddr + DCR15); 583 uw32(DCR15, db->cr15_data);
566 584
567 /* Enable ULI526X Tx/Rx function */ 585 /* Enable ULI526X Tx/Rx function */
568 db->cr6_data |= CR6_RXSC | CR6_TXSC; 586 db->cr6_data |= CR6_RXSC | CR6_TXSC;
@@ -579,6 +597,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
579 struct net_device *dev) 597 struct net_device *dev)
580{ 598{
581 struct uli526x_board_info *db = netdev_priv(dev); 599 struct uli526x_board_info *db = netdev_priv(dev);
600 void __iomem *ioaddr = db->ioaddr;
582 struct tx_desc *txptr; 601 struct tx_desc *txptr;
583 unsigned long flags; 602 unsigned long flags;
584 603
@@ -604,7 +623,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
604 } 623 }
605 624
606 /* Disable NIC interrupt */ 625 /* Disable NIC interrupt */
607 outl(0, dev->base_addr + DCR7); 626 uw32(DCR7, 0);
608 627
609 /* transmit this packet */ 628 /* transmit this packet */
610 txptr = db->tx_insert_ptr; 629 txptr = db->tx_insert_ptr;
@@ -615,10 +634,10 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
615 db->tx_insert_ptr = txptr->next_tx_desc; 634 db->tx_insert_ptr = txptr->next_tx_desc;
616 635
617 /* Transmit Packet Process */ 636 /* Transmit Packet Process */
618 if ( (db->tx_packet_cnt < TX_DESC_CNT) ) { 637 if (db->tx_packet_cnt < TX_DESC_CNT) {
619 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ 638 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
620 db->tx_packet_cnt++; /* Ready to send */ 639 db->tx_packet_cnt++; /* Ready to send */
621 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 640 uw32(DCR1, 0x1); /* Issue Tx polling */
622 dev->trans_start = jiffies; /* saved time stamp */ 641 dev->trans_start = jiffies; /* saved time stamp */
623 } 642 }
624 643
@@ -628,7 +647,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
628 647
629 /* Restore CR7 to enable interrupt */ 648 /* Restore CR7 to enable interrupt */
630 spin_unlock_irqrestore(&db->lock, flags); 649 spin_unlock_irqrestore(&db->lock, flags);
631 outl(db->cr7_data, dev->base_addr + DCR7); 650 uw32(DCR7, db->cr7_data);
632 651
633 /* free this SKB */ 652 /* free this SKB */
634 dev_kfree_skb(skb); 653 dev_kfree_skb(skb);
@@ -645,9 +664,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
645static int uli526x_stop(struct net_device *dev) 664static int uli526x_stop(struct net_device *dev)
646{ 665{
647 struct uli526x_board_info *db = netdev_priv(dev); 666 struct uli526x_board_info *db = netdev_priv(dev);
648 unsigned long ioaddr = dev->base_addr; 667 void __iomem *ioaddr = db->ioaddr;
649
650 ULI526X_DBUG(0, "uli526x_stop", 0);
651 668
652 /* disable system */ 669 /* disable system */
653 netif_stop_queue(dev); 670 netif_stop_queue(dev);
@@ -656,12 +673,12 @@ static int uli526x_stop(struct net_device *dev)
656 del_timer_sync(&db->timer); 673 del_timer_sync(&db->timer);
657 674
658 /* Reset & stop ULI526X board */ 675 /* Reset & stop ULI526X board */
659 outl(ULI526X_RESET, ioaddr + DCR0); 676 uw32(DCR0, ULI526X_RESET);
660 udelay(5); 677 udelay(5);
661 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); 678 db->phy.write(db, db->phy_addr, 0, 0x8000);
662 679
663 /* free interrupt */ 680 /* free interrupt */
664 free_irq(dev->irq, dev); 681 free_irq(db->pdev->irq, dev);
665 682
666 /* free allocated rx buffer */ 683 /* free allocated rx buffer */
667 uli526x_free_rxbuffer(db); 684 uli526x_free_rxbuffer(db);
@@ -679,18 +696,18 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
679{ 696{
680 struct net_device *dev = dev_id; 697 struct net_device *dev = dev_id;
681 struct uli526x_board_info *db = netdev_priv(dev); 698 struct uli526x_board_info *db = netdev_priv(dev);
682 unsigned long ioaddr = dev->base_addr; 699 void __iomem *ioaddr = db->ioaddr;
683 unsigned long flags; 700 unsigned long flags;
684 701
685 spin_lock_irqsave(&db->lock, flags); 702 spin_lock_irqsave(&db->lock, flags);
686 outl(0, ioaddr + DCR7); 703 uw32(DCR7, 0);
687 704
688 /* Got ULI526X status */ 705 /* Got ULI526X status */
689 db->cr5_data = inl(ioaddr + DCR5); 706 db->cr5_data = ur32(DCR5);
690 outl(db->cr5_data, ioaddr + DCR5); 707 uw32(DCR5, db->cr5_data);
691 if ( !(db->cr5_data & 0x180c1) ) { 708 if ( !(db->cr5_data & 0x180c1) ) {
692 /* Restore CR7 to enable interrupt mask */ 709 /* Restore CR7 to enable interrupt mask */
693 outl(db->cr7_data, ioaddr + DCR7); 710 uw32(DCR7, db->cr7_data);
694 spin_unlock_irqrestore(&db->lock, flags); 711 spin_unlock_irqrestore(&db->lock, flags);
695 return IRQ_HANDLED; 712 return IRQ_HANDLED;
696 } 713 }
@@ -718,7 +735,7 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
718 uli526x_free_tx_pkt(dev, db); 735 uli526x_free_tx_pkt(dev, db);
719 736
720 /* Restore CR7 to enable interrupt mask */ 737 /* Restore CR7 to enable interrupt mask */
721 outl(db->cr7_data, ioaddr + DCR7); 738 uw32(DCR7, db->cr7_data);
722 739
723 spin_unlock_irqrestore(&db->lock, flags); 740 spin_unlock_irqrestore(&db->lock, flags);
724 return IRQ_HANDLED; 741 return IRQ_HANDLED;
@@ -727,8 +744,10 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
727#ifdef CONFIG_NET_POLL_CONTROLLER 744#ifdef CONFIG_NET_POLL_CONTROLLER
728static void uli526x_poll(struct net_device *dev) 745static void uli526x_poll(struct net_device *dev)
729{ 746{
747 struct uli526x_board_info *db = netdev_priv(dev);
748
730 /* ISR grabs the irqsave lock, so this should be safe */ 749 /* ISR grabs the irqsave lock, so this should be safe */
731 uli526x_interrupt(dev->irq, dev); 750 uli526x_interrupt(db->pdev->irq, dev);
732} 751}
733#endif 752#endif
734 753
@@ -962,12 +981,7 @@ static void netdev_get_drvinfo(struct net_device *dev,
962 981
963 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 982 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
964 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 983 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
965 if (np->pdev) 984 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
966 strlcpy(info->bus_info, pci_name(np->pdev),
967 sizeof(info->bus_info));
968 else
969 sprintf(info->bus_info, "EISA 0x%lx %d",
970 dev->base_addr, dev->irq);
971} 985}
972 986
973static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { 987static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
@@ -1007,18 +1021,20 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1007 1021
1008static void uli526x_timer(unsigned long data) 1022static void uli526x_timer(unsigned long data)
1009{ 1023{
1010 u32 tmp_cr8;
1011 unsigned char tmp_cr12=0;
1012 struct net_device *dev = (struct net_device *) data; 1024 struct net_device *dev = (struct net_device *) data;
1013 struct uli526x_board_info *db = netdev_priv(dev); 1025 struct uli526x_board_info *db = netdev_priv(dev);
1026 struct uli_phy_ops *phy = &db->phy;
1027 void __iomem *ioaddr = db->ioaddr;
1014 unsigned long flags; 1028 unsigned long flags;
1029 u8 tmp_cr12 = 0;
1030 u32 tmp_cr8;
1015 1031
1016 //ULI526X_DBUG(0, "uli526x_timer()", 0); 1032 //ULI526X_DBUG(0, "uli526x_timer()", 0);
1017 spin_lock_irqsave(&db->lock, flags); 1033 spin_lock_irqsave(&db->lock, flags);
1018 1034
1019 1035
1020 /* Dynamic reset ULI526X : system error or transmit time-out */ 1036 /* Dynamic reset ULI526X : system error or transmit time-out */
1021 tmp_cr8 = inl(db->ioaddr + DCR8); 1037 tmp_cr8 = ur32(DCR8);
1022 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { 1038 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1023 db->reset_cr8++; 1039 db->reset_cr8++;
1024 db->wait_reset = 1; 1040 db->wait_reset = 1;
@@ -1028,7 +1044,7 @@ static void uli526x_timer(unsigned long data)
1028 /* TX polling kick monitor */ 1044 /* TX polling kick monitor */
1029 if ( db->tx_packet_cnt && 1045 if ( db->tx_packet_cnt &&
1030 time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) { 1046 time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
1031 outl(0x1, dev->base_addr + DCR1); // Tx polling again 1047 uw32(DCR1, 0x1); // Tx polling again
1032 1048
1033 // TX Timeout 1049 // TX Timeout
1034 if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) { 1050 if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
@@ -1049,7 +1065,7 @@ static void uli526x_timer(unsigned long data)
1049 } 1065 }
1050 1066
1051 /* Link status check, Dynamic media type change */ 1067 /* Link status check, Dynamic media type change */
1052 if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0) 1068 if ((phy->read(db, db->phy_addr, 5) & 0x01e0)!=0)
1053 tmp_cr12 = 3; 1069 tmp_cr12 = 3;
1054 1070
1055 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) { 1071 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
@@ -1062,7 +1078,7 @@ static void uli526x_timer(unsigned long data)
1062 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ 1078 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1063 /* AUTO don't need */ 1079 /* AUTO don't need */
1064 if ( !(db->media_mode & 0x8) ) 1080 if ( !(db->media_mode & 0x8) )
1065 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); 1081 phy->write(db, db->phy_addr, 0, 0x1000);
1066 1082
1067 /* AUTO mode, if INT phyxcer link failed, select EXT device */ 1083 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1068 if (db->media_mode & ULI526X_AUTO) { 1084 if (db->media_mode & ULI526X_AUTO) {
@@ -1119,12 +1135,13 @@ static void uli526x_timer(unsigned long data)
1119static void uli526x_reset_prepare(struct net_device *dev) 1135static void uli526x_reset_prepare(struct net_device *dev)
1120{ 1136{
1121 struct uli526x_board_info *db = netdev_priv(dev); 1137 struct uli526x_board_info *db = netdev_priv(dev);
1138 void __iomem *ioaddr = db->ioaddr;
1122 1139
1123 /* Sopt MAC controller */ 1140 /* Sopt MAC controller */
1124 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ 1141 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1125 update_cr6(db->cr6_data, dev->base_addr); 1142 update_cr6(db->cr6_data, ioaddr);
1126 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ 1143 uw32(DCR7, 0); /* Disable Interrupt */
1127 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); 1144 uw32(DCR5, ur32(DCR5));
1128 1145
1129 /* Disable upper layer interface */ 1146 /* Disable upper layer interface */
1130 netif_stop_queue(dev); 1147 netif_stop_queue(dev);
@@ -1289,7 +1306,7 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
1289 * Using Chain structure, and allocate Tx/Rx buffer 1306 * Using Chain structure, and allocate Tx/Rx buffer
1290 */ 1307 */
1291 1308
1292static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr) 1309static void uli526x_descriptor_init(struct net_device *dev, void __iomem *ioaddr)
1293{ 1310{
1294 struct uli526x_board_info *db = netdev_priv(dev); 1311 struct uli526x_board_info *db = netdev_priv(dev);
1295 struct tx_desc *tmp_tx; 1312 struct tx_desc *tmp_tx;
@@ -1304,14 +1321,14 @@ static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr
1304 /* tx descriptor start pointer */ 1321 /* tx descriptor start pointer */
1305 db->tx_insert_ptr = db->first_tx_desc; 1322 db->tx_insert_ptr = db->first_tx_desc;
1306 db->tx_remove_ptr = db->first_tx_desc; 1323 db->tx_remove_ptr = db->first_tx_desc;
1307 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ 1324 uw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */
1308 1325
1309 /* rx descriptor start pointer */ 1326 /* rx descriptor start pointer */
1310 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT; 1327 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
1311 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT; 1328 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
1312 db->rx_insert_ptr = db->first_rx_desc; 1329 db->rx_insert_ptr = db->first_rx_desc;
1313 db->rx_ready_ptr = db->first_rx_desc; 1330 db->rx_ready_ptr = db->first_rx_desc;
1314 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ 1331 uw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */
1315 1332
1316 /* Init Transmit chain */ 1333 /* Init Transmit chain */
1317 tmp_buf = db->buf_pool_start; 1334 tmp_buf = db->buf_pool_start;
@@ -1352,11 +1369,9 @@ static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr
1352 * Update CR6 value 1369 * Update CR6 value
1353 * Firstly stop ULI526X, then written value and start 1370 * Firstly stop ULI526X, then written value and start
1354 */ 1371 */
1355 1372static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1356static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1357{ 1373{
1358 1374 uw32(DCR6, cr6_data);
1359 outl(cr6_data, ioaddr + DCR6);
1360 udelay(5); 1375 udelay(5);
1361} 1376}
1362 1377
@@ -1375,6 +1390,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1375static void send_filter_frame(struct net_device *dev, int mc_cnt) 1390static void send_filter_frame(struct net_device *dev, int mc_cnt)
1376{ 1391{
1377 struct uli526x_board_info *db = netdev_priv(dev); 1392 struct uli526x_board_info *db = netdev_priv(dev);
1393 void __iomem *ioaddr = db->ioaddr;
1378 struct netdev_hw_addr *ha; 1394 struct netdev_hw_addr *ha;
1379 struct tx_desc *txptr; 1395 struct tx_desc *txptr;
1380 u16 * addrptr; 1396 u16 * addrptr;
@@ -1420,9 +1436,9 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
1420 /* Resource Empty */ 1436 /* Resource Empty */
1421 db->tx_packet_cnt++; 1437 db->tx_packet_cnt++;
1422 txptr->tdes0 = cpu_to_le32(0x80000000); 1438 txptr->tdes0 = cpu_to_le32(0x80000000);
1423 update_cr6(db->cr6_data | 0x2000, dev->base_addr); 1439 update_cr6(db->cr6_data | 0x2000, ioaddr);
1424 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 1440 uw32(DCR1, 0x1); /* Issue Tx polling */
1425 update_cr6(db->cr6_data, dev->base_addr); 1441 update_cr6(db->cr6_data, ioaddr);
1426 dev->trans_start = jiffies; 1442 dev->trans_start = jiffies;
1427 } else 1443 } else
1428 netdev_err(dev, "No Tx resource - Send_filter_frame!\n"); 1444 netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
@@ -1465,37 +1481,38 @@ static void allocate_rx_buffer(struct net_device *dev)
1465 * Read one word data from the serial ROM 1481 * Read one word data from the serial ROM
1466 */ 1482 */
1467 1483
1468static u16 read_srom_word(long ioaddr, int offset) 1484static u16 read_srom_word(struct uli526x_board_info *db, int offset)
1469{ 1485{
1470 int i; 1486 void __iomem *ioaddr = db->ioaddr;
1471 u16 srom_data = 0; 1487 u16 srom_data = 0;
1472 long cr9_ioaddr = ioaddr + DCR9; 1488 int i;
1473 1489
1474 outl(CR9_SROM_READ, cr9_ioaddr); 1490 uw32(DCR9, CR9_SROM_READ);
1475 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1491 uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1476 1492
1477 /* Send the Read Command 110b */ 1493 /* Send the Read Command 110b */
1478 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1494 srom_clk_write(db, SROM_DATA_1);
1479 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1495 srom_clk_write(db, SROM_DATA_1);
1480 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); 1496 srom_clk_write(db, SROM_DATA_0);
1481 1497
1482 /* Send the offset */ 1498 /* Send the offset */
1483 for (i = 5; i >= 0; i--) { 1499 for (i = 5; i >= 0; i--) {
1484 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; 1500 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1485 SROM_CLK_WRITE(srom_data, cr9_ioaddr); 1501 srom_clk_write(db, srom_data);
1486 } 1502 }
1487 1503
1488 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1504 uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1489 1505
1490 for (i = 16; i > 0; i--) { 1506 for (i = 16; i > 0; i--) {
1491 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); 1507 uw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1492 udelay(5); 1508 udelay(5);
1493 srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); 1509 srom_data = (srom_data << 1) |
1494 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1510 ((ur32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1511 uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1495 udelay(5); 1512 udelay(5);
1496 } 1513 }
1497 1514
1498 outl(CR9_SROM_READ, cr9_ioaddr); 1515 uw32(DCR9, CR9_SROM_READ);
1499 return srom_data; 1516 return srom_data;
1500} 1517}
1501 1518
@@ -1506,15 +1523,16 @@ static u16 read_srom_word(long ioaddr, int offset)
1506 1523
1507static u8 uli526x_sense_speed(struct uli526x_board_info * db) 1524static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1508{ 1525{
1526 struct uli_phy_ops *phy = &db->phy;
1509 u8 ErrFlag = 0; 1527 u8 ErrFlag = 0;
1510 u16 phy_mode; 1528 u16 phy_mode;
1511 1529
1512 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1530 phy_mode = phy->read(db, db->phy_addr, 1);
1513 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1531 phy_mode = phy->read(db, db->phy_addr, 1);
1514 1532
1515 if ( (phy_mode & 0x24) == 0x24 ) { 1533 if ( (phy_mode & 0x24) == 0x24 ) {
1516 1534
1517 phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7); 1535 phy_mode = ((phy->read(db, db->phy_addr, 5) & 0x01e0)<<7);
1518 if(phy_mode&0x8000) 1536 if(phy_mode&0x8000)
1519 phy_mode = 0x8000; 1537 phy_mode = 0x8000;
1520 else if(phy_mode&0x4000) 1538 else if(phy_mode&0x4000)
@@ -1549,10 +1567,11 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1549 1567
1550static void uli526x_set_phyxcer(struct uli526x_board_info *db) 1568static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1551{ 1569{
1570 struct uli_phy_ops *phy = &db->phy;
1552 u16 phy_reg; 1571 u16 phy_reg;
1553 1572
1554 /* Phyxcer capability setting */ 1573 /* Phyxcer capability setting */
1555 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; 1574 phy_reg = phy->read(db, db->phy_addr, 4) & ~0x01e0;
1556 1575
1557 if (db->media_mode & ULI526X_AUTO) { 1576 if (db->media_mode & ULI526X_AUTO) {
1558 /* AUTO Mode */ 1577 /* AUTO Mode */
@@ -1573,10 +1592,10 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1573 phy_reg|=db->PHY_reg4; 1592 phy_reg|=db->PHY_reg4;
1574 db->media_mode|=ULI526X_AUTO; 1593 db->media_mode|=ULI526X_AUTO;
1575 } 1594 }
1576 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); 1595 phy->write(db, db->phy_addr, 4, phy_reg);
1577 1596
1578 /* Restart Auto-Negotiation */ 1597 /* Restart Auto-Negotiation */
1579 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); 1598 phy->write(db, db->phy_addr, 0, 0x1200);
1580 udelay(50); 1599 udelay(50);
1581} 1600}
1582 1601
@@ -1590,6 +1609,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1590 1609
1591static void uli526x_process_mode(struct uli526x_board_info *db) 1610static void uli526x_process_mode(struct uli526x_board_info *db)
1592{ 1611{
1612 struct uli_phy_ops *phy = &db->phy;
1593 u16 phy_reg; 1613 u16 phy_reg;
1594 1614
1595 /* Full Duplex Mode Check */ 1615 /* Full Duplex Mode Check */
@@ -1601,10 +1621,10 @@ static void uli526x_process_mode(struct uli526x_board_info *db)
1601 update_cr6(db->cr6_data, db->ioaddr); 1621 update_cr6(db->cr6_data, db->ioaddr);
1602 1622
1603 /* 10/100M phyxcer force mode need */ 1623 /* 10/100M phyxcer force mode need */
1604 if ( !(db->media_mode & 0x8)) { 1624 if (!(db->media_mode & 0x8)) {
1605 /* Forece Mode */ 1625 /* Forece Mode */
1606 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); 1626 phy_reg = phy->read(db, db->phy_addr, 6);
1607 if ( !(phy_reg & 0x1) ) { 1627 if (!(phy_reg & 0x1)) {
1608 /* parter without N-Way capability */ 1628 /* parter without N-Way capability */
1609 phy_reg = 0x0; 1629 phy_reg = 0x0;
1610 switch(db->op_mode) { 1630 switch(db->op_mode) {
@@ -1613,148 +1633,126 @@ static void uli526x_process_mode(struct uli526x_board_info *db)
1613 case ULI526X_100MHF: phy_reg = 0x2000; break; 1633 case ULI526X_100MHF: phy_reg = 0x2000; break;
1614 case ULI526X_100MFD: phy_reg = 0x2100; break; 1634 case ULI526X_100MFD: phy_reg = 0x2100; break;
1615 } 1635 }
1616 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); 1636 phy->write(db, db->phy_addr, 0, phy_reg);
1617 } 1637 }
1618 } 1638 }
1619} 1639}
1620 1640
1621 1641
1622/* 1642/* M5261/M5263 Chip */
1623 * Write a word to Phy register 1643static void phy_writeby_cr9(struct uli526x_board_info *db, u8 phy_addr,
1624 */ 1644 u8 offset, u16 phy_data)
1625
1626static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
1627{ 1645{
1628 u16 i; 1646 u16 i;
1629 unsigned long ioaddr;
1630
1631 if(chip_id == PCI_ULI5263_ID)
1632 {
1633 phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
1634 return;
1635 }
1636 /* M5261/M5263 Chip */
1637 ioaddr = iobase + DCR9;
1638 1647
1639 /* Send 33 synchronization clock to Phy controller */ 1648 /* Send 33 synchronization clock to Phy controller */
1640 for (i = 0; i < 35; i++) 1649 for (i = 0; i < 35; i++)
1641 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1650 phy_write_1bit(db, PHY_DATA_1);
1642 1651
1643 /* Send start command(01) to Phy */ 1652 /* Send start command(01) to Phy */
1644 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1653 phy_write_1bit(db, PHY_DATA_0);
1645 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1654 phy_write_1bit(db, PHY_DATA_1);
1646 1655
1647 /* Send write command(01) to Phy */ 1656 /* Send write command(01) to Phy */
1648 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1657 phy_write_1bit(db, PHY_DATA_0);
1649 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1658 phy_write_1bit(db, PHY_DATA_1);
1650 1659
1651 /* Send Phy address */ 1660 /* Send Phy address */
1652 for (i = 0x10; i > 0; i = i >> 1) 1661 for (i = 0x10; i > 0; i = i >> 1)
1653 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1662 phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1654 1663
1655 /* Send register address */ 1664 /* Send register address */
1656 for (i = 0x10; i > 0; i = i >> 1) 1665 for (i = 0x10; i > 0; i = i >> 1)
1657 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1666 phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
1658 1667
1659 /* written trasnition */ 1668 /* written trasnition */
1660 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1669 phy_write_1bit(db, PHY_DATA_1);
1661 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1670 phy_write_1bit(db, PHY_DATA_0);
1662 1671
1663 /* Write a word data to PHY controller */ 1672 /* Write a word data to PHY controller */
1664 for ( i = 0x8000; i > 0; i >>= 1) 1673 for (i = 0x8000; i > 0; i >>= 1)
1665 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1674 phy_write_1bit(db, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1666
1667} 1675}
1668 1676
1669 1677static u16 phy_readby_cr9(struct uli526x_board_info *db, u8 phy_addr, u8 offset)
1670/*
1671 * Read a word data from phy register
1672 */
1673
1674static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1675{ 1678{
1676 int i;
1677 u16 phy_data; 1679 u16 phy_data;
1678 unsigned long ioaddr; 1680 int i;
1679
1680 if(chip_id == PCI_ULI5263_ID)
1681 return phy_readby_cr10(iobase, phy_addr, offset);
1682 /* M5261/M5263 Chip */
1683 ioaddr = iobase + DCR9;
1684 1681
1685 /* Send 33 synchronization clock to Phy controller */ 1682 /* Send 33 synchronization clock to Phy controller */
1686 for (i = 0; i < 35; i++) 1683 for (i = 0; i < 35; i++)
1687 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1684 phy_write_1bit(db, PHY_DATA_1);
1688 1685
1689 /* Send start command(01) to Phy */ 1686 /* Send start command(01) to Phy */
1690 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1687 phy_write_1bit(db, PHY_DATA_0);
1691 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1688 phy_write_1bit(db, PHY_DATA_1);
1692 1689
1693 /* Send read command(10) to Phy */ 1690 /* Send read command(10) to Phy */
1694 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1691 phy_write_1bit(db, PHY_DATA_1);
1695 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1692 phy_write_1bit(db, PHY_DATA_0);
1696 1693
1697 /* Send Phy address */ 1694 /* Send Phy address */
1698 for (i = 0x10; i > 0; i = i >> 1) 1695 for (i = 0x10; i > 0; i = i >> 1)
1699 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1696 phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1700 1697
1701 /* Send register address */ 1698 /* Send register address */
1702 for (i = 0x10; i > 0; i = i >> 1) 1699 for (i = 0x10; i > 0; i = i >> 1)
1703 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1700 phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
1704 1701
1705 /* Skip transition state */ 1702 /* Skip transition state */
1706 phy_read_1bit(ioaddr, chip_id); 1703 phy_read_1bit(db);
1707 1704
1708 /* read 16bit data */ 1705 /* read 16bit data */
1709 for (phy_data = 0, i = 0; i < 16; i++) { 1706 for (phy_data = 0, i = 0; i < 16; i++) {
1710 phy_data <<= 1; 1707 phy_data <<= 1;
1711 phy_data |= phy_read_1bit(ioaddr, chip_id); 1708 phy_data |= phy_read_1bit(db);
1712 } 1709 }
1713 1710
1714 return phy_data; 1711 return phy_data;
1715} 1712}
1716 1713
1717static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset) 1714static u16 phy_readby_cr10(struct uli526x_board_info *db, u8 phy_addr,
1715 u8 offset)
1718{ 1716{
1719 unsigned long ioaddr,cr10_value; 1717 void __iomem *ioaddr = db->ioaddr;
1718 u32 cr10_value = phy_addr;
1720 1719
1721 ioaddr = iobase + DCR10; 1720 cr10_value = (cr10_value << 5) + offset;
1722 cr10_value = phy_addr; 1721 cr10_value = (cr10_value << 16) + 0x08000000;
1723 cr10_value = (cr10_value<<5) + offset; 1722 uw32(DCR10, cr10_value);
1724 cr10_value = (cr10_value<<16) + 0x08000000;
1725 outl(cr10_value,ioaddr);
1726 udelay(1); 1723 udelay(1);
1727 while(1) 1724 while (1) {
1728 { 1725 cr10_value = ur32(DCR10);
1729 cr10_value = inl(ioaddr); 1726 if (cr10_value & 0x10000000)
1730 if(cr10_value&0x10000000)
1731 break; 1727 break;
1732 } 1728 }
1733 return cr10_value & 0x0ffff; 1729 return cr10_value & 0x0ffff;
1734} 1730}
1735 1731
1736static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) 1732static void phy_writeby_cr10(struct uli526x_board_info *db, u8 phy_addr,
1733 u8 offset, u16 phy_data)
1737{ 1734{
1738 unsigned long ioaddr,cr10_value; 1735 void __iomem *ioaddr = db->ioaddr;
1736 u32 cr10_value = phy_addr;
1739 1737
1740 ioaddr = iobase + DCR10; 1738 cr10_value = (cr10_value << 5) + offset;
1741 cr10_value = phy_addr; 1739 cr10_value = (cr10_value << 16) + 0x04000000 + phy_data;
1742 cr10_value = (cr10_value<<5) + offset; 1740 uw32(DCR10, cr10_value);
1743 cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
1744 outl(cr10_value,ioaddr);
1745 udelay(1); 1741 udelay(1);
1746} 1742}
1747/* 1743/*
1748 * Write one bit data to Phy Controller 1744 * Write one bit data to Phy Controller
1749 */ 1745 */
1750 1746
1751static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id) 1747static void phy_write_1bit(struct uli526x_board_info *db, u32 data)
1752{ 1748{
1753 outl(phy_data , ioaddr); /* MII Clock Low */ 1749 void __iomem *ioaddr = db->ioaddr;
1750
1751 uw32(DCR9, data); /* MII Clock Low */
1754 udelay(1); 1752 udelay(1);
1755 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ 1753 uw32(DCR9, data | MDCLKH); /* MII Clock High */
1756 udelay(1); 1754 udelay(1);
1757 outl(phy_data , ioaddr); /* MII Clock Low */ 1755 uw32(DCR9, data); /* MII Clock Low */
1758 udelay(1); 1756 udelay(1);
1759} 1757}
1760 1758
@@ -1763,14 +1761,15 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
1763 * Read one bit phy data from PHY controller 1761 * Read one bit phy data from PHY controller
1764 */ 1762 */
1765 1763
1766static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id) 1764static u16 phy_read_1bit(struct uli526x_board_info *db)
1767{ 1765{
1766 void __iomem *ioaddr = db->ioaddr;
1768 u16 phy_data; 1767 u16 phy_data;
1769 1768
1770 outl(0x50000 , ioaddr); 1769 uw32(DCR9, 0x50000);
1771 udelay(1); 1770 udelay(1);
1772 phy_data = ( inl(ioaddr) >> 19 ) & 0x1; 1771 phy_data = (ur32(DCR9) >> 19) & 0x1;
1773 outl(0x40000 , ioaddr); 1772 uw32(DCR9, 0x40000);
1774 udelay(1); 1773 udelay(1);
1775 1774
1776 return phy_data; 1775 return phy_data;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 2ac6fff0363a..4d1ffca83c82 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -400,9 +400,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
400 No hold time required! */ 400 No hold time required! */
401 iowrite32(0x00000001, ioaddr + PCIBusCfg); 401 iowrite32(0x00000001, ioaddr + PCIBusCfg);
402 402
403 dev->base_addr = (unsigned long)ioaddr;
404 dev->irq = irq;
405
406 np = netdev_priv(dev); 403 np = netdev_priv(dev);
407 np->pci_dev = pdev; 404 np->pci_dev = pdev;
408 np->chip_id = chip_idx; 405 np->chip_id = chip_idx;
@@ -635,17 +632,18 @@ static int netdev_open(struct net_device *dev)
635{ 632{
636 struct netdev_private *np = netdev_priv(dev); 633 struct netdev_private *np = netdev_priv(dev);
637 void __iomem *ioaddr = np->base_addr; 634 void __iomem *ioaddr = np->base_addr;
635 const int irq = np->pci_dev->irq;
638 int i; 636 int i;
639 637
640 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */ 638 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
641 639
642 netif_device_detach(dev); 640 netif_device_detach(dev);
643 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 641 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
644 if (i) 642 if (i)
645 goto out_err; 643 goto out_err;
646 644
647 if (debug > 1) 645 if (debug > 1)
648 netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq); 646 netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
649 647
650 if((i=alloc_ringdesc(dev))) 648 if((i=alloc_ringdesc(dev)))
651 goto out_err; 649 goto out_err;
@@ -932,6 +930,7 @@ static void tx_timeout(struct net_device *dev)
932{ 930{
933 struct netdev_private *np = netdev_priv(dev); 931 struct netdev_private *np = netdev_priv(dev);
934 void __iomem *ioaddr = np->base_addr; 932 void __iomem *ioaddr = np->base_addr;
933 const int irq = np->pci_dev->irq;
935 934
936 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n", 935 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
937 ioread32(ioaddr + IntrStatus)); 936 ioread32(ioaddr + IntrStatus));
@@ -951,7 +950,7 @@ static void tx_timeout(struct net_device *dev)
951 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); 950 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
952 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C)); 951 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
953 952
954 disable_irq(dev->irq); 953 disable_irq(irq);
955 spin_lock_irq(&np->lock); 954 spin_lock_irq(&np->lock);
956 /* 955 /*
957 * Under high load dirty_tx and the internal tx descriptor pointer 956 * Under high load dirty_tx and the internal tx descriptor pointer
@@ -966,7 +965,7 @@ static void tx_timeout(struct net_device *dev)
966 init_rxtx_rings(dev); 965 init_rxtx_rings(dev);
967 init_registers(dev); 966 init_registers(dev);
968 spin_unlock_irq(&np->lock); 967 spin_unlock_irq(&np->lock);
969 enable_irq(dev->irq); 968 enable_irq(irq);
970 969
971 netif_wake_queue(dev); 970 netif_wake_queue(dev);
972 dev->trans_start = jiffies; /* prevent tx timeout */ 971 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1500,7 +1499,7 @@ static int netdev_close(struct net_device *dev)
1500 iowrite32(0x0000, ioaddr + IntrEnable); 1499 iowrite32(0x0000, ioaddr + IntrEnable);
1501 spin_unlock_irq(&np->lock); 1500 spin_unlock_irq(&np->lock);
1502 1501
1503 free_irq(dev->irq, dev); 1502 free_irq(np->pci_dev->irq, dev);
1504 wmb(); 1503 wmb();
1505 netif_device_attach(dev); 1504 netif_device_attach(dev);
1506 1505
@@ -1589,7 +1588,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1589 iowrite32(0, ioaddr + IntrEnable); 1588 iowrite32(0, ioaddr + IntrEnable);
1590 spin_unlock_irq(&np->lock); 1589 spin_unlock_irq(&np->lock);
1591 1590
1592 synchronize_irq(dev->irq); 1591 synchronize_irq(np->pci_dev->irq);
1593 netif_tx_disable(dev); 1592 netif_tx_disable(dev);
1594 1593
1595 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; 1594 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index fdb329fe6e8e..138bf83bc98e 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -41,7 +41,9 @@ MODULE_DESCRIPTION("Xircom Cardbus ethernet driver");
41MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>"); 41MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43 43
44 44#define xw32(reg, val) iowrite32(val, ioaddr + (reg))
45#define xr32(reg) ioread32(ioaddr + (reg))
46#define xr8(reg) ioread8(ioaddr + (reg))
45 47
46/* IO registers on the card, offsets */ 48/* IO registers on the card, offsets */
47#define CSR0 0x00 49#define CSR0 0x00
@@ -83,7 +85,7 @@ struct xircom_private {
83 85
84 struct sk_buff *tx_skb[4]; 86 struct sk_buff *tx_skb[4];
85 87
86 unsigned long io_port; 88 void __iomem *ioaddr;
87 int open; 89 int open;
88 90
89 /* transmit_used is the rotating counter that indicates which transmit 91 /* transmit_used is the rotating counter that indicates which transmit
@@ -137,7 +139,7 @@ static int link_status(struct xircom_private *card);
137 139
138 140
139static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = { 141static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
140 {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,}, 142 { PCI_VDEVICE(XIRCOM, 0x0003), },
141 {0,}, 143 {0,},
142}; 144};
143MODULE_DEVICE_TABLE(pci, xircom_pci_table); 145MODULE_DEVICE_TABLE(pci, xircom_pci_table);
@@ -146,9 +148,7 @@ static struct pci_driver xircom_ops = {
146 .name = "xircom_cb", 148 .name = "xircom_cb",
147 .id_table = xircom_pci_table, 149 .id_table = xircom_pci_table,
148 .probe = xircom_probe, 150 .probe = xircom_probe,
149 .remove = xircom_remove, 151 .remove = __devexit_p(xircom_remove),
150 .suspend =NULL,
151 .resume =NULL
152}; 152};
153 153
154 154
@@ -192,15 +192,18 @@ static const struct net_device_ops netdev_ops = {
192 */ 192 */
193static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) 193static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
194{ 194{
195 struct device *d = &pdev->dev;
195 struct net_device *dev = NULL; 196 struct net_device *dev = NULL;
196 struct xircom_private *private; 197 struct xircom_private *private;
197 unsigned long flags; 198 unsigned long flags;
198 unsigned short tmp16; 199 unsigned short tmp16;
200 int rc;
199 201
200 /* First do the PCI initialisation */ 202 /* First do the PCI initialisation */
201 203
202 if (pci_enable_device(pdev)) 204 rc = pci_enable_device(pdev);
203 return -ENODEV; 205 if (rc < 0)
206 goto out;
204 207
205 /* disable all powermanagement */ 208 /* disable all powermanagement */
206 pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); 209 pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
@@ -211,11 +214,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
211 pci_read_config_word (pdev,PCI_STATUS, &tmp16); 214 pci_read_config_word (pdev,PCI_STATUS, &tmp16);
212 pci_write_config_word (pdev, PCI_STATUS,tmp16); 215 pci_write_config_word (pdev, PCI_STATUS,tmp16);
213 216
214 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { 217 rc = pci_request_regions(pdev, "xircom_cb");
218 if (rc < 0) {
215 pr_err("%s: failed to allocate io-region\n", __func__); 219 pr_err("%s: failed to allocate io-region\n", __func__);
216 return -ENODEV; 220 goto err_disable;
217 } 221 }
218 222
223 rc = -ENOMEM;
219 /* 224 /*
220 Before changing the hardware, allocate the memory. 225 Before changing the hardware, allocate the memory.
221 This way, we can fail gracefully if not enough memory 226 This way, we can fail gracefully if not enough memory
@@ -223,17 +228,21 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
223 */ 228 */
224 dev = alloc_etherdev(sizeof(struct xircom_private)); 229 dev = alloc_etherdev(sizeof(struct xircom_private));
225 if (!dev) 230 if (!dev)
226 goto device_fail; 231 goto err_release;
227 232
228 private = netdev_priv(dev); 233 private = netdev_priv(dev);
229 234
230 /* Allocate the send/receive buffers */ 235 /* Allocate the send/receive buffers */
231 private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); 236 private->rx_buffer = dma_alloc_coherent(d, 8192,
237 &private->rx_dma_handle,
238 GFP_KERNEL);
232 if (private->rx_buffer == NULL) { 239 if (private->rx_buffer == NULL) {
233 pr_err("%s: no memory for rx buffer\n", __func__); 240 pr_err("%s: no memory for rx buffer\n", __func__);
234 goto rx_buf_fail; 241 goto rx_buf_fail;
235 } 242 }
236 private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); 243 private->tx_buffer = dma_alloc_coherent(d, 8192,
244 &private->tx_dma_handle,
245 GFP_KERNEL);
237 if (private->tx_buffer == NULL) { 246 if (private->tx_buffer == NULL) {
238 pr_err("%s: no memory for tx buffer\n", __func__); 247 pr_err("%s: no memory for tx buffer\n", __func__);
239 goto tx_buf_fail; 248 goto tx_buf_fail;
@@ -244,10 +253,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
244 253
245 private->dev = dev; 254 private->dev = dev;
246 private->pdev = pdev; 255 private->pdev = pdev;
247 private->io_port = pci_resource_start(pdev, 0); 256
257 /* IO range. */
258 private->ioaddr = pci_iomap(pdev, 0, 0);
259 if (!private->ioaddr)
260 goto reg_fail;
261
248 spin_lock_init(&private->lock); 262 spin_lock_init(&private->lock);
249 dev->irq = pdev->irq;
250 dev->base_addr = private->io_port;
251 263
252 initialize_card(private); 264 initialize_card(private);
253 read_mac_address(private); 265 read_mac_address(private);
@@ -256,9 +268,10 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
256 dev->netdev_ops = &netdev_ops; 268 dev->netdev_ops = &netdev_ops;
257 pci_set_drvdata(pdev, dev); 269 pci_set_drvdata(pdev, dev);
258 270
259 if (register_netdev(dev)) { 271 rc = register_netdev(dev);
272 if (rc < 0) {
260 pr_err("%s: netdevice registration failed\n", __func__); 273 pr_err("%s: netdevice registration failed\n", __func__);
261 goto reg_fail; 274 goto err_unmap;
262 } 275 }
263 276
264 netdev_info(dev, "Xircom cardbus revision %i at irq %i\n", 277 netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
@@ -273,17 +286,23 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
273 spin_unlock_irqrestore(&private->lock,flags); 286 spin_unlock_irqrestore(&private->lock,flags);
274 287
275 trigger_receive(private); 288 trigger_receive(private);
289out:
290 return rc;
276 291
277 return 0; 292err_unmap:
278 293 pci_iounmap(pdev, private->ioaddr);
279reg_fail: 294reg_fail:
280 kfree(private->tx_buffer); 295 pci_set_drvdata(pdev, NULL);
296 dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
281tx_buf_fail: 297tx_buf_fail:
282 kfree(private->rx_buffer); 298 dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
283rx_buf_fail: 299rx_buf_fail:
284 free_netdev(dev); 300 free_netdev(dev);
285device_fail: 301err_release:
286 return -ENODEV; 302 pci_release_regions(pdev);
303err_disable:
304 pci_disable_device(pdev);
305 goto out;
287} 306}
288 307
289 308
@@ -297,25 +316,28 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
297{ 316{
298 struct net_device *dev = pci_get_drvdata(pdev); 317 struct net_device *dev = pci_get_drvdata(pdev);
299 struct xircom_private *card = netdev_priv(dev); 318 struct xircom_private *card = netdev_priv(dev);
319 struct device *d = &pdev->dev;
300 320
301 pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
302 pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
303
304 release_region(dev->base_addr, 128);
305 unregister_netdev(dev); 321 unregister_netdev(dev);
306 free_netdev(dev); 322 pci_iounmap(pdev, card->ioaddr);
307 pci_set_drvdata(pdev, NULL); 323 pci_set_drvdata(pdev, NULL);
324 dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
325 dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
326 free_netdev(dev);
327 pci_release_regions(pdev);
328 pci_disable_device(pdev);
308} 329}
309 330
310static irqreturn_t xircom_interrupt(int irq, void *dev_instance) 331static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
311{ 332{
312 struct net_device *dev = (struct net_device *) dev_instance; 333 struct net_device *dev = (struct net_device *) dev_instance;
313 struct xircom_private *card = netdev_priv(dev); 334 struct xircom_private *card = netdev_priv(dev);
335 void __iomem *ioaddr = card->ioaddr;
314 unsigned int status; 336 unsigned int status;
315 int i; 337 int i;
316 338
317 spin_lock(&card->lock); 339 spin_lock(&card->lock);
318 status = inl(card->io_port+CSR5); 340 status = xr32(CSR5);
319 341
320#if defined DEBUG && DEBUG > 1 342#if defined DEBUG && DEBUG > 1
321 print_binary(status); 343 print_binary(status);
@@ -345,7 +367,7 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
345 /* Clear all remaining interrupts */ 367 /* Clear all remaining interrupts */
346 status |= 0xffffffff; /* FIXME: make this clear only the 368 status |= 0xffffffff; /* FIXME: make this clear only the
347 real existing bits */ 369 real existing bits */
348 outl(status,card->io_port+CSR5); 370 xw32(CSR5, status);
349 371
350 372
351 for (i=0;i<NUMDESCRIPTORS;i++) 373 for (i=0;i<NUMDESCRIPTORS;i++)
@@ -423,11 +445,11 @@ static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
423static int xircom_open(struct net_device *dev) 445static int xircom_open(struct net_device *dev)
424{ 446{
425 struct xircom_private *xp = netdev_priv(dev); 447 struct xircom_private *xp = netdev_priv(dev);
448 const int irq = xp->pdev->irq;
426 int retval; 449 int retval;
427 450
428 netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", 451 netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", irq);
429 dev->irq); 452 retval = request_irq(irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
430 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
431 if (retval) 453 if (retval)
432 return retval; 454 return retval;
433 455
@@ -459,7 +481,7 @@ static int xircom_close(struct net_device *dev)
459 spin_unlock_irqrestore(&card->lock,flags); 481 spin_unlock_irqrestore(&card->lock,flags);
460 482
461 card->open = 0; 483 card->open = 0;
462 free_irq(dev->irq,dev); 484 free_irq(card->pdev->irq, dev);
463 485
464 return 0; 486 return 0;
465 487
@@ -469,35 +491,39 @@ static int xircom_close(struct net_device *dev)
469#ifdef CONFIG_NET_POLL_CONTROLLER 491#ifdef CONFIG_NET_POLL_CONTROLLER
470static void xircom_poll_controller(struct net_device *dev) 492static void xircom_poll_controller(struct net_device *dev)
471{ 493{
472 disable_irq(dev->irq); 494 struct xircom_private *xp = netdev_priv(dev);
473 xircom_interrupt(dev->irq, dev); 495 const int irq = xp->pdev->irq;
474 enable_irq(dev->irq); 496
497 disable_irq(irq);
498 xircom_interrupt(irq, dev);
499 enable_irq(irq);
475} 500}
476#endif 501#endif
477 502
478 503
479static void initialize_card(struct xircom_private *card) 504static void initialize_card(struct xircom_private *card)
480{ 505{
481 unsigned int val; 506 void __iomem *ioaddr = card->ioaddr;
482 unsigned long flags; 507 unsigned long flags;
508 u32 val;
483 509
484 spin_lock_irqsave(&card->lock, flags); 510 spin_lock_irqsave(&card->lock, flags);
485 511
486 /* First: reset the card */ 512 /* First: reset the card */
487 val = inl(card->io_port + CSR0); 513 val = xr32(CSR0);
488 val |= 0x01; /* Software reset */ 514 val |= 0x01; /* Software reset */
489 outl(val, card->io_port + CSR0); 515 xw32(CSR0, val);
490 516
491 udelay(100); /* give the card some time to reset */ 517 udelay(100); /* give the card some time to reset */
492 518
493 val = inl(card->io_port + CSR0); 519 val = xr32(CSR0);
494 val &= ~0x01; /* disable Software reset */ 520 val &= ~0x01; /* disable Software reset */
495 outl(val, card->io_port + CSR0); 521 xw32(CSR0, val);
496 522
497 523
498 val = 0; /* Value 0x00 is a safe and conservative value 524 val = 0; /* Value 0x00 is a safe and conservative value
499 for the PCI configuration settings */ 525 for the PCI configuration settings */
500 outl(val, card->io_port + CSR0); 526 xw32(CSR0, val);
501 527
502 528
503 disable_all_interrupts(card); 529 disable_all_interrupts(card);
@@ -515,10 +541,9 @@ ignored; I chose zero.
515*/ 541*/
516static void trigger_transmit(struct xircom_private *card) 542static void trigger_transmit(struct xircom_private *card)
517{ 543{
518 unsigned int val; 544 void __iomem *ioaddr = card->ioaddr;
519 545
520 val = 0; 546 xw32(CSR1, 0);
521 outl(val, card->io_port + CSR1);
522} 547}
523 548
524/* 549/*
@@ -530,10 +555,9 @@ ignored; I chose zero.
530*/ 555*/
531static void trigger_receive(struct xircom_private *card) 556static void trigger_receive(struct xircom_private *card)
532{ 557{
533 unsigned int val; 558 void __iomem *ioaddr = card->ioaddr;
534 559
535 val = 0; 560 xw32(CSR2, 0);
536 outl(val, card->io_port + CSR2);
537} 561}
538 562
539/* 563/*
@@ -542,6 +566,7 @@ descriptors and programs the addresses into the card.
542*/ 566*/
543static void setup_descriptors(struct xircom_private *card) 567static void setup_descriptors(struct xircom_private *card)
544{ 568{
569 void __iomem *ioaddr = card->ioaddr;
545 u32 address; 570 u32 address;
546 int i; 571 int i;
547 572
@@ -571,7 +596,7 @@ static void setup_descriptors(struct xircom_private *card)
571 wmb(); 596 wmb();
572 /* Write the receive descriptor ring address to the card */ 597 /* Write the receive descriptor ring address to the card */
573 address = card->rx_dma_handle; 598 address = card->rx_dma_handle;
574 outl(address, card->io_port + CSR3); /* Receive descr list address */ 599 xw32(CSR3, address); /* Receive descr list address */
575 600
576 601
577 /* transmit descriptors */ 602 /* transmit descriptors */
@@ -596,7 +621,7 @@ static void setup_descriptors(struct xircom_private *card)
596 wmb(); 621 wmb();
597 /* wite the transmit descriptor ring to the card */ 622 /* wite the transmit descriptor ring to the card */
598 address = card->tx_dma_handle; 623 address = card->tx_dma_handle;
599 outl(address, card->io_port + CSR4); /* xmit descr list address */ 624 xw32(CSR4, address); /* xmit descr list address */
600} 625}
601 626
602/* 627/*
@@ -605,11 +630,12 @@ valid by setting the address in the card to 0x00.
605*/ 630*/
606static void remove_descriptors(struct xircom_private *card) 631static void remove_descriptors(struct xircom_private *card)
607{ 632{
633 void __iomem *ioaddr = card->ioaddr;
608 unsigned int val; 634 unsigned int val;
609 635
610 val = 0; 636 val = 0;
611 outl(val, card->io_port + CSR3); /* Receive descriptor address */ 637 xw32(CSR3, val); /* Receive descriptor address */
612 outl(val, card->io_port + CSR4); /* Send descriptor address */ 638 xw32(CSR4, val); /* Send descriptor address */
613} 639}
614 640
615/* 641/*
@@ -620,17 +646,17 @@ This function also clears the status-bit.
620*/ 646*/
621static int link_status_changed(struct xircom_private *card) 647static int link_status_changed(struct xircom_private *card)
622{ 648{
649 void __iomem *ioaddr = card->ioaddr;
623 unsigned int val; 650 unsigned int val;
624 651
625 val = inl(card->io_port + CSR5); /* Status register */ 652 val = xr32(CSR5); /* Status register */
626 653 if (!(val & (1 << 27))) /* no change */
627 if ((val & (1 << 27)) == 0) /* no change */
628 return 0; 654 return 0;
629 655
630 /* clear the event by writing a 1 to the bit in the 656 /* clear the event by writing a 1 to the bit in the
631 status register. */ 657 status register. */
632 val = (1 << 27); 658 val = (1 << 27);
633 outl(val, card->io_port + CSR5); 659 xw32(CSR5, val);
634 660
635 return 1; 661 return 1;
636} 662}
@@ -642,11 +668,9 @@ in a non-stopped state.
642*/ 668*/
643static int transmit_active(struct xircom_private *card) 669static int transmit_active(struct xircom_private *card)
644{ 670{
645 unsigned int val; 671 void __iomem *ioaddr = card->ioaddr;
646
647 val = inl(card->io_port + CSR5); /* Status register */
648 672
649 if ((val & (7 << 20)) == 0) /* transmitter disabled */ 673 if (!(xr32(CSR5) & (7 << 20))) /* transmitter disabled */
650 return 0; 674 return 0;
651 675
652 return 1; 676 return 1;
@@ -658,11 +682,9 @@ in a non-stopped state.
658*/ 682*/
659static int receive_active(struct xircom_private *card) 683static int receive_active(struct xircom_private *card)
660{ 684{
661 unsigned int val; 685 void __iomem *ioaddr = card->ioaddr;
662
663 val = inl(card->io_port + CSR5); /* Status register */
664 686
665 if ((val & (7 << 17)) == 0) /* receiver disabled */ 687 if (!(xr32(CSR5) & (7 << 17))) /* receiver disabled */
666 return 0; 688 return 0;
667 689
668 return 1; 690 return 1;
@@ -680,10 +702,11 @@ must be called with the lock held and interrupts disabled.
680*/ 702*/
681static void activate_receiver(struct xircom_private *card) 703static void activate_receiver(struct xircom_private *card)
682{ 704{
705 void __iomem *ioaddr = card->ioaddr;
683 unsigned int val; 706 unsigned int val;
684 int counter; 707 int counter;
685 708
686 val = inl(card->io_port + CSR6); /* Operation mode */ 709 val = xr32(CSR6); /* Operation mode */
687 710
688 /* If the "active" bit is set and the receiver is already 711 /* If the "active" bit is set and the receiver is already
689 active, no need to do the expensive thing */ 712 active, no need to do the expensive thing */
@@ -692,7 +715,7 @@ static void activate_receiver(struct xircom_private *card)
692 715
693 716
694 val = val & ~2; /* disable the receiver */ 717 val = val & ~2; /* disable the receiver */
695 outl(val, card->io_port + CSR6); 718 xw32(CSR6, val);
696 719
697 counter = 10; 720 counter = 10;
698 while (counter > 0) { 721 while (counter > 0) {
@@ -706,9 +729,9 @@ static void activate_receiver(struct xircom_private *card)
706 } 729 }
707 730
708 /* enable the receiver */ 731 /* enable the receiver */
709 val = inl(card->io_port + CSR6); /* Operation mode */ 732 val = xr32(CSR6); /* Operation mode */
710 val = val | 2; /* enable the receiver */ 733 val = val | 2; /* enable the receiver */
711 outl(val, card->io_port + CSR6); 734 xw32(CSR6, val);
712 735
713 /* now wait for the card to activate again */ 736 /* now wait for the card to activate again */
714 counter = 10; 737 counter = 10;
@@ -733,12 +756,13 @@ must be called with the lock held and interrupts disabled.
733*/ 756*/
734static void deactivate_receiver(struct xircom_private *card) 757static void deactivate_receiver(struct xircom_private *card)
735{ 758{
759 void __iomem *ioaddr = card->ioaddr;
736 unsigned int val; 760 unsigned int val;
737 int counter; 761 int counter;
738 762
739 val = inl(card->io_port + CSR6); /* Operation mode */ 763 val = xr32(CSR6); /* Operation mode */
740 val = val & ~2; /* disable the receiver */ 764 val = val & ~2; /* disable the receiver */
741 outl(val, card->io_port + CSR6); 765 xw32(CSR6, val);
742 766
743 counter = 10; 767 counter = 10;
744 while (counter > 0) { 768 while (counter > 0) {
@@ -765,10 +789,11 @@ must be called with the lock held and interrupts disabled.
765*/ 789*/
766static void activate_transmitter(struct xircom_private *card) 790static void activate_transmitter(struct xircom_private *card)
767{ 791{
792 void __iomem *ioaddr = card->ioaddr;
768 unsigned int val; 793 unsigned int val;
769 int counter; 794 int counter;
770 795
771 val = inl(card->io_port + CSR6); /* Operation mode */ 796 val = xr32(CSR6); /* Operation mode */
772 797
773 /* If the "active" bit is set and the receiver is already 798 /* If the "active" bit is set and the receiver is already
774 active, no need to do the expensive thing */ 799 active, no need to do the expensive thing */
@@ -776,7 +801,7 @@ static void activate_transmitter(struct xircom_private *card)
776 return; 801 return;
777 802
778 val = val & ~(1 << 13); /* disable the transmitter */ 803 val = val & ~(1 << 13); /* disable the transmitter */
779 outl(val, card->io_port + CSR6); 804 xw32(CSR6, val);
780 805
781 counter = 10; 806 counter = 10;
782 while (counter > 0) { 807 while (counter > 0) {
@@ -791,9 +816,9 @@ static void activate_transmitter(struct xircom_private *card)
791 } 816 }
792 817
793 /* enable the transmitter */ 818 /* enable the transmitter */
794 val = inl(card->io_port + CSR6); /* Operation mode */ 819 val = xr32(CSR6); /* Operation mode */
795 val = val | (1 << 13); /* enable the transmitter */ 820 val = val | (1 << 13); /* enable the transmitter */
796 outl(val, card->io_port + CSR6); 821 xw32(CSR6, val);
797 822
798 /* now wait for the card to activate again */ 823 /* now wait for the card to activate again */
799 counter = 10; 824 counter = 10;
@@ -818,12 +843,13 @@ must be called with the lock held and interrupts disabled.
818*/ 843*/
819static void deactivate_transmitter(struct xircom_private *card) 844static void deactivate_transmitter(struct xircom_private *card)
820{ 845{
846 void __iomem *ioaddr = card->ioaddr;
821 unsigned int val; 847 unsigned int val;
822 int counter; 848 int counter;
823 849
824 val = inl(card->io_port + CSR6); /* Operation mode */ 850 val = xr32(CSR6); /* Operation mode */
825 val = val & ~2; /* disable the transmitter */ 851 val = val & ~2; /* disable the transmitter */
826 outl(val, card->io_port + CSR6); 852 xw32(CSR6, val);
827 853
828 counter = 20; 854 counter = 20;
829 while (counter > 0) { 855 while (counter > 0) {
@@ -846,11 +872,12 @@ must be called with the lock held and interrupts disabled.
846*/ 872*/
847static void enable_transmit_interrupt(struct xircom_private *card) 873static void enable_transmit_interrupt(struct xircom_private *card)
848{ 874{
875 void __iomem *ioaddr = card->ioaddr;
849 unsigned int val; 876 unsigned int val;
850 877
851 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 878 val = xr32(CSR7); /* Interrupt enable register */
852 val |= 1; /* enable the transmit interrupt */ 879 val |= 1; /* enable the transmit interrupt */
853 outl(val, card->io_port + CSR7); 880 xw32(CSR7, val);
854} 881}
855 882
856 883
@@ -861,11 +888,12 @@ must be called with the lock held and interrupts disabled.
861*/ 888*/
862static void enable_receive_interrupt(struct xircom_private *card) 889static void enable_receive_interrupt(struct xircom_private *card)
863{ 890{
891 void __iomem *ioaddr = card->ioaddr;
864 unsigned int val; 892 unsigned int val;
865 893
866 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 894 val = xr32(CSR7); /* Interrupt enable register */
867 val = val | (1 << 6); /* enable the receive interrupt */ 895 val = val | (1 << 6); /* enable the receive interrupt */
868 outl(val, card->io_port + CSR7); 896 xw32(CSR7, val);
869} 897}
870 898
871/* 899/*
@@ -875,11 +903,12 @@ must be called with the lock held and interrupts disabled.
875*/ 903*/
876static void enable_link_interrupt(struct xircom_private *card) 904static void enable_link_interrupt(struct xircom_private *card)
877{ 905{
906 void __iomem *ioaddr = card->ioaddr;
878 unsigned int val; 907 unsigned int val;
879 908
880 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 909 val = xr32(CSR7); /* Interrupt enable register */
881 val = val | (1 << 27); /* enable the link status chage interrupt */ 910 val = val | (1 << 27); /* enable the link status chage interrupt */
882 outl(val, card->io_port + CSR7); 911 xw32(CSR7, val);
883} 912}
884 913
885 914
@@ -891,10 +920,9 @@ must be called with the lock held and interrupts disabled.
891*/ 920*/
892static void disable_all_interrupts(struct xircom_private *card) 921static void disable_all_interrupts(struct xircom_private *card)
893{ 922{
894 unsigned int val; 923 void __iomem *ioaddr = card->ioaddr;
895 924
896 val = 0; /* disable all interrupts */ 925 xw32(CSR7, 0);
897 outl(val, card->io_port + CSR7);
898} 926}
899 927
900/* 928/*
@@ -904,9 +932,10 @@ must be called with the lock held and interrupts disabled.
904*/ 932*/
905static void enable_common_interrupts(struct xircom_private *card) 933static void enable_common_interrupts(struct xircom_private *card)
906{ 934{
935 void __iomem *ioaddr = card->ioaddr;
907 unsigned int val; 936 unsigned int val;
908 937
909 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 938 val = xr32(CSR7); /* Interrupt enable register */
910 val |= (1<<16); /* Normal Interrupt Summary */ 939 val |= (1<<16); /* Normal Interrupt Summary */
911 val |= (1<<15); /* Abnormal Interrupt Summary */ 940 val |= (1<<15); /* Abnormal Interrupt Summary */
912 val |= (1<<13); /* Fatal bus error */ 941 val |= (1<<13); /* Fatal bus error */
@@ -915,7 +944,7 @@ static void enable_common_interrupts(struct xircom_private *card)
915 val |= (1<<5); /* Transmit Underflow */ 944 val |= (1<<5); /* Transmit Underflow */
916 val |= (1<<2); /* Transmit Buffer Unavailable */ 945 val |= (1<<2); /* Transmit Buffer Unavailable */
917 val |= (1<<1); /* Transmit Process Stopped */ 946 val |= (1<<1); /* Transmit Process Stopped */
918 outl(val, card->io_port + CSR7); 947 xw32(CSR7, val);
919} 948}
920 949
921/* 950/*
@@ -925,11 +954,12 @@ must be called with the lock held and interrupts disabled.
925*/ 954*/
926static int enable_promisc(struct xircom_private *card) 955static int enable_promisc(struct xircom_private *card)
927{ 956{
957 void __iomem *ioaddr = card->ioaddr;
928 unsigned int val; 958 unsigned int val;
929 959
930 val = inl(card->io_port + CSR6); 960 val = xr32(CSR6);
931 val = val | (1 << 6); 961 val = val | (1 << 6);
932 outl(val, card->io_port + CSR6); 962 xw32(CSR6, val);
933 963
934 return 1; 964 return 1;
935} 965}
@@ -944,13 +974,16 @@ Must be called in locked state with interrupts disabled
944*/ 974*/
945static int link_status(struct xircom_private *card) 975static int link_status(struct xircom_private *card)
946{ 976{
947 unsigned int val; 977 void __iomem *ioaddr = card->ioaddr;
978 u8 val;
948 979
949 val = inb(card->io_port + CSR12); 980 val = xr8(CSR12);
950 981
951 if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */ 982 /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
983 if (!(val & (1 << 2)))
952 return 10; 984 return 10;
953 if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */ 985 /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
986 if (!(val & (1 << 1)))
954 return 100; 987 return 100;
955 988
956 /* If we get here -> no link at all */ 989 /* If we get here -> no link at all */
@@ -969,29 +1002,31 @@ static int link_status(struct xircom_private *card)
969 */ 1002 */
970static void read_mac_address(struct xircom_private *card) 1003static void read_mac_address(struct xircom_private *card)
971{ 1004{
972 unsigned char j, tuple, link, data_id, data_count; 1005 void __iomem *ioaddr = card->ioaddr;
973 unsigned long flags; 1006 unsigned long flags;
1007 u8 link;
974 int i; 1008 int i;
975 1009
976 spin_lock_irqsave(&card->lock, flags); 1010 spin_lock_irqsave(&card->lock, flags);
977 1011
978 outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ 1012 xw32(CSR9, 1 << 12); /* enable boot rom access */
979 for (i = 0x100; i < 0x1f7; i += link + 2) { 1013 for (i = 0x100; i < 0x1f7; i += link + 2) {
980 outl(i, card->io_port + CSR10); 1014 u8 tuple, data_id, data_count;
981 tuple = inl(card->io_port + CSR9) & 0xff; 1015
982 outl(i + 1, card->io_port + CSR10); 1016 xw32(CSR10, i);
983 link = inl(card->io_port + CSR9) & 0xff; 1017 tuple = xr32(CSR9);
984 outl(i + 2, card->io_port + CSR10); 1018 xw32(CSR10, i + 1);
985 data_id = inl(card->io_port + CSR9) & 0xff; 1019 link = xr32(CSR9);
986 outl(i + 3, card->io_port + CSR10); 1020 xw32(CSR10, i + 2);
987 data_count = inl(card->io_port + CSR9) & 0xff; 1021 data_id = xr32(CSR9);
1022 xw32(CSR10, i + 3);
1023 data_count = xr32(CSR9);
988 if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { 1024 if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
989 /* 1025 int j;
990 * This is it. We have the data we want. 1026
991 */
992 for (j = 0; j < 6; j++) { 1027 for (j = 0; j < 6; j++) {
993 outl(i + j + 4, card->io_port + CSR10); 1028 xw32(CSR10, i + j + 4);
994 card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff; 1029 card->dev->dev_addr[j] = xr32(CSR9) & 0xff;
995 } 1030 }
996 break; 1031 break;
997 } else if (link == 0) { 1032 } else if (link == 0) {
@@ -1010,6 +1045,7 @@ static void read_mac_address(struct xircom_private *card)
1010 */ 1045 */
1011static void transceiver_voodoo(struct xircom_private *card) 1046static void transceiver_voodoo(struct xircom_private *card)
1012{ 1047{
1048 void __iomem *ioaddr = card->ioaddr;
1013 unsigned long flags; 1049 unsigned long flags;
1014 1050
1015 /* disable all powermanagement */ 1051 /* disable all powermanagement */
@@ -1019,14 +1055,14 @@ static void transceiver_voodoo(struct xircom_private *card)
1019 1055
1020 spin_lock_irqsave(&card->lock, flags); 1056 spin_lock_irqsave(&card->lock, flags);
1021 1057
1022 outl(0x0008, card->io_port + CSR15); 1058 xw32(CSR15, 0x0008);
1023 udelay(25); 1059 udelay(25);
1024 outl(0xa8050000, card->io_port + CSR15); 1060 xw32(CSR15, 0xa8050000);
1025 udelay(25); 1061 udelay(25);
1026 outl(0xa00f0000, card->io_port + CSR15); 1062 xw32(CSR15, 0xa00f0000);
1027 udelay(25); 1063 udelay(25);
1028 1064
1029 spin_unlock_irqrestore(&card->lock, flags); 1065 spin_unlock_irqrestore(&card->lock, flags);
1030 1066
1031 netif_start_queue(card->dev); 1067 netif_start_queue(card->dev);
1032} 1068}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index b2dc2c81a147..a059f0c27e28 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -16,6 +16,13 @@
16#include "dl2k.h" 16#include "dl2k.h"
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18 18
19#define dw32(reg, val) iowrite32(val, ioaddr + (reg))
20#define dw16(reg, val) iowrite16(val, ioaddr + (reg))
21#define dw8(reg, val) iowrite8(val, ioaddr + (reg))
22#define dr32(reg) ioread32(ioaddr + (reg))
23#define dr16(reg) ioread16(ioaddr + (reg))
24#define dr8(reg) ioread8(ioaddr + (reg))
25
19static char version[] __devinitdata = 26static char version[] __devinitdata =
20 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; 27 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
21#define MAX_UNITS 8 28#define MAX_UNITS 8
@@ -49,8 +56,13 @@ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
49/* Enable the default interrupts */ 56/* Enable the default interrupts */
50#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \ 57#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
51 UpdateStats | LinkEvent) 58 UpdateStats | LinkEvent)
52#define EnableInt() \ 59
53writew(DEFAULT_INTR, ioaddr + IntEnable) 60static void dl2k_enable_int(struct netdev_private *np)
61{
62 void __iomem *ioaddr = np->ioaddr;
63
64 dw16(IntEnable, DEFAULT_INTR);
65}
54 66
55static const int max_intrloop = 50; 67static const int max_intrloop = 50;
56static const int multicast_filter_limit = 0x40; 68static const int multicast_filter_limit = 0x40;
@@ -73,7 +85,7 @@ static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
73static int rio_close (struct net_device *dev); 85static int rio_close (struct net_device *dev);
74static int find_miiphy (struct net_device *dev); 86static int find_miiphy (struct net_device *dev);
75static int parse_eeprom (struct net_device *dev); 87static int parse_eeprom (struct net_device *dev);
76static int read_eeprom (long ioaddr, int eep_addr); 88static int read_eeprom (struct netdev_private *, int eep_addr);
77static int mii_wait_link (struct net_device *dev, int wait); 89static int mii_wait_link (struct net_device *dev, int wait);
78static int mii_set_media (struct net_device *dev); 90static int mii_set_media (struct net_device *dev);
79static int mii_get_media (struct net_device *dev); 91static int mii_get_media (struct net_device *dev);
@@ -106,7 +118,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
106 static int card_idx; 118 static int card_idx;
107 int chip_idx = ent->driver_data; 119 int chip_idx = ent->driver_data;
108 int err, irq; 120 int err, irq;
109 long ioaddr; 121 void __iomem *ioaddr;
110 static int version_printed; 122 static int version_printed;
111 void *ring_space; 123 void *ring_space;
112 dma_addr_t ring_dma; 124 dma_addr_t ring_dma;
@@ -124,26 +136,29 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
124 goto err_out_disable; 136 goto err_out_disable;
125 137
126 pci_set_master (pdev); 138 pci_set_master (pdev);
139
140 err = -ENOMEM;
141
127 dev = alloc_etherdev (sizeof (*np)); 142 dev = alloc_etherdev (sizeof (*np));
128 if (!dev) { 143 if (!dev)
129 err = -ENOMEM;
130 goto err_out_res; 144 goto err_out_res;
131 }
132 SET_NETDEV_DEV(dev, &pdev->dev); 145 SET_NETDEV_DEV(dev, &pdev->dev);
133 146
134#ifdef MEM_MAPPING 147 np = netdev_priv(dev);
135 ioaddr = pci_resource_start (pdev, 1); 148
136 ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE); 149 /* IO registers range. */
137 if (!ioaddr) { 150 ioaddr = pci_iomap(pdev, 0, 0);
138 err = -ENOMEM; 151 if (!ioaddr)
139 goto err_out_dev; 152 goto err_out_dev;
140 } 153 np->eeprom_addr = ioaddr;
141#else 154
142 ioaddr = pci_resource_start (pdev, 0); 155#ifdef MEM_MAPPING
156 /* MM registers range. */
157 ioaddr = pci_iomap(pdev, 1, 0);
158 if (!ioaddr)
159 goto err_out_iounmap;
143#endif 160#endif
144 dev->base_addr = ioaddr; 161 np->ioaddr = ioaddr;
145 dev->irq = irq;
146 np = netdev_priv(dev);
147 np->chip_id = chip_idx; 162 np->chip_id = chip_idx;
148 np->pdev = pdev; 163 np->pdev = pdev;
149 spin_lock_init (&np->tx_lock); 164 spin_lock_init (&np->tx_lock);
@@ -239,7 +254,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
239 goto err_out_unmap_rx; 254 goto err_out_unmap_rx;
240 255
241 /* Fiber device? */ 256 /* Fiber device? */
242 np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0; 257 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
243 np->link_status = 0; 258 np->link_status = 0;
244 /* Set media and reset PHY */ 259 /* Set media and reset PHY */
245 if (np->phy_media) { 260 if (np->phy_media) {
@@ -276,22 +291,20 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
276 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); 291 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
277 return 0; 292 return 0;
278 293
279 err_out_unmap_rx: 294err_out_unmap_rx:
280 pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); 295 pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
281 err_out_unmap_tx: 296err_out_unmap_tx:
282 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); 297 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
283 err_out_iounmap: 298err_out_iounmap:
284#ifdef MEM_MAPPING 299#ifdef MEM_MAPPING
285 iounmap ((void *) ioaddr); 300 pci_iounmap(pdev, np->ioaddr);
286
287 err_out_dev:
288#endif 301#endif
302 pci_iounmap(pdev, np->eeprom_addr);
303err_out_dev:
289 free_netdev (dev); 304 free_netdev (dev);
290 305err_out_res:
291 err_out_res:
292 pci_release_regions (pdev); 306 pci_release_regions (pdev);
293 307err_out_disable:
294 err_out_disable:
295 pci_disable_device (pdev); 308 pci_disable_device (pdev);
296 return err; 309 return err;
297} 310}
@@ -299,11 +312,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
299static int 312static int
300find_miiphy (struct net_device *dev) 313find_miiphy (struct net_device *dev)
301{ 314{
315 struct netdev_private *np = netdev_priv(dev);
302 int i, phy_found = 0; 316 int i, phy_found = 0;
303 struct netdev_private *np;
304 long ioaddr;
305 np = netdev_priv(dev); 317 np = netdev_priv(dev);
306 ioaddr = dev->base_addr;
307 np->phy_addr = 1; 318 np->phy_addr = 1;
308 319
309 for (i = 31; i >= 0; i--) { 320 for (i = 31; i >= 0; i--) {
@@ -323,26 +334,19 @@ find_miiphy (struct net_device *dev)
323static int 334static int
324parse_eeprom (struct net_device *dev) 335parse_eeprom (struct net_device *dev)
325{ 336{
337 struct netdev_private *np = netdev_priv(dev);
338 void __iomem *ioaddr = np->ioaddr;
326 int i, j; 339 int i, j;
327 long ioaddr = dev->base_addr;
328 u8 sromdata[256]; 340 u8 sromdata[256];
329 u8 *psib; 341 u8 *psib;
330 u32 crc; 342 u32 crc;
331 PSROM_t psrom = (PSROM_t) sromdata; 343 PSROM_t psrom = (PSROM_t) sromdata;
332 struct netdev_private *np = netdev_priv(dev);
333 344
334 int cid, next; 345 int cid, next;
335 346
336#ifdef MEM_MAPPING 347 for (i = 0; i < 128; i++)
337 ioaddr = pci_resource_start (np->pdev, 0); 348 ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
338#endif 349
339 /* Read eeprom */
340 for (i = 0; i < 128; i++) {
341 ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
342 }
343#ifdef MEM_MAPPING
344 ioaddr = dev->base_addr;
345#endif
346 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ 350 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
347 /* Check CRC */ 351 /* Check CRC */
348 crc = ~ether_crc_le (256 - 4, sromdata); 352 crc = ~ether_crc_le (256 - 4, sromdata);
@@ -378,8 +382,7 @@ parse_eeprom (struct net_device *dev)
378 return 0; 382 return 0;
379 case 2: /* Duplex Polarity */ 383 case 2: /* Duplex Polarity */
380 np->duplex_polarity = psib[i]; 384 np->duplex_polarity = psib[i];
381 writeb (readb (ioaddr + PhyCtrl) | psib[i], 385 dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
382 ioaddr + PhyCtrl);
383 break; 386 break;
384 case 3: /* Wake Polarity */ 387 case 3: /* Wake Polarity */
385 np->wake_polarity = psib[i]; 388 np->wake_polarity = psib[i];
@@ -407,59 +410,57 @@ static int
407rio_open (struct net_device *dev) 410rio_open (struct net_device *dev)
408{ 411{
409 struct netdev_private *np = netdev_priv(dev); 412 struct netdev_private *np = netdev_priv(dev);
410 long ioaddr = dev->base_addr; 413 void __iomem *ioaddr = np->ioaddr;
414 const int irq = np->pdev->irq;
411 int i; 415 int i;
412 u16 macctrl; 416 u16 macctrl;
413 417
414 i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev); 418 i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
415 if (i) 419 if (i)
416 return i; 420 return i;
417 421
418 /* Reset all logic functions */ 422 /* Reset all logic functions */
419 writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset, 423 dw16(ASICCtrl + 2,
420 ioaddr + ASICCtrl + 2); 424 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
421 mdelay(10); 425 mdelay(10);
422 426
423 /* DebugCtrl bit 4, 5, 9 must set */ 427 /* DebugCtrl bit 4, 5, 9 must set */
424 writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl); 428 dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
425 429
426 /* Jumbo frame */ 430 /* Jumbo frame */
427 if (np->jumbo != 0) 431 if (np->jumbo != 0)
428 writew (MAX_JUMBO+14, ioaddr + MaxFrameSize); 432 dw16(MaxFrameSize, MAX_JUMBO+14);
429 433
430 alloc_list (dev); 434 alloc_list (dev);
431 435
432 /* Get station address */ 436 /* Get station address */
433 for (i = 0; i < 6; i++) 437 for (i = 0; i < 6; i++)
434 writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i); 438 dw8(StationAddr0 + i, dev->dev_addr[i]);
435 439
436 set_multicast (dev); 440 set_multicast (dev);
437 if (np->coalesce) { 441 if (np->coalesce) {
438 writel (np->rx_coalesce | np->rx_timeout << 16, 442 dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
439 ioaddr + RxDMAIntCtrl);
440 } 443 }
441 /* Set RIO to poll every N*320nsec. */ 444 /* Set RIO to poll every N*320nsec. */
442 writeb (0x20, ioaddr + RxDMAPollPeriod); 445 dw8(RxDMAPollPeriod, 0x20);
443 writeb (0xff, ioaddr + TxDMAPollPeriod); 446 dw8(TxDMAPollPeriod, 0xff);
444 writeb (0x30, ioaddr + RxDMABurstThresh); 447 dw8(RxDMABurstThresh, 0x30);
445 writeb (0x30, ioaddr + RxDMAUrgentThresh); 448 dw8(RxDMAUrgentThresh, 0x30);
446 writel (0x0007ffff, ioaddr + RmonStatMask); 449 dw32(RmonStatMask, 0x0007ffff);
447 /* clear statistics */ 450 /* clear statistics */
448 clear_stats (dev); 451 clear_stats (dev);
449 452
450 /* VLAN supported */ 453 /* VLAN supported */
451 if (np->vlan) { 454 if (np->vlan) {
452 /* priority field in RxDMAIntCtrl */ 455 /* priority field in RxDMAIntCtrl */
453 writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10, 456 dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
454 ioaddr + RxDMAIntCtrl);
455 /* VLANId */ 457 /* VLANId */
456 writew (np->vlan, ioaddr + VLANId); 458 dw16(VLANId, np->vlan);
457 /* Length/Type should be 0x8100 */ 459 /* Length/Type should be 0x8100 */
458 writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag); 460 dw32(VLANTag, 0x8100 << 16 | np->vlan);
459 /* Enable AutoVLANuntagging, but disable AutoVLANtagging. 461 /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
460 VLAN information tagged by TFC' VID, CFI fields. */ 462 VLAN information tagged by TFC' VID, CFI fields. */
461 writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging, 463 dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
462 ioaddr + MACCtrl);
463 } 464 }
464 465
465 init_timer (&np->timer); 466 init_timer (&np->timer);
@@ -469,20 +470,18 @@ rio_open (struct net_device *dev)
469 add_timer (&np->timer); 470 add_timer (&np->timer);
470 471
471 /* Start Tx/Rx */ 472 /* Start Tx/Rx */
472 writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable, 473 dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);
473 ioaddr + MACCtrl);
474 474
475 macctrl = 0; 475 macctrl = 0;
476 macctrl |= (np->vlan) ? AutoVLANuntagging : 0; 476 macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
477 macctrl |= (np->full_duplex) ? DuplexSelect : 0; 477 macctrl |= (np->full_duplex) ? DuplexSelect : 0;
478 macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; 478 macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
479 macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0; 479 macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
480 writew(macctrl, ioaddr + MACCtrl); 480 dw16(MACCtrl, macctrl);
481 481
482 netif_start_queue (dev); 482 netif_start_queue (dev);
483 483
484 /* Enable default interrupts */ 484 dl2k_enable_int(np);
485 EnableInt ();
486 return 0; 485 return 0;
487} 486}
488 487
@@ -533,10 +532,11 @@ rio_timer (unsigned long data)
533static void 532static void
534rio_tx_timeout (struct net_device *dev) 533rio_tx_timeout (struct net_device *dev)
535{ 534{
536 long ioaddr = dev->base_addr; 535 struct netdev_private *np = netdev_priv(dev);
536 void __iomem *ioaddr = np->ioaddr;
537 537
538 printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n", 538 printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
539 dev->name, readl (ioaddr + TxStatus)); 539 dev->name, dr32(TxStatus));
540 rio_free_tx(dev, 0); 540 rio_free_tx(dev, 0);
541 dev->if_port = 0; 541 dev->if_port = 0;
542 dev->trans_start = jiffies; /* prevent tx timeout */ 542 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -547,6 +547,7 @@ static void
547alloc_list (struct net_device *dev) 547alloc_list (struct net_device *dev)
548{ 548{
549 struct netdev_private *np = netdev_priv(dev); 549 struct netdev_private *np = netdev_priv(dev);
550 void __iomem *ioaddr = np->ioaddr;
550 int i; 551 int i;
551 552
552 np->cur_rx = np->cur_tx = 0; 553 np->cur_rx = np->cur_tx = 0;
@@ -594,24 +595,23 @@ alloc_list (struct net_device *dev)
594 } 595 }
595 596
596 /* Set RFDListPtr */ 597 /* Set RFDListPtr */
597 writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0); 598 dw32(RFDListPtr0, np->rx_ring_dma);
598 writel (0, dev->base_addr + RFDListPtr1); 599 dw32(RFDListPtr1, 0);
599} 600}
600 601
601static netdev_tx_t 602static netdev_tx_t
602start_xmit (struct sk_buff *skb, struct net_device *dev) 603start_xmit (struct sk_buff *skb, struct net_device *dev)
603{ 604{
604 struct netdev_private *np = netdev_priv(dev); 605 struct netdev_private *np = netdev_priv(dev);
606 void __iomem *ioaddr = np->ioaddr;
605 struct netdev_desc *txdesc; 607 struct netdev_desc *txdesc;
606 unsigned entry; 608 unsigned entry;
607 u32 ioaddr;
608 u64 tfc_vlan_tag = 0; 609 u64 tfc_vlan_tag = 0;
609 610
610 if (np->link_status == 0) { /* Link Down */ 611 if (np->link_status == 0) { /* Link Down */
611 dev_kfree_skb(skb); 612 dev_kfree_skb(skb);
612 return NETDEV_TX_OK; 613 return NETDEV_TX_OK;
613 } 614 }
614 ioaddr = dev->base_addr;
615 entry = np->cur_tx % TX_RING_SIZE; 615 entry = np->cur_tx % TX_RING_SIZE;
616 np->tx_skbuff[entry] = skb; 616 np->tx_skbuff[entry] = skb;
617 txdesc = &np->tx_ring[entry]; 617 txdesc = &np->tx_ring[entry];
@@ -646,9 +646,9 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
646 (1 << FragCountShift)); 646 (1 << FragCountShift));
647 647
648 /* TxDMAPollNow */ 648 /* TxDMAPollNow */
649 writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl); 649 dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
650 /* Schedule ISR */ 650 /* Schedule ISR */
651 writel(10000, ioaddr + CountDown); 651 dw32(CountDown, 10000);
652 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; 652 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
653 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE 653 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
654 < TX_QUEUE_LEN - 1 && np->speed != 10) { 654 < TX_QUEUE_LEN - 1 && np->speed != 10) {
@@ -658,10 +658,10 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
658 } 658 }
659 659
660 /* The first TFDListPtr */ 660 /* The first TFDListPtr */
661 if (readl (dev->base_addr + TFDListPtr0) == 0) { 661 if (!dr32(TFDListPtr0)) {
662 writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc), 662 dw32(TFDListPtr0, np->tx_ring_dma +
663 dev->base_addr + TFDListPtr0); 663 entry * sizeof (struct netdev_desc));
664 writel (0, dev->base_addr + TFDListPtr1); 664 dw32(TFDListPtr1, 0);
665 } 665 }
666 666
667 return NETDEV_TX_OK; 667 return NETDEV_TX_OK;
@@ -671,17 +671,15 @@ static irqreturn_t
671rio_interrupt (int irq, void *dev_instance) 671rio_interrupt (int irq, void *dev_instance)
672{ 672{
673 struct net_device *dev = dev_instance; 673 struct net_device *dev = dev_instance;
674 struct netdev_private *np; 674 struct netdev_private *np = netdev_priv(dev);
675 void __iomem *ioaddr = np->ioaddr;
675 unsigned int_status; 676 unsigned int_status;
676 long ioaddr;
677 int cnt = max_intrloop; 677 int cnt = max_intrloop;
678 int handled = 0; 678 int handled = 0;
679 679
680 ioaddr = dev->base_addr;
681 np = netdev_priv(dev);
682 while (1) { 680 while (1) {
683 int_status = readw (ioaddr + IntStatus); 681 int_status = dr16(IntStatus);
684 writew (int_status, ioaddr + IntStatus); 682 dw16(IntStatus, int_status);
685 int_status &= DEFAULT_INTR; 683 int_status &= DEFAULT_INTR;
686 if (int_status == 0 || --cnt < 0) 684 if (int_status == 0 || --cnt < 0)
687 break; 685 break;
@@ -692,7 +690,7 @@ rio_interrupt (int irq, void *dev_instance)
692 /* TxDMAComplete interrupt */ 690 /* TxDMAComplete interrupt */
693 if ((int_status & (TxDMAComplete|IntRequested))) { 691 if ((int_status & (TxDMAComplete|IntRequested))) {
694 int tx_status; 692 int tx_status;
695 tx_status = readl (ioaddr + TxStatus); 693 tx_status = dr32(TxStatus);
696 if (tx_status & 0x01) 694 if (tx_status & 0x01)
697 tx_error (dev, tx_status); 695 tx_error (dev, tx_status);
698 /* Free used tx skbuffs */ 696 /* Free used tx skbuffs */
@@ -705,7 +703,7 @@ rio_interrupt (int irq, void *dev_instance)
705 rio_error (dev, int_status); 703 rio_error (dev, int_status);
706 } 704 }
707 if (np->cur_tx != np->old_tx) 705 if (np->cur_tx != np->old_tx)
708 writel (100, ioaddr + CountDown); 706 dw32(CountDown, 100);
709 return IRQ_RETVAL(handled); 707 return IRQ_RETVAL(handled);
710} 708}
711 709
@@ -765,13 +763,11 @@ rio_free_tx (struct net_device *dev, int irq)
765static void 763static void
766tx_error (struct net_device *dev, int tx_status) 764tx_error (struct net_device *dev, int tx_status)
767{ 765{
768 struct netdev_private *np; 766 struct netdev_private *np = netdev_priv(dev);
769 long ioaddr = dev->base_addr; 767 void __iomem *ioaddr = np->ioaddr;
770 int frame_id; 768 int frame_id;
771 int i; 769 int i;
772 770
773 np = netdev_priv(dev);
774
775 frame_id = (tx_status & 0xffff0000); 771 frame_id = (tx_status & 0xffff0000);
776 printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", 772 printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
777 dev->name, tx_status, frame_id); 773 dev->name, tx_status, frame_id);
@@ -779,23 +775,21 @@ tx_error (struct net_device *dev, int tx_status)
779 /* Ttransmit Underrun */ 775 /* Ttransmit Underrun */
780 if (tx_status & 0x10) { 776 if (tx_status & 0x10) {
781 np->stats.tx_fifo_errors++; 777 np->stats.tx_fifo_errors++;
782 writew (readw (ioaddr + TxStartThresh) + 0x10, 778 dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
783 ioaddr + TxStartThresh);
784 /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */ 779 /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
785 writew (TxReset | DMAReset | FIFOReset | NetworkReset, 780 dw16(ASICCtrl + 2,
786 ioaddr + ASICCtrl + 2); 781 TxReset | DMAReset | FIFOReset | NetworkReset);
787 /* Wait for ResetBusy bit clear */ 782 /* Wait for ResetBusy bit clear */
788 for (i = 50; i > 0; i--) { 783 for (i = 50; i > 0; i--) {
789 if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) 784 if (!(dr16(ASICCtrl + 2) & ResetBusy))
790 break; 785 break;
791 mdelay (1); 786 mdelay (1);
792 } 787 }
793 rio_free_tx (dev, 1); 788 rio_free_tx (dev, 1);
794 /* Reset TFDListPtr */ 789 /* Reset TFDListPtr */
795 writel (np->tx_ring_dma + 790 dw32(TFDListPtr0, np->tx_ring_dma +
796 np->old_tx * sizeof (struct netdev_desc), 791 np->old_tx * sizeof (struct netdev_desc));
797 dev->base_addr + TFDListPtr0); 792 dw32(TFDListPtr1, 0);
798 writel (0, dev->base_addr + TFDListPtr1);
799 793
800 /* Let TxStartThresh stay default value */ 794 /* Let TxStartThresh stay default value */
801 } 795 }
@@ -803,10 +797,10 @@ tx_error (struct net_device *dev, int tx_status)
803 if (tx_status & 0x04) { 797 if (tx_status & 0x04) {
804 np->stats.tx_fifo_errors++; 798 np->stats.tx_fifo_errors++;
805 /* TxReset and clear FIFO */ 799 /* TxReset and clear FIFO */
806 writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2); 800 dw16(ASICCtrl + 2, TxReset | FIFOReset);
807 /* Wait reset done */ 801 /* Wait reset done */
808 for (i = 50; i > 0; i--) { 802 for (i = 50; i > 0; i--) {
809 if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) 803 if (!(dr16(ASICCtrl + 2) & ResetBusy))
810 break; 804 break;
811 mdelay (1); 805 mdelay (1);
812 } 806 }
@@ -821,7 +815,7 @@ tx_error (struct net_device *dev, int tx_status)
821 np->stats.collisions++; 815 np->stats.collisions++;
822#endif 816#endif
823 /* Restart the Tx */ 817 /* Restart the Tx */
824 writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl); 818 dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
825} 819}
826 820
827static int 821static int
@@ -931,8 +925,8 @@ receive_packet (struct net_device *dev)
931static void 925static void
932rio_error (struct net_device *dev, int int_status) 926rio_error (struct net_device *dev, int int_status)
933{ 927{
934 long ioaddr = dev->base_addr;
935 struct netdev_private *np = netdev_priv(dev); 928 struct netdev_private *np = netdev_priv(dev);
929 void __iomem *ioaddr = np->ioaddr;
936 u16 macctrl; 930 u16 macctrl;
937 931
938 /* Link change event */ 932 /* Link change event */
@@ -954,7 +948,7 @@ rio_error (struct net_device *dev, int int_status)
954 TxFlowControlEnable : 0; 948 TxFlowControlEnable : 0;
955 macctrl |= (np->rx_flow) ? 949 macctrl |= (np->rx_flow) ?
956 RxFlowControlEnable : 0; 950 RxFlowControlEnable : 0;
957 writew(macctrl, ioaddr + MACCtrl); 951 dw16(MACCtrl, macctrl);
958 np->link_status = 1; 952 np->link_status = 1;
959 netif_carrier_on(dev); 953 netif_carrier_on(dev);
960 } else { 954 } else {
@@ -974,7 +968,7 @@ rio_error (struct net_device *dev, int int_status)
974 if (int_status & HostError) { 968 if (int_status & HostError) {
975 printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n", 969 printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
976 dev->name, int_status); 970 dev->name, int_status);
977 writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2); 971 dw16(ASICCtrl + 2, GlobalReset | HostReset);
978 mdelay (500); 972 mdelay (500);
979 } 973 }
980} 974}
@@ -982,8 +976,8 @@ rio_error (struct net_device *dev, int int_status)
982static struct net_device_stats * 976static struct net_device_stats *
983get_stats (struct net_device *dev) 977get_stats (struct net_device *dev)
984{ 978{
985 long ioaddr = dev->base_addr;
986 struct netdev_private *np = netdev_priv(dev); 979 struct netdev_private *np = netdev_priv(dev);
980 void __iomem *ioaddr = np->ioaddr;
987#ifdef MEM_MAPPING 981#ifdef MEM_MAPPING
988 int i; 982 int i;
989#endif 983#endif
@@ -992,106 +986,107 @@ get_stats (struct net_device *dev)
992 /* All statistics registers need to be acknowledged, 986 /* All statistics registers need to be acknowledged,
993 else statistic overflow could cause problems */ 987 else statistic overflow could cause problems */
994 988
995 np->stats.rx_packets += readl (ioaddr + FramesRcvOk); 989 np->stats.rx_packets += dr32(FramesRcvOk);
996 np->stats.tx_packets += readl (ioaddr + FramesXmtOk); 990 np->stats.tx_packets += dr32(FramesXmtOk);
997 np->stats.rx_bytes += readl (ioaddr + OctetRcvOk); 991 np->stats.rx_bytes += dr32(OctetRcvOk);
998 np->stats.tx_bytes += readl (ioaddr + OctetXmtOk); 992 np->stats.tx_bytes += dr32(OctetXmtOk);
999 993
1000 np->stats.multicast = readl (ioaddr + McstFramesRcvdOk); 994 np->stats.multicast = dr32(McstFramesRcvdOk);
1001 np->stats.collisions += readl (ioaddr + SingleColFrames) 995 np->stats.collisions += dr32(SingleColFrames)
1002 + readl (ioaddr + MultiColFrames); 996 + dr32(MultiColFrames);
1003 997
1004 /* detailed tx errors */ 998 /* detailed tx errors */
1005 stat_reg = readw (ioaddr + FramesAbortXSColls); 999 stat_reg = dr16(FramesAbortXSColls);
1006 np->stats.tx_aborted_errors += stat_reg; 1000 np->stats.tx_aborted_errors += stat_reg;
1007 np->stats.tx_errors += stat_reg; 1001 np->stats.tx_errors += stat_reg;
1008 1002
1009 stat_reg = readw (ioaddr + CarrierSenseErrors); 1003 stat_reg = dr16(CarrierSenseErrors);
1010 np->stats.tx_carrier_errors += stat_reg; 1004 np->stats.tx_carrier_errors += stat_reg;
1011 np->stats.tx_errors += stat_reg; 1005 np->stats.tx_errors += stat_reg;
1012 1006
1013 /* Clear all other statistic register. */ 1007 /* Clear all other statistic register. */
1014 readl (ioaddr + McstOctetXmtOk); 1008 dr32(McstOctetXmtOk);
1015 readw (ioaddr + BcstFramesXmtdOk); 1009 dr16(BcstFramesXmtdOk);
1016 readl (ioaddr + McstFramesXmtdOk); 1010 dr32(McstFramesXmtdOk);
1017 readw (ioaddr + BcstFramesRcvdOk); 1011 dr16(BcstFramesRcvdOk);
1018 readw (ioaddr + MacControlFramesRcvd); 1012 dr16(MacControlFramesRcvd);
1019 readw (ioaddr + FrameTooLongErrors); 1013 dr16(FrameTooLongErrors);
1020 readw (ioaddr + InRangeLengthErrors); 1014 dr16(InRangeLengthErrors);
1021 readw (ioaddr + FramesCheckSeqErrors); 1015 dr16(FramesCheckSeqErrors);
1022 readw (ioaddr + FramesLostRxErrors); 1016 dr16(FramesLostRxErrors);
1023 readl (ioaddr + McstOctetXmtOk); 1017 dr32(McstOctetXmtOk);
1024 readl (ioaddr + BcstOctetXmtOk); 1018 dr32(BcstOctetXmtOk);
1025 readl (ioaddr + McstFramesXmtdOk); 1019 dr32(McstFramesXmtdOk);
1026 readl (ioaddr + FramesWDeferredXmt); 1020 dr32(FramesWDeferredXmt);
1027 readl (ioaddr + LateCollisions); 1021 dr32(LateCollisions);
1028 readw (ioaddr + BcstFramesXmtdOk); 1022 dr16(BcstFramesXmtdOk);
1029 readw (ioaddr + MacControlFramesXmtd); 1023 dr16(MacControlFramesXmtd);
1030 readw (ioaddr + FramesWEXDeferal); 1024 dr16(FramesWEXDeferal);
1031 1025
1032#ifdef MEM_MAPPING 1026#ifdef MEM_MAPPING
1033 for (i = 0x100; i <= 0x150; i += 4) 1027 for (i = 0x100; i <= 0x150; i += 4)
1034 readl (ioaddr + i); 1028 dr32(i);
1035#endif 1029#endif
1036 readw (ioaddr + TxJumboFrames); 1030 dr16(TxJumboFrames);
1037 readw (ioaddr + RxJumboFrames); 1031 dr16(RxJumboFrames);
1038 readw (ioaddr + TCPCheckSumErrors); 1032 dr16(TCPCheckSumErrors);
1039 readw (ioaddr + UDPCheckSumErrors); 1033 dr16(UDPCheckSumErrors);
1040 readw (ioaddr + IPCheckSumErrors); 1034 dr16(IPCheckSumErrors);
1041 return &np->stats; 1035 return &np->stats;
1042} 1036}
1043 1037
1044static int 1038static int
1045clear_stats (struct net_device *dev) 1039clear_stats (struct net_device *dev)
1046{ 1040{
1047 long ioaddr = dev->base_addr; 1041 struct netdev_private *np = netdev_priv(dev);
1042 void __iomem *ioaddr = np->ioaddr;
1048#ifdef MEM_MAPPING 1043#ifdef MEM_MAPPING
1049 int i; 1044 int i;
1050#endif 1045#endif
1051 1046
1052 /* All statistics registers need to be acknowledged, 1047 /* All statistics registers need to be acknowledged,
1053 else statistic overflow could cause problems */ 1048 else statistic overflow could cause problems */
1054 readl (ioaddr + FramesRcvOk); 1049 dr32(FramesRcvOk);
1055 readl (ioaddr + FramesXmtOk); 1050 dr32(FramesXmtOk);
1056 readl (ioaddr + OctetRcvOk); 1051 dr32(OctetRcvOk);
1057 readl (ioaddr + OctetXmtOk); 1052 dr32(OctetXmtOk);
1058 1053
1059 readl (ioaddr + McstFramesRcvdOk); 1054 dr32(McstFramesRcvdOk);
1060 readl (ioaddr + SingleColFrames); 1055 dr32(SingleColFrames);
1061 readl (ioaddr + MultiColFrames); 1056 dr32(MultiColFrames);
1062 readl (ioaddr + LateCollisions); 1057 dr32(LateCollisions);
1063 /* detailed rx errors */ 1058 /* detailed rx errors */
1064 readw (ioaddr + FrameTooLongErrors); 1059 dr16(FrameTooLongErrors);
1065 readw (ioaddr + InRangeLengthErrors); 1060 dr16(InRangeLengthErrors);
1066 readw (ioaddr + FramesCheckSeqErrors); 1061 dr16(FramesCheckSeqErrors);
1067 readw (ioaddr + FramesLostRxErrors); 1062 dr16(FramesLostRxErrors);
1068 1063
1069 /* detailed tx errors */ 1064 /* detailed tx errors */
1070 readw (ioaddr + FramesAbortXSColls); 1065 dr16(FramesAbortXSColls);
1071 readw (ioaddr + CarrierSenseErrors); 1066 dr16(CarrierSenseErrors);
1072 1067
1073 /* Clear all other statistic register. */ 1068 /* Clear all other statistic register. */
1074 readl (ioaddr + McstOctetXmtOk); 1069 dr32(McstOctetXmtOk);
1075 readw (ioaddr + BcstFramesXmtdOk); 1070 dr16(BcstFramesXmtdOk);
1076 readl (ioaddr + McstFramesXmtdOk); 1071 dr32(McstFramesXmtdOk);
1077 readw (ioaddr + BcstFramesRcvdOk); 1072 dr16(BcstFramesRcvdOk);
1078 readw (ioaddr + MacControlFramesRcvd); 1073 dr16(MacControlFramesRcvd);
1079 readl (ioaddr + McstOctetXmtOk); 1074 dr32(McstOctetXmtOk);
1080 readl (ioaddr + BcstOctetXmtOk); 1075 dr32(BcstOctetXmtOk);
1081 readl (ioaddr + McstFramesXmtdOk); 1076 dr32(McstFramesXmtdOk);
1082 readl (ioaddr + FramesWDeferredXmt); 1077 dr32(FramesWDeferredXmt);
1083 readw (ioaddr + BcstFramesXmtdOk); 1078 dr16(BcstFramesXmtdOk);
1084 readw (ioaddr + MacControlFramesXmtd); 1079 dr16(MacControlFramesXmtd);
1085 readw (ioaddr + FramesWEXDeferal); 1080 dr16(FramesWEXDeferal);
1086#ifdef MEM_MAPPING 1081#ifdef MEM_MAPPING
1087 for (i = 0x100; i <= 0x150; i += 4) 1082 for (i = 0x100; i <= 0x150; i += 4)
1088 readl (ioaddr + i); 1083 dr32(i);
1089#endif 1084#endif
1090 readw (ioaddr + TxJumboFrames); 1085 dr16(TxJumboFrames);
1091 readw (ioaddr + RxJumboFrames); 1086 dr16(RxJumboFrames);
1092 readw (ioaddr + TCPCheckSumErrors); 1087 dr16(TCPCheckSumErrors);
1093 readw (ioaddr + UDPCheckSumErrors); 1088 dr16(UDPCheckSumErrors);
1094 readw (ioaddr + IPCheckSumErrors); 1089 dr16(IPCheckSumErrors);
1095 return 0; 1090 return 0;
1096} 1091}
1097 1092
@@ -1114,10 +1109,10 @@ change_mtu (struct net_device *dev, int new_mtu)
1114static void 1109static void
1115set_multicast (struct net_device *dev) 1110set_multicast (struct net_device *dev)
1116{ 1111{
1117 long ioaddr = dev->base_addr; 1112 struct netdev_private *np = netdev_priv(dev);
1113 void __iomem *ioaddr = np->ioaddr;
1118 u32 hash_table[2]; 1114 u32 hash_table[2];
1119 u16 rx_mode = 0; 1115 u16 rx_mode = 0;
1120 struct netdev_private *np = netdev_priv(dev);
1121 1116
1122 hash_table[0] = hash_table[1] = 0; 1117 hash_table[0] = hash_table[1] = 0;
1123 /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */ 1118 /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
@@ -1153,9 +1148,9 @@ set_multicast (struct net_device *dev)
1153 rx_mode |= ReceiveVLANMatch; 1148 rx_mode |= ReceiveVLANMatch;
1154 } 1149 }
1155 1150
1156 writel (hash_table[0], ioaddr + HashTable0); 1151 dw32(HashTable0, hash_table[0]);
1157 writel (hash_table[1], ioaddr + HashTable1); 1152 dw32(HashTable1, hash_table[1]);
1158 writew (rx_mode, ioaddr + ReceiveMode); 1153 dw16(ReceiveMode, rx_mode);
1159} 1154}
1160 1155
1161static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1156static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1259,55 +1254,21 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1259{ 1254{
1260 int phy_addr; 1255 int phy_addr;
1261 struct netdev_private *np = netdev_priv(dev); 1256 struct netdev_private *np = netdev_priv(dev);
1262 struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru; 1257 struct mii_ioctl_data *miidata = if_mii(rq);
1263
1264 struct netdev_desc *desc;
1265 int i;
1266 1258
1267 phy_addr = np->phy_addr; 1259 phy_addr = np->phy_addr;
1268 switch (cmd) { 1260 switch (cmd) {
1269 case SIOCDEVPRIVATE: 1261 case SIOCGMIIPHY:
1270 break; 1262 miidata->phy_id = phy_addr;
1271
1272 case SIOCDEVPRIVATE + 1:
1273 miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
1274 break;
1275 case SIOCDEVPRIVATE + 2:
1276 mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
1277 break;
1278 case SIOCDEVPRIVATE + 3:
1279 break; 1263 break;
1280 case SIOCDEVPRIVATE + 4: 1264 case SIOCGMIIREG:
1265 miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
1281 break; 1266 break;
1282 case SIOCDEVPRIVATE + 5: 1267 case SIOCSMIIREG:
1283 netif_stop_queue (dev); 1268 if (!capable(CAP_NET_ADMIN))
1284 break; 1269 return -EPERM;
1285 case SIOCDEVPRIVATE + 6: 1270 mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
1286 netif_wake_queue (dev);
1287 break;
1288 case SIOCDEVPRIVATE + 7:
1289 printk
1290 ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
1291 netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
1292 np->old_rx);
1293 break; 1271 break;
1294 case SIOCDEVPRIVATE + 8:
1295 printk("TX ring:\n");
1296 for (i = 0; i < TX_RING_SIZE; i++) {
1297 desc = &np->tx_ring[i];
1298 printk
1299 ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
1300 i,
1301 (u32) (np->tx_ring_dma + i * sizeof (*desc)),
1302 (u32)le64_to_cpu(desc->next_desc),
1303 (u32)le64_to_cpu(desc->status),
1304 (u32)(le64_to_cpu(desc->fraginfo) >> 32),
1305 (u32)le64_to_cpu(desc->fraginfo));
1306 printk ("\n");
1307 }
1308 printk ("\n");
1309 break;
1310
1311 default: 1272 default:
1312 return -EOPNOTSUPP; 1273 return -EOPNOTSUPP;
1313 } 1274 }
@@ -1318,15 +1279,15 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1318#define EEP_BUSY 0x8000 1279#define EEP_BUSY 0x8000
1319/* Read the EEPROM word */ 1280/* Read the EEPROM word */
1320/* We use I/O instruction to read/write eeprom to avoid fail on some machines */ 1281/* We use I/O instruction to read/write eeprom to avoid fail on some machines */
1321static int 1282static int read_eeprom(struct netdev_private *np, int eep_addr)
1322read_eeprom (long ioaddr, int eep_addr)
1323{ 1283{
1284 void __iomem *ioaddr = np->eeprom_addr;
1324 int i = 1000; 1285 int i = 1000;
1325 outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl); 1286
1287 dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
1326 while (i-- > 0) { 1288 while (i-- > 0) {
1327 if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) { 1289 if (!(dr16(EepromCtrl) & EEP_BUSY))
1328 return inw (ioaddr + EepromData); 1290 return dr16(EepromData);
1329 }
1330 } 1291 }
1331 return 0; 1292 return 0;
1332} 1293}
@@ -1336,38 +1297,40 @@ enum phy_ctrl_bits {
1336 MII_DUPLEX = 0x08, 1297 MII_DUPLEX = 0x08,
1337}; 1298};
1338 1299
1339#define mii_delay() readb(ioaddr) 1300#define mii_delay() dr8(PhyCtrl)
1340static void 1301static void
1341mii_sendbit (struct net_device *dev, u32 data) 1302mii_sendbit (struct net_device *dev, u32 data)
1342{ 1303{
1343 long ioaddr = dev->base_addr + PhyCtrl; 1304 struct netdev_private *np = netdev_priv(dev);
1344 data = (data) ? MII_DATA1 : 0; 1305 void __iomem *ioaddr = np->ioaddr;
1345 data |= MII_WRITE; 1306
1346 data |= (readb (ioaddr) & 0xf8) | MII_WRITE; 1307 data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
1347 writeb (data, ioaddr); 1308 dw8(PhyCtrl, data);
1348 mii_delay (); 1309 mii_delay ();
1349 writeb (data | MII_CLK, ioaddr); 1310 dw8(PhyCtrl, data | MII_CLK);
1350 mii_delay (); 1311 mii_delay ();
1351} 1312}
1352 1313
1353static int 1314static int
1354mii_getbit (struct net_device *dev) 1315mii_getbit (struct net_device *dev)
1355{ 1316{
1356 long ioaddr = dev->base_addr + PhyCtrl; 1317 struct netdev_private *np = netdev_priv(dev);
1318 void __iomem *ioaddr = np->ioaddr;
1357 u8 data; 1319 u8 data;
1358 1320
1359 data = (readb (ioaddr) & 0xf8) | MII_READ; 1321 data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
1360 writeb (data, ioaddr); 1322 dw8(PhyCtrl, data);
1361 mii_delay (); 1323 mii_delay ();
1362 writeb (data | MII_CLK, ioaddr); 1324 dw8(PhyCtrl, data | MII_CLK);
1363 mii_delay (); 1325 mii_delay ();
1364 return ((readb (ioaddr) >> 1) & 1); 1326 return (dr8(PhyCtrl) >> 1) & 1;
1365} 1327}
1366 1328
1367static void 1329static void
1368mii_send_bits (struct net_device *dev, u32 data, int len) 1330mii_send_bits (struct net_device *dev, u32 data, int len)
1369{ 1331{
1370 int i; 1332 int i;
1333
1371 for (i = len - 1; i >= 0; i--) { 1334 for (i = len - 1; i >= 0; i--) {
1372 mii_sendbit (dev, data & (1 << i)); 1335 mii_sendbit (dev, data & (1 << i));
1373 } 1336 }
@@ -1721,28 +1684,29 @@ mii_set_media_pcs (struct net_device *dev)
1721static int 1684static int
1722rio_close (struct net_device *dev) 1685rio_close (struct net_device *dev)
1723{ 1686{
1724 long ioaddr = dev->base_addr;
1725 struct netdev_private *np = netdev_priv(dev); 1687 struct netdev_private *np = netdev_priv(dev);
1688 void __iomem *ioaddr = np->ioaddr;
1689
1690 struct pci_dev *pdev = np->pdev;
1726 struct sk_buff *skb; 1691 struct sk_buff *skb;
1727 int i; 1692 int i;
1728 1693
1729 netif_stop_queue (dev); 1694 netif_stop_queue (dev);
1730 1695
1731 /* Disable interrupts */ 1696 /* Disable interrupts */
1732 writew (0, ioaddr + IntEnable); 1697 dw16(IntEnable, 0);
1733 1698
1734 /* Stop Tx and Rx logics */ 1699 /* Stop Tx and Rx logics */
1735 writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl); 1700 dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
1736 1701
1737 free_irq (dev->irq, dev); 1702 free_irq(pdev->irq, dev);
1738 del_timer_sync (&np->timer); 1703 del_timer_sync (&np->timer);
1739 1704
1740 /* Free all the skbuffs in the queue. */ 1705 /* Free all the skbuffs in the queue. */
1741 for (i = 0; i < RX_RING_SIZE; i++) { 1706 for (i = 0; i < RX_RING_SIZE; i++) {
1742 skb = np->rx_skbuff[i]; 1707 skb = np->rx_skbuff[i];
1743 if (skb) { 1708 if (skb) {
1744 pci_unmap_single(np->pdev, 1709 pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
1745 desc_to_dma(&np->rx_ring[i]),
1746 skb->len, PCI_DMA_FROMDEVICE); 1710 skb->len, PCI_DMA_FROMDEVICE);
1747 dev_kfree_skb (skb); 1711 dev_kfree_skb (skb);
1748 np->rx_skbuff[i] = NULL; 1712 np->rx_skbuff[i] = NULL;
@@ -1753,8 +1717,7 @@ rio_close (struct net_device *dev)
1753 for (i = 0; i < TX_RING_SIZE; i++) { 1717 for (i = 0; i < TX_RING_SIZE; i++) {
1754 skb = np->tx_skbuff[i]; 1718 skb = np->tx_skbuff[i];
1755 if (skb) { 1719 if (skb) {
1756 pci_unmap_single(np->pdev, 1720 pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
1757 desc_to_dma(&np->tx_ring[i]),
1758 skb->len, PCI_DMA_TODEVICE); 1721 skb->len, PCI_DMA_TODEVICE);
1759 dev_kfree_skb (skb); 1722 dev_kfree_skb (skb);
1760 np->tx_skbuff[i] = NULL; 1723 np->tx_skbuff[i] = NULL;
@@ -1778,8 +1741,9 @@ rio_remove1 (struct pci_dev *pdev)
1778 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, 1741 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
1779 np->tx_ring_dma); 1742 np->tx_ring_dma);
1780#ifdef MEM_MAPPING 1743#ifdef MEM_MAPPING
1781 iounmap ((char *) (dev->base_addr)); 1744 pci_iounmap(pdev, np->ioaddr);
1782#endif 1745#endif
1746 pci_iounmap(pdev, np->eeprom_addr);
1783 free_netdev (dev); 1747 free_netdev (dev);
1784 pci_release_regions (pdev); 1748 pci_release_regions (pdev);
1785 pci_disable_device (pdev); 1749 pci_disable_device (pdev);
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index ba0adcafa55a..3699565704c7 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -42,23 +42,6 @@
42#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) 42#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
43#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) 43#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
44 44
45/* This driver was written to use PCI memory space, however x86-oriented
46 hardware often uses I/O space accesses. */
47#ifndef MEM_MAPPING
48#undef readb
49#undef readw
50#undef readl
51#undef writeb
52#undef writew
53#undef writel
54#define readb inb
55#define readw inw
56#define readl inl
57#define writeb outb
58#define writew outw
59#define writel outl
60#endif
61
62/* Offsets to the device registers. 45/* Offsets to the device registers.
63 Unlike software-only systems, device drivers interact with complex hardware. 46 Unlike software-only systems, device drivers interact with complex hardware.
64 It's not useful to define symbolic names for every register bit in the 47 It's not useful to define symbolic names for every register bit in the
@@ -365,13 +348,6 @@ struct ioctl_data {
365 char *data; 348 char *data;
366}; 349};
367 350
368struct mii_data {
369 __u16 reserved;
370 __u16 reg_num;
371 __u16 in_value;
372 __u16 out_value;
373};
374
375/* The Rx and Tx buffer descriptors. */ 351/* The Rx and Tx buffer descriptors. */
376struct netdev_desc { 352struct netdev_desc {
377 __le64 next_desc; 353 __le64 next_desc;
@@ -391,6 +367,8 @@ struct netdev_private {
391 dma_addr_t tx_ring_dma; 367 dma_addr_t tx_ring_dma;
392 dma_addr_t rx_ring_dma; 368 dma_addr_t rx_ring_dma;
393 struct pci_dev *pdev; 369 struct pci_dev *pdev;
370 void __iomem *ioaddr;
371 void __iomem *eeprom_addr;
394 spinlock_t tx_lock; 372 spinlock_t tx_lock;
395 spinlock_t rx_lock; 373 spinlock_t rx_lock;
396 struct net_device_stats stats; 374 struct net_device_stats stats;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d783f4f96ec0..d7bb52a7bda1 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -522,9 +522,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
522 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); 522 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
523 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 523 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
524 524
525 dev->base_addr = (unsigned long)ioaddr;
526 dev->irq = irq;
527
528 np = netdev_priv(dev); 525 np = netdev_priv(dev);
529 np->base = ioaddr; 526 np->base = ioaddr;
530 np->pci_dev = pdev; 527 np->pci_dev = pdev;
@@ -828,18 +825,19 @@ static int netdev_open(struct net_device *dev)
828{ 825{
829 struct netdev_private *np = netdev_priv(dev); 826 struct netdev_private *np = netdev_priv(dev);
830 void __iomem *ioaddr = np->base; 827 void __iomem *ioaddr = np->base;
828 const int irq = np->pci_dev->irq;
831 unsigned long flags; 829 unsigned long flags;
832 int i; 830 int i;
833 831
834 /* Do we need to reset the chip??? */ 832 /* Do we need to reset the chip??? */
835 833
836 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 834 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
837 if (i) 835 if (i)
838 return i; 836 return i;
839 837
840 if (netif_msg_ifup(np)) 838 if (netif_msg_ifup(np))
841 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 839 printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
842 dev->name, dev->irq); 840
843 init_ring(dev); 841 init_ring(dev);
844 842
845 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); 843 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
@@ -1814,7 +1812,7 @@ static int netdev_close(struct net_device *dev)
1814 } 1812 }
1815#endif /* __i386__ debugging only */ 1813#endif /* __i386__ debugging only */
1816 1814
1817 free_irq(dev->irq, dev); 1815 free_irq(np->pci_dev->irq, dev);
1818 1816
1819 del_timer_sync(&np->timer); 1817 del_timer_sync(&np->timer);
1820 1818
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index b276469f74e9..290b26f868c9 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -815,6 +815,7 @@ static const struct ethtool_ops dnet_ethtool_ops = {
815 .set_settings = dnet_set_settings, 815 .set_settings = dnet_set_settings,
816 .get_drvinfo = dnet_get_drvinfo, 816 .get_drvinfo = dnet_get_drvinfo,
817 .get_link = ethtool_op_get_link, 817 .get_link = ethtool_op_get_link,
818 .get_ts_info = ethtool_op_get_ts_info,
818}; 819};
819 820
820static const struct net_device_ops dnet_netdev_ops = { 821static const struct net_device_ops dnet_netdev_ops = {
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 9576ac002c23..ff4eb8fe25d5 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -33,7 +33,7 @@
33 33
34#include "be_hw.h" 34#include "be_hw.h"
35 35
36#define DRV_VER "4.2.116u" 36#define DRV_VER "4.2.220u"
37#define DRV_NAME "be2net" 37#define DRV_NAME "be2net"
38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -162,6 +162,11 @@ static inline void queue_head_inc(struct be_queue_info *q)
162 index_inc(&q->head, q->len); 162 index_inc(&q->head, q->len);
163} 163}
164 164
165static inline void index_dec(u16 *index, u16 limit)
166{
167 *index = MODULO((*index - 1), limit);
168}
169
165static inline void queue_tail_inc(struct be_queue_info *q) 170static inline void queue_tail_inc(struct be_queue_info *q)
166{ 171{
167 index_inc(&q->tail, q->len); 172 index_inc(&q->tail, q->len);
@@ -308,11 +313,33 @@ struct be_vf_cfg {
308 u32 tx_rate; 313 u32 tx_rate;
309}; 314};
310 315
316enum vf_state {
317 ENABLED = 0,
318 ASSIGNED = 1
319};
320
311#define BE_FLAGS_LINK_STATUS_INIT 1 321#define BE_FLAGS_LINK_STATUS_INIT 1
312#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 322#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
313#define BE_UC_PMAC_COUNT 30 323#define BE_UC_PMAC_COUNT 30
314#define BE_VF_UC_PMAC_COUNT 2 324#define BE_VF_UC_PMAC_COUNT 2
315 325
326struct phy_info {
327 u8 transceiver;
328 u8 autoneg;
329 u8 fc_autoneg;
330 u8 port_type;
331 u16 phy_type;
332 u16 interface_type;
333 u32 misc_params;
334 u16 auto_speeds_supported;
335 u16 fixed_speeds_supported;
336 int link_speed;
337 int forced_port_speed;
338 u32 dac_cable_len;
339 u32 advertising;
340 u32 supported;
341};
342
316struct be_adapter { 343struct be_adapter {
317 struct pci_dev *pdev; 344 struct pci_dev *pdev;
318 struct net_device *netdev; 345 struct net_device *netdev;
@@ -377,29 +404,30 @@ struct be_adapter {
377 u32 rx_fc; /* Rx flow control */ 404 u32 rx_fc; /* Rx flow control */
378 u32 tx_fc; /* Tx flow control */ 405 u32 tx_fc; /* Tx flow control */
379 bool stats_cmd_sent; 406 bool stats_cmd_sent;
380 int link_speed;
381 u8 port_type;
382 u8 transceiver;
383 u8 autoneg;
384 u8 generation; /* BladeEngine ASIC generation */ 407 u8 generation; /* BladeEngine ASIC generation */
385 u32 flash_status; 408 u32 flash_status;
386 struct completion flash_compl; 409 struct completion flash_compl;
387 410
388 u32 num_vfs; 411 u32 num_vfs; /* Number of VFs provisioned by PF driver */
389 u8 is_virtfn; 412 u32 dev_num_vfs; /* Number of VFs supported by HW */
413 u8 virtfn;
390 struct be_vf_cfg *vf_cfg; 414 struct be_vf_cfg *vf_cfg;
391 bool be3_native; 415 bool be3_native;
392 u32 sli_family; 416 u32 sli_family;
393 u8 hba_port_num; 417 u8 hba_port_num;
394 u16 pvid; 418 u16 pvid;
419 struct phy_info phy;
395 u8 wol_cap; 420 u8 wol_cap;
396 bool wol; 421 bool wol;
397 u32 max_pmac_cnt; /* Max secondary UC MACs programmable */ 422 u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
398 u32 uc_macs; /* Count of secondary UC MAC programmed */ 423 u32 uc_macs; /* Count of secondary UC MAC programmed */
424 u32 msg_enable;
399}; 425};
400 426
401#define be_physfn(adapter) (!adapter->is_virtfn) 427#define be_physfn(adapter) (!adapter->virtfn)
402#define sriov_enabled(adapter) (adapter->num_vfs > 0) 428#define sriov_enabled(adapter) (adapter->num_vfs > 0)
429#define sriov_want(adapter) (adapter->dev_num_vfs && num_vfs && \
430 be_physfn(adapter))
403#define for_all_vfs(adapter, vf_cfg, i) \ 431#define for_all_vfs(adapter, vf_cfg, i) \
404 for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ 432 for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
405 i++, vf_cfg++) 433 i++, vf_cfg++)
@@ -528,14 +556,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
528 return val; 556 return val;
529} 557}
530 558
531static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
532{
533 u32 sli_intf;
534
535 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
536 adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
537}
538
539static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) 559static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
540{ 560{
541 u32 addr; 561 u32 addr;
@@ -583,4 +603,7 @@ extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
583extern void be_parse_stats(struct be_adapter *adapter); 603extern void be_parse_stats(struct be_adapter *adapter);
584extern int be_load_fw(struct be_adapter *adapter, u8 *func); 604extern int be_load_fw(struct be_adapter *adapter, u8 *func);
585extern bool be_is_wol_supported(struct be_adapter *adapter); 605extern bool be_is_wol_supported(struct be_adapter *adapter);
606extern bool be_pause_supported(struct be_adapter *adapter);
607extern u32 be_get_fw_log_level(struct be_adapter *adapter);
608
586#endif /* BE_H */ 609#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 67b030d72df1..b24623cce07b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -61,10 +61,21 @@ static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
61 compl->flags = 0; 61 compl->flags = 0;
62} 62}
63 63
64static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
65{
66 unsigned long addr;
67
68 addr = tag1;
69 addr = ((addr << 16) << 16) | tag0;
70 return (void *)addr;
71}
72
64static int be_mcc_compl_process(struct be_adapter *adapter, 73static int be_mcc_compl_process(struct be_adapter *adapter,
65 struct be_mcc_compl *compl) 74 struct be_mcc_compl *compl)
66{ 75{
67 u16 compl_status, extd_status; 76 u16 compl_status, extd_status;
77 struct be_cmd_resp_hdr *resp_hdr;
78 u8 opcode = 0, subsystem = 0;
68 79
69 /* Just swap the status to host endian; mcc tag is opaquely copied 80 /* Just swap the status to host endian; mcc tag is opaquely copied
70 * from mcc_wrb */ 81 * from mcc_wrb */
@@ -73,32 +84,36 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
73 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 84 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
74 CQE_STATUS_COMPL_MASK; 85 CQE_STATUS_COMPL_MASK;
75 86
76 if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) || 87 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
77 (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) && 88
78 (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { 89 if (resp_hdr) {
90 opcode = resp_hdr->opcode;
91 subsystem = resp_hdr->subsystem;
92 }
93
94 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
95 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
96 (subsystem == CMD_SUBSYSTEM_COMMON)) {
79 adapter->flash_status = compl_status; 97 adapter->flash_status = compl_status;
80 complete(&adapter->flash_compl); 98 complete(&adapter->flash_compl);
81 } 99 }
82 100
83 if (compl_status == MCC_STATUS_SUCCESS) { 101 if (compl_status == MCC_STATUS_SUCCESS) {
84 if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) || 102 if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
85 (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) && 103 (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
86 (compl->tag1 == CMD_SUBSYSTEM_ETH)) { 104 (subsystem == CMD_SUBSYSTEM_ETH)) {
87 be_parse_stats(adapter); 105 be_parse_stats(adapter);
88 adapter->stats_cmd_sent = false; 106 adapter->stats_cmd_sent = false;
89 } 107 }
90 if (compl->tag0 == 108 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
91 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) { 109 subsystem == CMD_SUBSYSTEM_COMMON) {
92 struct be_mcc_wrb *mcc_wrb =
93 queue_index_node(&adapter->mcc_obj.q,
94 compl->tag1);
95 struct be_cmd_resp_get_cntl_addnl_attribs *resp = 110 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
96 embedded_payload(mcc_wrb); 111 (void *)resp_hdr;
97 adapter->drv_stats.be_on_die_temperature = 112 adapter->drv_stats.be_on_die_temperature =
98 resp->on_die_temperature; 113 resp->on_die_temperature;
99 } 114 }
100 } else { 115 } else {
101 if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) 116 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
102 be_get_temp_freq = 0; 117 be_get_temp_freq = 0;
103 118
104 if (compl_status == MCC_STATUS_NOT_SUPPORTED || 119 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
@@ -108,13 +123,13 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
108 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 123 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
109 dev_warn(&adapter->pdev->dev, "This domain(VM) is not " 124 dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
110 "permitted to execute this cmd (opcode %d)\n", 125 "permitted to execute this cmd (opcode %d)\n",
111 compl->tag0); 126 opcode);
112 } else { 127 } else {
113 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & 128 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
114 CQE_STATUS_EXTD_MASK; 129 CQE_STATUS_EXTD_MASK;
115 dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:" 130 dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
116 "status %d, extd-status %d\n", 131 "status %d, extd-status %d\n",
117 compl->tag0, compl_status, extd_status); 132 opcode, compl_status, extd_status);
118 } 133 }
119 } 134 }
120done: 135done:
@@ -126,7 +141,7 @@ static void be_async_link_state_process(struct be_adapter *adapter,
126 struct be_async_event_link_state *evt) 141 struct be_async_event_link_state *evt)
127{ 142{
128 /* When link status changes, link speed must be re-queried from FW */ 143 /* When link status changes, link speed must be re-queried from FW */
129 adapter->link_speed = -1; 144 adapter->phy.link_speed = -1;
130 145
131 /* For the initial link status do not rely on the ASYNC event as 146 /* For the initial link status do not rely on the ASYNC event as
132 * it may not be received in some cases. 147 * it may not be received in some cases.
@@ -153,7 +168,7 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
153{ 168{
154 if (evt->physical_port == adapter->port_num) { 169 if (evt->physical_port == adapter->port_num) {
155 /* qos_link_speed is in units of 10 Mbps */ 170 /* qos_link_speed is in units of 10 Mbps */
156 adapter->link_speed = evt->qos_link_speed * 10; 171 adapter->phy.link_speed = evt->qos_link_speed * 10;
157 } 172 }
158} 173}
159 174
@@ -286,7 +301,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
286 if (i == mcc_timeout) { 301 if (i == mcc_timeout) {
287 dev_err(&adapter->pdev->dev, "FW not responding\n"); 302 dev_err(&adapter->pdev->dev, "FW not responding\n");
288 adapter->fw_timeout = true; 303 adapter->fw_timeout = true;
289 return -1; 304 return -EIO;
290 } 305 }
291 return status; 306 return status;
292} 307}
@@ -294,8 +309,26 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
294/* Notify MCC requests and wait for completion */ 309/* Notify MCC requests and wait for completion */
295static int be_mcc_notify_wait(struct be_adapter *adapter) 310static int be_mcc_notify_wait(struct be_adapter *adapter)
296{ 311{
312 int status;
313 struct be_mcc_wrb *wrb;
314 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
315 u16 index = mcc_obj->q.head;
316 struct be_cmd_resp_hdr *resp;
317
318 index_dec(&index, mcc_obj->q.len);
319 wrb = queue_index_node(&mcc_obj->q, index);
320
321 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
322
297 be_mcc_notify(adapter); 323 be_mcc_notify(adapter);
298 return be_mcc_wait_compl(adapter); 324
325 status = be_mcc_wait_compl(adapter);
326 if (status == -EIO)
327 goto out;
328
329 status = resp->status;
330out:
331 return status;
299} 332}
300 333
301static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) 334static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
@@ -435,14 +468,17 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
435 struct be_mcc_wrb *wrb, struct be_dma_mem *mem) 468 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
436{ 469{
437 struct be_sge *sge; 470 struct be_sge *sge;
471 unsigned long addr = (unsigned long)req_hdr;
472 u64 req_addr = addr;
438 473
439 req_hdr->opcode = opcode; 474 req_hdr->opcode = opcode;
440 req_hdr->subsystem = subsystem; 475 req_hdr->subsystem = subsystem;
441 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 476 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
442 req_hdr->version = 0; 477 req_hdr->version = 0;
443 478
444 wrb->tag0 = opcode; 479 wrb->tag0 = req_addr & 0xFFFFFFFF;
445 wrb->tag1 = subsystem; 480 wrb->tag1 = upper_32_bits(req_addr);
481
446 wrb->payload_length = cmd_len; 482 wrb->payload_length = cmd_len;
447 if (mem) { 483 if (mem) {
448 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << 484 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -1221,7 +1257,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1221 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb, 1257 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1222 nonemb_cmd); 1258 nonemb_cmd);
1223 1259
1224 req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num); 1260 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1225 req->cmd_params.params.reset_stats = 0; 1261 req->cmd_params.params.reset_stats = 0;
1226 1262
1227 be_mcc_notify(adapter); 1263 be_mcc_notify(adapter);
@@ -1283,13 +1319,10 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
1283{ 1319{
1284 struct be_mcc_wrb *wrb; 1320 struct be_mcc_wrb *wrb;
1285 struct be_cmd_req_get_cntl_addnl_attribs *req; 1321 struct be_cmd_req_get_cntl_addnl_attribs *req;
1286 u16 mccq_index;
1287 int status; 1322 int status;
1288 1323
1289 spin_lock_bh(&adapter->mcc_lock); 1324 spin_lock_bh(&adapter->mcc_lock);
1290 1325
1291 mccq_index = adapter->mcc_obj.q.head;
1292
1293 wrb = wrb_from_mccq(adapter); 1326 wrb = wrb_from_mccq(adapter);
1294 if (!wrb) { 1327 if (!wrb) {
1295 status = -EBUSY; 1328 status = -EBUSY;
@@ -1301,8 +1334,6 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
1301 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), 1334 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1302 wrb, NULL); 1335 wrb, NULL);
1303 1336
1304 wrb->tag1 = mccq_index;
1305
1306 be_mcc_notify(adapter); 1337 be_mcc_notify(adapter);
1307 1338
1308err: 1339err:
@@ -1824,18 +1855,16 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1824 spin_unlock_bh(&adapter->mcc_lock); 1855 spin_unlock_bh(&adapter->mcc_lock);
1825 1856
1826 if (!wait_for_completion_timeout(&adapter->flash_compl, 1857 if (!wait_for_completion_timeout(&adapter->flash_compl,
1827 msecs_to_jiffies(12000))) 1858 msecs_to_jiffies(30000)))
1828 status = -1; 1859 status = -1;
1829 else 1860 else
1830 status = adapter->flash_status; 1861 status = adapter->flash_status;
1831 1862
1832 resp = embedded_payload(wrb); 1863 resp = embedded_payload(wrb);
1833 if (!status) { 1864 if (!status)
1834 *data_written = le32_to_cpu(resp->actual_write_len); 1865 *data_written = le32_to_cpu(resp->actual_write_len);
1835 } else { 1866 else
1836 *addn_status = resp->additional_status; 1867 *addn_status = resp->additional_status;
1837 status = resp->status;
1838 }
1839 1868
1840 return status; 1869 return status;
1841 1870
@@ -1950,7 +1979,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1950 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1979 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1951 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL); 1980 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
1952 1981
1953 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); 1982 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
1954 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 1983 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1955 req->params.offset = cpu_to_le32(offset); 1984 req->params.offset = cpu_to_le32(offset);
1956 req->params.data_buf_size = cpu_to_le32(0x4); 1985 req->params.data_buf_size = cpu_to_le32(0x4);
@@ -2136,8 +2165,7 @@ err:
2136 return status; 2165 return status;
2137} 2166}
2138 2167
2139int be_cmd_get_phy_info(struct be_adapter *adapter, 2168int be_cmd_get_phy_info(struct be_adapter *adapter)
2140 struct be_phy_info *phy_info)
2141{ 2169{
2142 struct be_mcc_wrb *wrb; 2170 struct be_mcc_wrb *wrb;
2143 struct be_cmd_req_get_phy_info *req; 2171 struct be_cmd_req_get_phy_info *req;
@@ -2170,9 +2198,15 @@ int be_cmd_get_phy_info(struct be_adapter *adapter,
2170 if (!status) { 2198 if (!status) {
2171 struct be_phy_info *resp_phy_info = 2199 struct be_phy_info *resp_phy_info =
2172 cmd.va + sizeof(struct be_cmd_req_hdr); 2200 cmd.va + sizeof(struct be_cmd_req_hdr);
2173 phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type); 2201 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2174 phy_info->interface_type = 2202 adapter->phy.interface_type =
2175 le16_to_cpu(resp_phy_info->interface_type); 2203 le16_to_cpu(resp_phy_info->interface_type);
2204 adapter->phy.auto_speeds_supported =
2205 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2206 adapter->phy.fixed_speeds_supported =
2207 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2208 adapter->phy.misc_params =
2209 le32_to_cpu(resp_phy_info->misc_params);
2176 } 2210 }
2177 pci_free_consistent(adapter->pdev, cmd.size, 2211 pci_free_consistent(adapter->pdev, cmd.size,
2178 cmd.va, cmd.dma); 2212 cmd.va, cmd.dma);
@@ -2555,4 +2589,60 @@ err:
2555 mutex_unlock(&adapter->mbox_lock); 2589 mutex_unlock(&adapter->mbox_lock);
2556 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2590 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2557 return status; 2591 return status;
2592
2593}
2594int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2595 struct be_dma_mem *cmd)
2596{
2597 struct be_mcc_wrb *wrb;
2598 struct be_cmd_req_get_ext_fat_caps *req;
2599 int status;
2600
2601 if (mutex_lock_interruptible(&adapter->mbox_lock))
2602 return -1;
2603
2604 wrb = wrb_from_mbox(adapter);
2605 if (!wrb) {
2606 status = -EBUSY;
2607 goto err;
2608 }
2609
2610 req = cmd->va;
2611 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2612 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2613 cmd->size, wrb, cmd);
2614 req->parameter_type = cpu_to_le32(1);
2615
2616 status = be_mbox_notify_wait(adapter);
2617err:
2618 mutex_unlock(&adapter->mbox_lock);
2619 return status;
2620}
2621
2622int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2623 struct be_dma_mem *cmd,
2624 struct be_fat_conf_params *configs)
2625{
2626 struct be_mcc_wrb *wrb;
2627 struct be_cmd_req_set_ext_fat_caps *req;
2628 int status;
2629
2630 spin_lock_bh(&adapter->mcc_lock);
2631
2632 wrb = wrb_from_mccq(adapter);
2633 if (!wrb) {
2634 status = -EBUSY;
2635 goto err;
2636 }
2637
2638 req = cmd->va;
2639 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2640 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2641 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2642 cmd->size, wrb, cmd);
2643
2644 status = be_mcc_notify_wait(adapter);
2645err:
2646 spin_unlock_bh(&adapter->mcc_lock);
2647 return status;
2558} 2648}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d5b680c56af0..0b1029b60f69 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -189,6 +189,8 @@ struct be_mcc_mailbox {
189#define OPCODE_COMMON_GET_PHY_DETAILS 102 189#define OPCODE_COMMON_GET_PHY_DETAILS 102
190#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 190#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
191#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 191#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
192#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITES 125
193#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITES 126
192#define OPCODE_COMMON_GET_MAC_LIST 147 194#define OPCODE_COMMON_GET_MAC_LIST 147
193#define OPCODE_COMMON_SET_MAC_LIST 148 195#define OPCODE_COMMON_SET_MAC_LIST 148
194#define OPCODE_COMMON_GET_HSW_CONFIG 152 196#define OPCODE_COMMON_GET_HSW_CONFIG 152
@@ -225,8 +227,12 @@ struct be_cmd_req_hdr {
225#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ 227#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
226#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */ 228#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
227struct be_cmd_resp_hdr { 229struct be_cmd_resp_hdr {
228 u32 info; /* dword 0 */ 230 u8 opcode; /* dword 0 */
229 u32 status; /* dword 1 */ 231 u8 subsystem; /* dword 0 */
232 u8 rsvd[2]; /* dword 0 */
233 u8 status; /* dword 1 */
234 u8 add_status; /* dword 1 */
235 u8 rsvd1[2]; /* dword 1 */
230 u32 response_length; /* dword 2 */ 236 u32 response_length; /* dword 2 */
231 u32 actual_resp_len; /* dword 3 */ 237 u32 actual_resp_len; /* dword 3 */
232}; 238};
@@ -1309,9 +1315,36 @@ enum {
1309 PHY_TYPE_KX4_10GB, 1315 PHY_TYPE_KX4_10GB,
1310 PHY_TYPE_BASET_10GB, 1316 PHY_TYPE_BASET_10GB,
1311 PHY_TYPE_BASET_1GB, 1317 PHY_TYPE_BASET_1GB,
1318 PHY_TYPE_BASEX_1GB,
1319 PHY_TYPE_SGMII,
1312 PHY_TYPE_DISABLED = 255 1320 PHY_TYPE_DISABLED = 255
1313}; 1321};
1314 1322
1323#define BE_SUPPORTED_SPEED_NONE 0
1324#define BE_SUPPORTED_SPEED_10MBPS 1
1325#define BE_SUPPORTED_SPEED_100MBPS 2
1326#define BE_SUPPORTED_SPEED_1GBPS 4
1327#define BE_SUPPORTED_SPEED_10GBPS 8
1328
1329#define BE_AN_EN 0x2
1330#define BE_PAUSE_SYM_EN 0x80
1331
1332/* MAC speed valid values */
1333#define SPEED_DEFAULT 0x0
1334#define SPEED_FORCED_10GB 0x1
1335#define SPEED_FORCED_1GB 0x2
1336#define SPEED_AUTONEG_10GB 0x3
1337#define SPEED_AUTONEG_1GB 0x4
1338#define SPEED_AUTONEG_100MB 0x5
1339#define SPEED_AUTONEG_10GB_1GB 0x6
1340#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
1341#define SPEED_AUTONEG_1GB_100MB 0x8
1342#define SPEED_AUTONEG_10MB 0x9
1343#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
1344#define SPEED_AUTONEG_100MB_10MB 0xb
1345#define SPEED_FORCED_100MB 0xc
1346#define SPEED_FORCED_10MB 0xd
1347
1315struct be_cmd_req_get_phy_info { 1348struct be_cmd_req_get_phy_info {
1316 struct be_cmd_req_hdr hdr; 1349 struct be_cmd_req_hdr hdr;
1317 u8 rsvd0[24]; 1350 u8 rsvd0[24];
@@ -1321,7 +1354,11 @@ struct be_phy_info {
1321 u16 phy_type; 1354 u16 phy_type;
1322 u16 interface_type; 1355 u16 interface_type;
1323 u32 misc_params; 1356 u32 misc_params;
1324 u32 future_use[4]; 1357 u16 ext_phy_details;
1358 u16 rsvd;
1359 u16 auto_speeds_supported;
1360 u16 fixed_speeds_supported;
1361 u32 future_use[2];
1325}; 1362};
1326 1363
1327struct be_cmd_resp_get_phy_info { 1364struct be_cmd_resp_get_phy_info {
@@ -1567,6 +1604,56 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
1567 } 1604 }
1568} 1605}
1569 1606
1607
1608/************** get fat capabilites *******************/
1609#define MAX_MODULES 27
1610#define MAX_MODES 4
1611#define MODE_UART 0
1612#define FW_LOG_LEVEL_DEFAULT 48
1613#define FW_LOG_LEVEL_FATAL 64
1614
1615struct ext_fat_mode {
1616 u8 mode;
1617 u8 rsvd0;
1618 u16 port_mask;
1619 u32 dbg_lvl;
1620 u64 fun_mask;
1621} __packed;
1622
1623struct ext_fat_modules {
1624 u8 modules_str[32];
1625 u32 modules_id;
1626 u32 num_modes;
1627 struct ext_fat_mode trace_lvl[MAX_MODES];
1628} __packed;
1629
1630struct be_fat_conf_params {
1631 u32 max_log_entries;
1632 u32 log_entry_size;
1633 u8 log_type;
1634 u8 max_log_funs;
1635 u8 max_log_ports;
1636 u8 rsvd0;
1637 u32 supp_modes;
1638 u32 num_modules;
1639 struct ext_fat_modules module[MAX_MODULES];
1640} __packed;
1641
1642struct be_cmd_req_get_ext_fat_caps {
1643 struct be_cmd_req_hdr hdr;
1644 u32 parameter_type;
1645};
1646
1647struct be_cmd_resp_get_ext_fat_caps {
1648 struct be_cmd_resp_hdr hdr;
1649 struct be_fat_conf_params get_params;
1650};
1651
1652struct be_cmd_req_set_ext_fat_caps {
1653 struct be_cmd_req_hdr hdr;
1654 struct be_fat_conf_params set_params;
1655};
1656
1570extern int be_pci_fnum_get(struct be_adapter *adapter); 1657extern int be_pci_fnum_get(struct be_adapter *adapter);
1571extern int be_cmd_POST(struct be_adapter *adapter); 1658extern int be_cmd_POST(struct be_adapter *adapter);
1572extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1659extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1655,8 +1742,7 @@ extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1655 struct be_dma_mem *nonemb_cmd); 1742 struct be_dma_mem *nonemb_cmd);
1656extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 1743extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1657 u8 loopback_type, u8 enable); 1744 u8 loopback_type, u8 enable);
1658extern int be_cmd_get_phy_info(struct be_adapter *adapter, 1745extern int be_cmd_get_phy_info(struct be_adapter *adapter);
1659 struct be_phy_info *phy_info);
1660extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 1746extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
1661extern void be_detect_dump_ue(struct be_adapter *adapter); 1747extern void be_detect_dump_ue(struct be_adapter *adapter);
1662extern int be_cmd_get_die_temperature(struct be_adapter *adapter); 1748extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
@@ -1673,4 +1759,9 @@ extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
1673extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 1759extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
1674 u32 domain, u16 intf_id); 1760 u32 domain, u16 intf_id);
1675extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); 1761extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
1762extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
1763 struct be_dma_mem *cmd);
1764extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
1765 struct be_dma_mem *cmd,
1766 struct be_fat_conf_params *cfgs);
1676 1767
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index c1ff73cb0e62..63e51d476900 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -433,102 +433,193 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
433 } 433 }
434} 434}
435 435
436static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
437{
438 u32 port;
439
440 switch (phy_type) {
441 case PHY_TYPE_BASET_1GB:
442 case PHY_TYPE_BASEX_1GB:
443 case PHY_TYPE_SGMII:
444 port = PORT_TP;
445 break;
446 case PHY_TYPE_SFP_PLUS_10GB:
447 port = dac_cable_len ? PORT_DA : PORT_FIBRE;
448 break;
449 case PHY_TYPE_XFP_10GB:
450 case PHY_TYPE_SFP_1GB:
451 port = PORT_FIBRE;
452 break;
453 case PHY_TYPE_BASET_10GB:
454 port = PORT_TP;
455 break;
456 default:
457 port = PORT_OTHER;
458 }
459
460 return port;
461}
462
463static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
464{
465 u32 val = 0;
466
467 switch (if_type) {
468 case PHY_TYPE_BASET_1GB:
469 case PHY_TYPE_BASEX_1GB:
470 case PHY_TYPE_SGMII:
471 val |= SUPPORTED_TP;
472 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
473 val |= SUPPORTED_1000baseT_Full;
474 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
475 val |= SUPPORTED_100baseT_Full;
476 if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
477 val |= SUPPORTED_10baseT_Full;
478 break;
479 case PHY_TYPE_KX4_10GB:
480 val |= SUPPORTED_Backplane;
481 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
482 val |= SUPPORTED_1000baseKX_Full;
483 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
484 val |= SUPPORTED_10000baseKX4_Full;
485 break;
486 case PHY_TYPE_KR_10GB:
487 val |= SUPPORTED_Backplane |
488 SUPPORTED_10000baseKR_Full;
489 break;
490 case PHY_TYPE_SFP_PLUS_10GB:
491 case PHY_TYPE_XFP_10GB:
492 case PHY_TYPE_SFP_1GB:
493 val |= SUPPORTED_FIBRE;
494 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
495 val |= SUPPORTED_10000baseT_Full;
496 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
497 val |= SUPPORTED_1000baseT_Full;
498 break;
499 case PHY_TYPE_BASET_10GB:
500 val |= SUPPORTED_TP;
501 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
502 val |= SUPPORTED_10000baseT_Full;
503 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
504 val |= SUPPORTED_1000baseT_Full;
505 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
506 val |= SUPPORTED_100baseT_Full;
507 break;
508 default:
509 val |= SUPPORTED_TP;
510 }
511
512 return val;
513}
514
515static int convert_to_et_speed(u32 be_speed)
516{
517 int et_speed = SPEED_10000;
518
519 switch (be_speed) {
520 case PHY_LINK_SPEED_10MBPS:
521 et_speed = SPEED_10;
522 break;
523 case PHY_LINK_SPEED_100MBPS:
524 et_speed = SPEED_100;
525 break;
526 case PHY_LINK_SPEED_1GBPS:
527 et_speed = SPEED_1000;
528 break;
529 case PHY_LINK_SPEED_10GBPS:
530 et_speed = SPEED_10000;
531 break;
532 }
533
534 return et_speed;
535}
536
537bool be_pause_supported(struct be_adapter *adapter)
538{
539 return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
540 adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
541 false : true;
542}
543
436static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 544static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
437{ 545{
438 struct be_adapter *adapter = netdev_priv(netdev); 546 struct be_adapter *adapter = netdev_priv(netdev);
439 struct be_phy_info phy_info; 547 u8 port_speed = 0;
440 u8 mac_speed = 0;
441 u16 link_speed = 0; 548 u16 link_speed = 0;
442 u8 link_status; 549 u8 link_status;
550 u32 et_speed = 0;
443 int status; 551 int status;
444 552
445 if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) { 553 if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) {
446 status = be_cmd_link_status_query(adapter, &mac_speed, 554 if (adapter->phy.forced_port_speed < 0) {
447 &link_speed, &link_status, 0); 555 status = be_cmd_link_status_query(adapter, &port_speed,
448 if (!status) 556 &link_speed, &link_status, 0);
449 be_link_status_update(adapter, link_status); 557 if (!status)
450 558 be_link_status_update(adapter, link_status);
451 /* link_speed is in units of 10 Mbps */ 559 if (link_speed)
452 if (link_speed) { 560 et_speed = link_speed * 10;
453 ethtool_cmd_speed_set(ecmd, link_speed*10); 561 else if (link_status)
562 et_speed = convert_to_et_speed(port_speed);
454 } else { 563 } else {
455 switch (mac_speed) { 564 et_speed = adapter->phy.forced_port_speed;
456 case PHY_LINK_SPEED_10MBPS:
457 ethtool_cmd_speed_set(ecmd, SPEED_10);
458 break;
459 case PHY_LINK_SPEED_100MBPS:
460 ethtool_cmd_speed_set(ecmd, SPEED_100);
461 break;
462 case PHY_LINK_SPEED_1GBPS:
463 ethtool_cmd_speed_set(ecmd, SPEED_1000);
464 break;
465 case PHY_LINK_SPEED_10GBPS:
466 ethtool_cmd_speed_set(ecmd, SPEED_10000);
467 break;
468 case PHY_LINK_SPEED_ZERO:
469 ethtool_cmd_speed_set(ecmd, 0);
470 break;
471 }
472 } 565 }
473 566
474 status = be_cmd_get_phy_info(adapter, &phy_info); 567 ethtool_cmd_speed_set(ecmd, et_speed);
475 if (!status) { 568
476 switch (phy_info.interface_type) { 569 status = be_cmd_get_phy_info(adapter);
477 case PHY_TYPE_XFP_10GB: 570 if (status)
478 case PHY_TYPE_SFP_1GB: 571 return status;
479 case PHY_TYPE_SFP_PLUS_10GB: 572
480 ecmd->port = PORT_FIBRE; 573 ecmd->supported =
481 break; 574 convert_to_et_setting(adapter->phy.interface_type,
482 default: 575 adapter->phy.auto_speeds_supported |
483 ecmd->port = PORT_TP; 576 adapter->phy.fixed_speeds_supported);
484 break; 577 ecmd->advertising =
485 } 578 convert_to_et_setting(adapter->phy.interface_type,
579 adapter->phy.auto_speeds_supported);
580
581 ecmd->port = be_get_port_type(adapter->phy.interface_type,
582 adapter->phy.dac_cable_len);
583
584 if (adapter->phy.auto_speeds_supported) {
585 ecmd->supported |= SUPPORTED_Autoneg;
586 ecmd->autoneg = AUTONEG_ENABLE;
587 ecmd->advertising |= ADVERTISED_Autoneg;
588 }
486 589
487 switch (phy_info.interface_type) { 590 if (be_pause_supported(adapter)) {
488 case PHY_TYPE_KR_10GB: 591 ecmd->supported |= SUPPORTED_Pause;
489 case PHY_TYPE_KX4_10GB: 592 ecmd->advertising |= ADVERTISED_Pause;
490 ecmd->autoneg = AUTONEG_ENABLE; 593 }
594
595 switch (adapter->phy.interface_type) {
596 case PHY_TYPE_KR_10GB:
597 case PHY_TYPE_KX4_10GB:
491 ecmd->transceiver = XCVR_INTERNAL; 598 ecmd->transceiver = XCVR_INTERNAL;
492 break; 599 break;
493 default: 600 default:
494 ecmd->autoneg = AUTONEG_DISABLE; 601 ecmd->transceiver = XCVR_EXTERNAL;
495 ecmd->transceiver = XCVR_EXTERNAL; 602 break;
496 break;
497 }
498 } 603 }
499 604
500 /* Save for future use */ 605 /* Save for future use */
501 adapter->link_speed = ethtool_cmd_speed(ecmd); 606 adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
502 adapter->port_type = ecmd->port; 607 adapter->phy.port_type = ecmd->port;
503 adapter->transceiver = ecmd->transceiver; 608 adapter->phy.transceiver = ecmd->transceiver;
504 adapter->autoneg = ecmd->autoneg; 609 adapter->phy.autoneg = ecmd->autoneg;
610 adapter->phy.advertising = ecmd->advertising;
611 adapter->phy.supported = ecmd->supported;
505 } else { 612 } else {
506 ethtool_cmd_speed_set(ecmd, adapter->link_speed); 613 ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
507 ecmd->port = adapter->port_type; 614 ecmd->port = adapter->phy.port_type;
508 ecmd->transceiver = adapter->transceiver; 615 ecmd->transceiver = adapter->phy.transceiver;
509 ecmd->autoneg = adapter->autoneg; 616 ecmd->autoneg = adapter->phy.autoneg;
617 ecmd->advertising = adapter->phy.advertising;
618 ecmd->supported = adapter->phy.supported;
510 } 619 }
511 620
512 ecmd->duplex = DUPLEX_FULL; 621 ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
513 ecmd->phy_address = adapter->port_num; 622 ecmd->phy_address = adapter->port_num;
514 switch (ecmd->port) {
515 case PORT_FIBRE:
516 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
517 break;
518 case PORT_TP:
519 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
520 break;
521 case PORT_AUI:
522 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
523 break;
524 }
525
526 if (ecmd->autoneg) {
527 ecmd->supported |= SUPPORTED_1000baseT_Full;
528 ecmd->supported |= SUPPORTED_Autoneg;
529 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
530 ADVERTISED_1000baseT_Full);
531 }
532 623
533 return 0; 624 return 0;
534} 625}
@@ -548,7 +639,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
548 struct be_adapter *adapter = netdev_priv(netdev); 639 struct be_adapter *adapter = netdev_priv(netdev);
549 640
550 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause); 641 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
551 ecmd->autoneg = 0; 642 ecmd->autoneg = adapter->phy.fc_autoneg;
552} 643}
553 644
554static int 645static int
@@ -702,7 +793,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
702 } 793 }
703 } 794 }
704 795
705 if (be_test_ddr_dma(adapter) != 0) { 796 if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
706 data[3] = 1; 797 data[3] = 1;
707 test->flags |= ETH_TEST_FL_FAILED; 798 test->flags |= ETH_TEST_FL_FAILED;
708 } 799 }
@@ -787,6 +878,81 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
787 return status; 878 return status;
788} 879}
789 880
881static u32 be_get_msg_level(struct net_device *netdev)
882{
883 struct be_adapter *adapter = netdev_priv(netdev);
884
885 if (lancer_chip(adapter)) {
886 dev_err(&adapter->pdev->dev, "Operation not supported\n");
887 return -EOPNOTSUPP;
888 }
889
890 return adapter->msg_enable;
891}
892
893static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
894{
895 struct be_dma_mem extfat_cmd;
896 struct be_fat_conf_params *cfgs;
897 int status;
898 int i, j;
899
900 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
901 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
902 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
903 &extfat_cmd.dma);
904 if (!extfat_cmd.va) {
905 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
906 __func__);
907 goto err;
908 }
909 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
910 if (!status) {
911 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
912 sizeof(struct be_cmd_resp_hdr));
913 for (i = 0; i < cfgs->num_modules; i++) {
914 for (j = 0; j < cfgs->module[i].num_modes; j++) {
915 if (cfgs->module[i].trace_lvl[j].mode ==
916 MODE_UART)
917 cfgs->module[i].trace_lvl[j].dbg_lvl =
918 cpu_to_le32(level);
919 }
920 }
921 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
922 cfgs);
923 if (status)
924 dev_err(&adapter->pdev->dev,
925 "Message level set failed\n");
926 } else {
927 dev_err(&adapter->pdev->dev, "Message level get failed\n");
928 }
929
930 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
931 extfat_cmd.dma);
932err:
933 return;
934}
935
936static void be_set_msg_level(struct net_device *netdev, u32 level)
937{
938 struct be_adapter *adapter = netdev_priv(netdev);
939
940 if (lancer_chip(adapter)) {
941 dev_err(&adapter->pdev->dev, "Operation not supported\n");
942 return;
943 }
944
945 if (adapter->msg_enable == level)
946 return;
947
948 if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
949 be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
950 FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
951 adapter->msg_enable = level;
952
953 return;
954}
955
790const struct ethtool_ops be_ethtool_ops = { 956const struct ethtool_ops be_ethtool_ops = {
791 .get_settings = be_get_settings, 957 .get_settings = be_get_settings,
792 .get_drvinfo = be_get_drvinfo, 958 .get_drvinfo = be_get_drvinfo,
@@ -802,6 +968,8 @@ const struct ethtool_ops be_ethtool_ops = {
802 .set_pauseparam = be_set_pauseparam, 968 .set_pauseparam = be_set_pauseparam,
803 .get_strings = be_get_stat_strings, 969 .get_strings = be_get_stat_strings,
804 .set_phys_id = be_set_phys_id, 970 .set_phys_id = be_set_phys_id,
971 .get_msglevel = be_get_msg_level,
972 .set_msglevel = be_set_msg_level,
805 .get_sset_count = be_get_sset_count, 973 .get_sset_count = be_get_sset_count,
806 .get_ethtool_stats = be_get_ethtool_stats, 974 .get_ethtool_stats = be_get_ethtool_stats,
807 .get_regs_len = be_get_reg_len, 975 .get_regs_len = be_get_reg_len,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index f2c89e3ccabd..f38b58c8dbba 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -58,6 +58,8 @@
58 58
59#define SLI_PORT_CONTROL_IP_MASK 0x08000000 59#define SLI_PORT_CONTROL_IP_MASK 0x08000000
60 60
61#define PCICFG_CUST_SCRATCHPAD_CSR 0x1EC
62
61/********* Memory BAR register ************/ 63/********* Memory BAR register ************/
62#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 64#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
63/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt 65/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -162,22 +164,23 @@
162#define QUERY_FAT 1 164#define QUERY_FAT 1
163 165
164/* Flashrom related descriptors */ 166/* Flashrom related descriptors */
167#define MAX_FLASH_COMP 32
165#define IMAGE_TYPE_FIRMWARE 160 168#define IMAGE_TYPE_FIRMWARE 160
166#define IMAGE_TYPE_BOOTCODE 224 169#define IMAGE_TYPE_BOOTCODE 224
167#define IMAGE_TYPE_OPTIONROM 32 170#define IMAGE_TYPE_OPTIONROM 32
168 171
169#define NUM_FLASHDIR_ENTRIES 32 172#define NUM_FLASHDIR_ENTRIES 32
170 173
171#define IMG_TYPE_ISCSI_ACTIVE 0 174#define OPTYPE_ISCSI_ACTIVE 0
172#define IMG_TYPE_REDBOOT 1 175#define OPTYPE_REDBOOT 1
173#define IMG_TYPE_BIOS 2 176#define OPTYPE_BIOS 2
174#define IMG_TYPE_PXE_BIOS 3 177#define OPTYPE_PXE_BIOS 3
175#define IMG_TYPE_FCOE_BIOS 8 178#define OPTYPE_FCOE_BIOS 8
176#define IMG_TYPE_ISCSI_BACKUP 9 179#define OPTYPE_ISCSI_BACKUP 9
177#define IMG_TYPE_FCOE_FW_ACTIVE 10 180#define OPTYPE_FCOE_FW_ACTIVE 10
178#define IMG_TYPE_FCOE_FW_BACKUP 11 181#define OPTYPE_FCOE_FW_BACKUP 11
179#define IMG_TYPE_NCSI_FW 13 182#define OPTYPE_NCSI_FW 13
180#define IMG_TYPE_PHY_FW 99 183#define OPTYPE_PHY_FW 99
181#define TN_8022 13 184#define TN_8022 13
182 185
183#define ILLEGAL_IOCTL_REQ 2 186#define ILLEGAL_IOCTL_REQ 2
@@ -223,6 +226,24 @@
223#define FLASH_REDBOOT_START_g3 (262144) 226#define FLASH_REDBOOT_START_g3 (262144)
224#define FLASH_PHY_FW_START_g3 1310720 227#define FLASH_PHY_FW_START_g3 1310720
225 228
229#define IMAGE_NCSI 16
230#define IMAGE_OPTION_ROM_PXE 32
231#define IMAGE_OPTION_ROM_FCoE 33
232#define IMAGE_OPTION_ROM_ISCSI 34
233#define IMAGE_FLASHISM_JUMPVECTOR 48
234#define IMAGE_FLASH_ISM 49
235#define IMAGE_JUMP_VECTOR 50
236#define IMAGE_FIRMWARE_iSCSI 160
237#define IMAGE_FIRMWARE_COMP_iSCSI 161
238#define IMAGE_FIRMWARE_FCoE 162
239#define IMAGE_FIRMWARE_COMP_FCoE 163
240#define IMAGE_FIRMWARE_BACKUP_iSCSI 176
241#define IMAGE_FIRMWARE_BACKUP_COMP_iSCSI 177
242#define IMAGE_FIRMWARE_BACKUP_FCoE 178
243#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
244#define IMAGE_FIRMWARE_PHY 192
245#define IMAGE_BOOT_CODE 224
246
226/************* Rx Packet Type Encoding **************/ 247/************* Rx Packet Type Encoding **************/
227#define BE_UNICAST_PACKET 0 248#define BE_UNICAST_PACKET 0
228#define BE_MULTICAST_PACKET 1 249#define BE_MULTICAST_PACKET 1
@@ -445,6 +466,7 @@ struct flash_comp {
445 unsigned long offset; 466 unsigned long offset;
446 int optype; 467 int optype;
447 int size; 468 int size;
469 int img_type;
448}; 470};
449 471
450struct image_hdr { 472struct image_hdr {
@@ -481,17 +503,19 @@ struct flash_section_hdr {
481 u32 format_rev; 503 u32 format_rev;
482 u32 cksum; 504 u32 cksum;
483 u32 antidote; 505 u32 antidote;
484 u32 build_no; 506 u32 num_images;
485 u8 id_string[64]; 507 u8 id_string[128];
486 u32 active_entry_mask; 508 u32 rsvd[4];
487 u32 valid_entry_mask; 509} __packed;
488 u32 org_content_mask; 510
489 u32 rsvd0; 511struct flash_section_hdr_g2 {
490 u32 rsvd1; 512 u32 format_rev;
491 u32 rsvd2; 513 u32 cksum;
492 u32 rsvd3; 514 u32 antidote;
493 u32 rsvd4; 515 u32 build_num;
494}; 516 u8 id_string[128];
517 u32 rsvd[8];
518} __packed;
495 519
496struct flash_section_entry { 520struct flash_section_entry {
497 u32 type; 521 u32 type;
@@ -503,10 +527,16 @@ struct flash_section_entry {
503 u32 rsvd0; 527 u32 rsvd0;
504 u32 rsvd1; 528 u32 rsvd1;
505 u8 ver_data[32]; 529 u8 ver_data[32];
506}; 530} __packed;
507 531
508struct flash_section_info { 532struct flash_section_info {
509 u8 cookie[32]; 533 u8 cookie[32];
510 struct flash_section_hdr fsec_hdr; 534 struct flash_section_hdr fsec_hdr;
511 struct flash_section_entry fsec_entry[32]; 535 struct flash_section_entry fsec_entry[32];
512}; 536} __packed;
537
538struct flash_section_info_g2 {
539 u8 cookie[32];
540 struct flash_section_hdr_g2 fsec_hdr;
541 struct flash_section_entry fsec_entry[32];
542} __packed;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 528a886bc2cd..081c77701168 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -421,6 +421,9 @@ void be_parse_stats(struct be_adapter *adapter)
421 populate_be2_stats(adapter); 421 populate_be2_stats(adapter);
422 } 422 }
423 423
424 if (lancer_chip(adapter))
425 goto done;
426
424 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */ 427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
425 for_all_rx_queues(adapter, rxo, i) { 428 for_all_rx_queues(adapter, rxo, i) {
426 /* below erx HW counter can actually wrap around after 429 /* below erx HW counter can actually wrap around after
@@ -429,6 +432,8 @@ void be_parse_stats(struct be_adapter *adapter)
429 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, 432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
430 (u16)erx->rx_drops_no_fragments[rxo->q.id]); 433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
431 } 434 }
435done:
436 return;
432} 437}
433 438
434static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, 439static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
@@ -797,22 +802,30 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
797 if (adapter->promiscuous) 802 if (adapter->promiscuous)
798 return 0; 803 return 0;
799 804
800 if (adapter->vlans_added <= adapter->max_vlans) { 805 if (adapter->vlans_added > adapter->max_vlans)
801 /* Construct VLAN Table to give to HW */ 806 goto set_vlan_promisc;
802 for (i = 0; i < VLAN_N_VID; i++) { 807
803 if (adapter->vlan_tag[i]) { 808 /* Construct VLAN Table to give to HW */
804 vtag[ntags] = cpu_to_le16(i); 809 for (i = 0; i < VLAN_N_VID; i++)
805 ntags++; 810 if (adapter->vlan_tag[i])
806 } 811 vtag[ntags++] = cpu_to_le16(i);
807 } 812
808 status = be_cmd_vlan_config(adapter, adapter->if_handle, 813 status = be_cmd_vlan_config(adapter, adapter->if_handle,
809 vtag, ntags, 1, 0); 814 vtag, ntags, 1, 0);
810 } else { 815
811 status = be_cmd_vlan_config(adapter, adapter->if_handle, 816 /* Set to VLAN promisc mode as setting VLAN filter failed */
812 NULL, 0, 1, 1); 817 if (status) {
818 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
819 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
820 goto set_vlan_promisc;
813 } 821 }
814 822
815 return status; 823 return status;
824
825set_vlan_promisc:
826 status = be_cmd_vlan_config(adapter, adapter->if_handle,
827 NULL, 0, 1, 1);
828 return status;
816} 829}
817 830
818static int be_vlan_add_vid(struct net_device *netdev, u16 vid) 831static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
@@ -862,6 +875,7 @@ ret:
862static void be_set_rx_mode(struct net_device *netdev) 875static void be_set_rx_mode(struct net_device *netdev)
863{ 876{
864 struct be_adapter *adapter = netdev_priv(netdev); 877 struct be_adapter *adapter = netdev_priv(netdev);
878 int status;
865 879
866 if (netdev->flags & IFF_PROMISC) { 880 if (netdev->flags & IFF_PROMISC) {
867 be_cmd_rx_filter(adapter, IFF_PROMISC, ON); 881 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
@@ -908,7 +922,14 @@ static void be_set_rx_mode(struct net_device *netdev)
908 } 922 }
909 } 923 }
910 924
911 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); 925 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
926
927 /* Set to MCAST promisc mode if setting MULTICAST address fails */
928 if (status) {
929 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
930 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
931 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
932 }
912done: 933done:
913 return; 934 return;
914} 935}
@@ -1028,6 +1049,29 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1028 return status; 1049 return status;
1029} 1050}
1030 1051
1052static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1053{
1054 struct pci_dev *dev, *pdev = adapter->pdev;
1055 int vfs = 0, assigned_vfs = 0, pos, vf_fn;
1056 u16 offset, stride;
1057
1058 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1059 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1060 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1061
1062 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1063 while (dev) {
1064 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1065 if (dev->is_virtfn && dev->devfn == vf_fn) {
1066 vfs++;
1067 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1068 assigned_vfs++;
1069 }
1070 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1071 }
1072 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1073}
1074
1031static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo) 1075static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1032{ 1076{
1033 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]); 1077 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
@@ -1238,6 +1282,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
1238 skb_checksum_none_assert(skb); 1282 skb_checksum_none_assert(skb);
1239 1283
1240 skb->protocol = eth_type_trans(skb, netdev); 1284 skb->protocol = eth_type_trans(skb, netdev);
1285 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1241 if (netdev->features & NETIF_F_RXHASH) 1286 if (netdev->features & NETIF_F_RXHASH)
1242 skb->rxhash = rxcp->rss_hash; 1287 skb->rxhash = rxcp->rss_hash;
1243 1288
@@ -1294,6 +1339,7 @@ void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1294 skb->len = rxcp->pkt_size; 1339 skb->len = rxcp->pkt_size;
1295 skb->data_len = rxcp->pkt_size; 1340 skb->data_len = rxcp->pkt_size;
1296 skb->ip_summed = CHECKSUM_UNNECESSARY; 1341 skb->ip_summed = CHECKSUM_UNNECESSARY;
1342 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1297 if (adapter->netdev->features & NETIF_F_RXHASH) 1343 if (adapter->netdev->features & NETIF_F_RXHASH)
1298 skb->rxhash = rxcp->rss_hash; 1344 skb->rxhash = rxcp->rss_hash;
1299 1345
@@ -1555,7 +1601,9 @@ static int event_handle(struct be_eq_obj *eqo)
1555 if (!num) 1601 if (!num)
1556 rearm = true; 1602 rearm = true;
1557 1603
1558 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num); 1604 if (num || msix_enabled(eqo->adapter))
1605 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1606
1559 if (num) 1607 if (num)
1560 napi_schedule(&eqo->napi); 1608 napi_schedule(&eqo->napi);
1561 1609
@@ -1764,9 +1812,9 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
1764 1812
1765static int be_num_txqs_want(struct be_adapter *adapter) 1813static int be_num_txqs_want(struct be_adapter *adapter)
1766{ 1814{
1767 if (sriov_enabled(adapter) || be_is_mc(adapter) || 1815 if (sriov_want(adapter) || be_is_mc(adapter) ||
1768 lancer_chip(adapter) || !be_physfn(adapter) || 1816 lancer_chip(adapter) || !be_physfn(adapter) ||
1769 adapter->generation == BE_GEN2) 1817 adapter->generation == BE_GEN2)
1770 return 1; 1818 return 1;
1771 else 1819 else
1772 return MAX_TX_QS; 1820 return MAX_TX_QS;
@@ -2093,7 +2141,7 @@ static void be_msix_disable(struct be_adapter *adapter)
2093static uint be_num_rss_want(struct be_adapter *adapter) 2141static uint be_num_rss_want(struct be_adapter *adapter)
2094{ 2142{
2095 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && 2143 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2096 adapter->num_vfs == 0 && be_physfn(adapter) && 2144 !sriov_want(adapter) && be_physfn(adapter) &&
2097 !be_is_mc(adapter)) 2145 !be_is_mc(adapter))
2098 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 2146 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2099 else 2147 else
@@ -2127,53 +2175,6 @@ done:
2127 return; 2175 return;
2128} 2176}
2129 2177
2130static int be_sriov_enable(struct be_adapter *adapter)
2131{
2132 be_check_sriov_fn_type(adapter);
2133
2134#ifdef CONFIG_PCI_IOV
2135 if (be_physfn(adapter) && num_vfs) {
2136 int status, pos;
2137 u16 dev_vfs;
2138
2139 pos = pci_find_ext_capability(adapter->pdev,
2140 PCI_EXT_CAP_ID_SRIOV);
2141 pci_read_config_word(adapter->pdev,
2142 pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
2143
2144 adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2145 if (adapter->num_vfs != num_vfs)
2146 dev_info(&adapter->pdev->dev,
2147 "Device supports %d VFs and not %d\n",
2148 adapter->num_vfs, num_vfs);
2149
2150 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2151 if (status)
2152 adapter->num_vfs = 0;
2153
2154 if (adapter->num_vfs) {
2155 adapter->vf_cfg = kcalloc(num_vfs,
2156 sizeof(struct be_vf_cfg),
2157 GFP_KERNEL);
2158 if (!adapter->vf_cfg)
2159 return -ENOMEM;
2160 }
2161 }
2162#endif
2163 return 0;
2164}
2165
2166static void be_sriov_disable(struct be_adapter *adapter)
2167{
2168#ifdef CONFIG_PCI_IOV
2169 if (sriov_enabled(adapter)) {
2170 pci_disable_sriov(adapter->pdev);
2171 kfree(adapter->vf_cfg);
2172 adapter->num_vfs = 0;
2173 }
2174#endif
2175}
2176
2177static inline int be_msix_vec_get(struct be_adapter *adapter, 2178static inline int be_msix_vec_get(struct be_adapter *adapter,
2178 struct be_eq_obj *eqo) 2179 struct be_eq_obj *eqo)
2179{ 2180{
@@ -2475,6 +2476,11 @@ static void be_vf_clear(struct be_adapter *adapter)
2475 struct be_vf_cfg *vf_cfg; 2476 struct be_vf_cfg *vf_cfg;
2476 u32 vf; 2477 u32 vf;
2477 2478
2479 if (be_find_vfs(adapter, ASSIGNED)) {
2480 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2481 goto done;
2482 }
2483
2478 for_all_vfs(adapter, vf_cfg, vf) { 2484 for_all_vfs(adapter, vf_cfg, vf) {
2479 if (lancer_chip(adapter)) 2485 if (lancer_chip(adapter))
2480 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1); 2486 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
@@ -2484,6 +2490,10 @@ static void be_vf_clear(struct be_adapter *adapter)
2484 2490
2485 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); 2491 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2486 } 2492 }
2493 pci_disable_sriov(adapter->pdev);
2494done:
2495 kfree(adapter->vf_cfg);
2496 adapter->num_vfs = 0;
2487} 2497}
2488 2498
2489static int be_clear(struct be_adapter *adapter) 2499static int be_clear(struct be_adapter *adapter)
@@ -2513,29 +2523,60 @@ static int be_clear(struct be_adapter *adapter)
2513 be_cmd_fw_clean(adapter); 2523 be_cmd_fw_clean(adapter);
2514 2524
2515 be_msix_disable(adapter); 2525 be_msix_disable(adapter);
2516 kfree(adapter->pmac_id); 2526 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
2517 return 0; 2527 return 0;
2518} 2528}
2519 2529
2520static void be_vf_setup_init(struct be_adapter *adapter) 2530static int be_vf_setup_init(struct be_adapter *adapter)
2521{ 2531{
2522 struct be_vf_cfg *vf_cfg; 2532 struct be_vf_cfg *vf_cfg;
2523 int vf; 2533 int vf;
2524 2534
2535 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2536 GFP_KERNEL);
2537 if (!adapter->vf_cfg)
2538 return -ENOMEM;
2539
2525 for_all_vfs(adapter, vf_cfg, vf) { 2540 for_all_vfs(adapter, vf_cfg, vf) {
2526 vf_cfg->if_handle = -1; 2541 vf_cfg->if_handle = -1;
2527 vf_cfg->pmac_id = -1; 2542 vf_cfg->pmac_id = -1;
2528 } 2543 }
2544 return 0;
2529} 2545}
2530 2546
2531static int be_vf_setup(struct be_adapter *adapter) 2547static int be_vf_setup(struct be_adapter *adapter)
2532{ 2548{
2533 struct be_vf_cfg *vf_cfg; 2549 struct be_vf_cfg *vf_cfg;
2550 struct device *dev = &adapter->pdev->dev;
2534 u32 cap_flags, en_flags, vf; 2551 u32 cap_flags, en_flags, vf;
2535 u16 def_vlan, lnk_speed; 2552 u16 def_vlan, lnk_speed;
2536 int status; 2553 int status, enabled_vfs;
2537 2554
2538 be_vf_setup_init(adapter); 2555 enabled_vfs = be_find_vfs(adapter, ENABLED);
2556 if (enabled_vfs) {
2557 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2558 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2559 return 0;
2560 }
2561
2562 if (num_vfs > adapter->dev_num_vfs) {
2563 dev_warn(dev, "Device supports %d VFs and not %d\n",
2564 adapter->dev_num_vfs, num_vfs);
2565 num_vfs = adapter->dev_num_vfs;
2566 }
2567
2568 status = pci_enable_sriov(adapter->pdev, num_vfs);
2569 if (!status) {
2570 adapter->num_vfs = num_vfs;
2571 } else {
2572 /* Platform doesn't support SRIOV though device supports it */
2573 dev_warn(dev, "SRIOV enable failed\n");
2574 return 0;
2575 }
2576
2577 status = be_vf_setup_init(adapter);
2578 if (status)
2579 goto err;
2539 2580
2540 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 2581 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2541 BE_IF_FLAGS_MULTICAST; 2582 BE_IF_FLAGS_MULTICAST;
@@ -2546,9 +2587,11 @@ static int be_vf_setup(struct be_adapter *adapter)
2546 goto err; 2587 goto err;
2547 } 2588 }
2548 2589
2549 status = be_vf_eth_addr_config(adapter); 2590 if (!enabled_vfs) {
2550 if (status) 2591 status = be_vf_eth_addr_config(adapter);
2551 goto err; 2592 if (status)
2593 goto err;
2594 }
2552 2595
2553 for_all_vfs(adapter, vf_cfg, vf) { 2596 for_all_vfs(adapter, vf_cfg, vf) {
2554 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed, 2597 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
@@ -2571,11 +2614,12 @@ err:
2571static void be_setup_init(struct be_adapter *adapter) 2614static void be_setup_init(struct be_adapter *adapter)
2572{ 2615{
2573 adapter->vlan_prio_bmap = 0xff; 2616 adapter->vlan_prio_bmap = 0xff;
2574 adapter->link_speed = -1; 2617 adapter->phy.link_speed = -1;
2575 adapter->if_handle = -1; 2618 adapter->if_handle = -1;
2576 adapter->be3_native = false; 2619 adapter->be3_native = false;
2577 adapter->promiscuous = false; 2620 adapter->promiscuous = false;
2578 adapter->eq_next_idx = 0; 2621 adapter->eq_next_idx = 0;
2622 adapter->phy.forced_port_speed = -1;
2579} 2623}
2580 2624
2581static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac) 2625static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
@@ -2604,9 +2648,25 @@ do_none:
2604 return status; 2648 return status;
2605} 2649}
2606 2650
2651/* Routine to query per function resource limits */
2652static int be_get_config(struct be_adapter *adapter)
2653{
2654 int pos;
2655 u16 dev_num_vfs;
2656
2657 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2658 if (pos) {
2659 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2660 &dev_num_vfs);
2661 adapter->dev_num_vfs = dev_num_vfs;
2662 }
2663 return 0;
2664}
2665
2607static int be_setup(struct be_adapter *adapter) 2666static int be_setup(struct be_adapter *adapter)
2608{ 2667{
2609 struct net_device *netdev = adapter->netdev; 2668 struct net_device *netdev = adapter->netdev;
2669 struct device *dev = &adapter->pdev->dev;
2610 u32 cap_flags, en_flags; 2670 u32 cap_flags, en_flags;
2611 u32 tx_fc, rx_fc; 2671 u32 tx_fc, rx_fc;
2612 int status; 2672 int status;
@@ -2614,6 +2674,8 @@ static int be_setup(struct be_adapter *adapter)
2614 2674
2615 be_setup_init(adapter); 2675 be_setup_init(adapter);
2616 2676
2677 be_get_config(adapter);
2678
2617 be_cmd_req_native_mode(adapter); 2679 be_cmd_req_native_mode(adapter);
2618 2680
2619 be_msix_enable(adapter); 2681 be_msix_enable(adapter);
@@ -2680,36 +2742,33 @@ static int be_setup(struct be_adapter *adapter)
2680 2742
2681 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); 2743 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2682 2744
2683 status = be_vid_config(adapter, false, 0); 2745 be_vid_config(adapter, false, 0);
2684 if (status)
2685 goto err;
2686 2746
2687 be_set_rx_mode(adapter->netdev); 2747 be_set_rx_mode(adapter->netdev);
2688 2748
2689 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc); 2749 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2690 /* For Lancer: It is legal for this cmd to fail on VF */
2691 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2692 goto err;
2693 2750
2694 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) { 2751 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2695 status = be_cmd_set_flow_control(adapter, adapter->tx_fc, 2752 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2696 adapter->rx_fc); 2753 adapter->rx_fc);
2697 /* For Lancer: It is legal for this cmd to fail on VF */
2698 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2699 goto err;
2700 }
2701 2754
2702 pcie_set_readrq(adapter->pdev, 4096); 2755 pcie_set_readrq(adapter->pdev, 4096);
2703 2756
2704 if (sriov_enabled(adapter)) { 2757 if (be_physfn(adapter) && num_vfs) {
2705 status = be_vf_setup(adapter); 2758 if (adapter->dev_num_vfs)
2706 if (status) 2759 be_vf_setup(adapter);
2707 goto err; 2760 else
2761 dev_warn(dev, "device doesn't support SRIOV\n");
2708 } 2762 }
2709 2763
2764 be_cmd_get_phy_info(adapter);
2765 if (be_pause_supported(adapter))
2766 adapter->phy.fc_autoneg = 1;
2767
2710 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 2768 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2711 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; 2769 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2712 2770
2771 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
2713 return 0; 2772 return 0;
2714err: 2773err:
2715 be_clear(adapter); 2774 be_clear(adapter);
@@ -2731,6 +2790,8 @@ static void be_netpoll(struct net_device *netdev)
2731#endif 2790#endif
2732 2791
2733#define FW_FILE_HDR_SIGN "ServerEngines Corp. " 2792#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2793char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2794
2734static bool be_flash_redboot(struct be_adapter *adapter, 2795static bool be_flash_redboot(struct be_adapter *adapter,
2735 const u8 *p, u32 img_start, int image_size, 2796 const u8 *p, u32 img_start, int image_size,
2736 int hdr_size) 2797 int hdr_size)
@@ -2760,71 +2821,105 @@ static bool be_flash_redboot(struct be_adapter *adapter,
2760 2821
2761static bool phy_flashing_required(struct be_adapter *adapter) 2822static bool phy_flashing_required(struct be_adapter *adapter)
2762{ 2823{
2763 int status = 0; 2824 return (adapter->phy.phy_type == TN_8022 &&
2764 struct be_phy_info phy_info; 2825 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2826}
2765 2827
2766 status = be_cmd_get_phy_info(adapter, &phy_info); 2828static bool is_comp_in_ufi(struct be_adapter *adapter,
2767 if (status) 2829 struct flash_section_info *fsec, int type)
2768 return false; 2830{
2769 if ((phy_info.phy_type == TN_8022) && 2831 int i = 0, img_type = 0;
2770 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) { 2832 struct flash_section_info_g2 *fsec_g2 = NULL;
2771 return true; 2833
2834 if (adapter->generation != BE_GEN3)
2835 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2836
2837 for (i = 0; i < MAX_FLASH_COMP; i++) {
2838 if (fsec_g2)
2839 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2840 else
2841 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2842
2843 if (img_type == type)
2844 return true;
2772 } 2845 }
2773 return false; 2846 return false;
2847
2848}
2849
2850struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2851 int header_size,
2852 const struct firmware *fw)
2853{
2854 struct flash_section_info *fsec = NULL;
2855 const u8 *p = fw->data;
2856
2857 p += header_size;
2858 while (p < (fw->data + fw->size)) {
2859 fsec = (struct flash_section_info *)p;
2860 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2861 return fsec;
2862 p += 32;
2863 }
2864 return NULL;
2774} 2865}
2775 2866
2776static int be_flash_data(struct be_adapter *adapter, 2867static int be_flash_data(struct be_adapter *adapter,
2777 const struct firmware *fw, 2868 const struct firmware *fw,
2778 struct be_dma_mem *flash_cmd, int num_of_images) 2869 struct be_dma_mem *flash_cmd,
2870 int num_of_images)
2779 2871
2780{ 2872{
2781 int status = 0, i, filehdr_size = 0; 2873 int status = 0, i, filehdr_size = 0;
2874 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2782 u32 total_bytes = 0, flash_op; 2875 u32 total_bytes = 0, flash_op;
2783 int num_bytes; 2876 int num_bytes;
2784 const u8 *p = fw->data; 2877 const u8 *p = fw->data;
2785 struct be_cmd_write_flashrom *req = flash_cmd->va; 2878 struct be_cmd_write_flashrom *req = flash_cmd->va;
2786 const struct flash_comp *pflashcomp; 2879 const struct flash_comp *pflashcomp;
2787 int num_comp; 2880 int num_comp, hdr_size;
2788 2881 struct flash_section_info *fsec = NULL;
2789 static const struct flash_comp gen3_flash_types[10] = { 2882
2790 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE, 2883 struct flash_comp gen3_flash_types[] = {
2791 FLASH_IMAGE_MAX_SIZE_g3}, 2884 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2792 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT, 2885 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2793 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3}, 2886 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2794 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS, 2887 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2795 FLASH_BIOS_IMAGE_MAX_SIZE_g3}, 2888 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2796 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS, 2889 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2797 FLASH_BIOS_IMAGE_MAX_SIZE_g3}, 2890 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2798 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS, 2891 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2799 FLASH_BIOS_IMAGE_MAX_SIZE_g3}, 2892 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2800 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP, 2893 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2801 FLASH_IMAGE_MAX_SIZE_g3}, 2894 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2802 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE, 2895 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2803 FLASH_IMAGE_MAX_SIZE_g3}, 2896 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2804 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP, 2897 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2805 FLASH_IMAGE_MAX_SIZE_g3}, 2898 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2806 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW, 2899 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2807 FLASH_NCSI_IMAGE_MAX_SIZE_g3}, 2900 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2808 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW, 2901 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2809 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3} 2902 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2903 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2810 }; 2904 };
2811 static const struct flash_comp gen2_flash_types[8] = { 2905
2812 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE, 2906 struct flash_comp gen2_flash_types[] = {
2813 FLASH_IMAGE_MAX_SIZE_g2}, 2907 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2814 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT, 2908 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2815 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2}, 2909 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2816 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS, 2910 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2817 FLASH_BIOS_IMAGE_MAX_SIZE_g2}, 2911 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2818 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS, 2912 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2819 FLASH_BIOS_IMAGE_MAX_SIZE_g2}, 2913 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2820 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS, 2914 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2821 FLASH_BIOS_IMAGE_MAX_SIZE_g2}, 2915 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2822 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP, 2916 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2823 FLASH_IMAGE_MAX_SIZE_g2}, 2917 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2824 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE, 2918 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2825 FLASH_IMAGE_MAX_SIZE_g2}, 2919 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2826 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP, 2920 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2827 FLASH_IMAGE_MAX_SIZE_g2} 2921 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2922 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2828 }; 2923 };
2829 2924
2830 if (adapter->generation == BE_GEN3) { 2925 if (adapter->generation == BE_GEN3) {
@@ -2836,22 +2931,37 @@ static int be_flash_data(struct be_adapter *adapter,
2836 filehdr_size = sizeof(struct flash_file_hdr_g2); 2931 filehdr_size = sizeof(struct flash_file_hdr_g2);
2837 num_comp = ARRAY_SIZE(gen2_flash_types); 2932 num_comp = ARRAY_SIZE(gen2_flash_types);
2838 } 2933 }
2934 /* Get flash section info*/
2935 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2936 if (!fsec) {
2937 dev_err(&adapter->pdev->dev,
2938 "Invalid Cookie. UFI corrupted ?\n");
2939 return -1;
2940 }
2839 for (i = 0; i < num_comp; i++) { 2941 for (i = 0; i < num_comp; i++) {
2840 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) && 2942 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2841 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) 2943 continue;
2944
2945 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2946 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2842 continue; 2947 continue;
2843 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) { 2948
2949 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
2844 if (!phy_flashing_required(adapter)) 2950 if (!phy_flashing_required(adapter))
2845 continue; 2951 continue;
2846 } 2952 }
2847 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) && 2953
2848 (!be_flash_redboot(adapter, fw->data, 2954 hdr_size = filehdr_size +
2849 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size + 2955 (num_of_images * sizeof(struct image_hdr));
2850 (num_of_images * sizeof(struct image_hdr))))) 2956
2957 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
2958 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
2959 pflashcomp[i].size, hdr_size)))
2851 continue; 2960 continue;
2961
2962 /* Flash the component */
2852 p = fw->data; 2963 p = fw->data;
2853 p += filehdr_size + pflashcomp[i].offset 2964 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
2854 + (num_of_images * sizeof(struct image_hdr));
2855 if (p + pflashcomp[i].size > fw->data + fw->size) 2965 if (p + pflashcomp[i].size > fw->data + fw->size)
2856 return -1; 2966 return -1;
2857 total_bytes = pflashcomp[i].size; 2967 total_bytes = pflashcomp[i].size;
@@ -2862,12 +2972,12 @@ static int be_flash_data(struct be_adapter *adapter,
2862 num_bytes = total_bytes; 2972 num_bytes = total_bytes;
2863 total_bytes -= num_bytes; 2973 total_bytes -= num_bytes;
2864 if (!total_bytes) { 2974 if (!total_bytes) {
2865 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) 2975 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
2866 flash_op = FLASHROM_OPER_PHY_FLASH; 2976 flash_op = FLASHROM_OPER_PHY_FLASH;
2867 else 2977 else
2868 flash_op = FLASHROM_OPER_FLASH; 2978 flash_op = FLASHROM_OPER_FLASH;
2869 } else { 2979 } else {
2870 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) 2980 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
2871 flash_op = FLASHROM_OPER_PHY_SAVE; 2981 flash_op = FLASHROM_OPER_PHY_SAVE;
2872 else 2982 else
2873 flash_op = FLASHROM_OPER_SAVE; 2983 flash_op = FLASHROM_OPER_SAVE;
@@ -2879,7 +2989,7 @@ static int be_flash_data(struct be_adapter *adapter,
2879 if (status) { 2989 if (status) {
2880 if ((status == ILLEGAL_IOCTL_REQ) && 2990 if ((status == ILLEGAL_IOCTL_REQ) &&
2881 (pflashcomp[i].optype == 2991 (pflashcomp[i].optype ==
2882 IMG_TYPE_PHY_FW)) 2992 OPTYPE_PHY_FW))
2883 break; 2993 break;
2884 dev_err(&adapter->pdev->dev, 2994 dev_err(&adapter->pdev->dev,
2885 "cmd to write to flash rom failed.\n"); 2995 "cmd to write to flash rom failed.\n");
@@ -3280,8 +3390,6 @@ static void __devexit be_remove(struct pci_dev *pdev)
3280 3390
3281 be_ctrl_cleanup(adapter); 3391 be_ctrl_cleanup(adapter);
3282 3392
3283 be_sriov_disable(adapter);
3284
3285 pci_set_drvdata(pdev, NULL); 3393 pci_set_drvdata(pdev, NULL);
3286 pci_release_regions(pdev); 3394 pci_release_regions(pdev);
3287 pci_disable_device(pdev); 3395 pci_disable_device(pdev);
@@ -3295,9 +3403,43 @@ bool be_is_wol_supported(struct be_adapter *adapter)
3295 !be_is_wol_excluded(adapter)) ? true : false; 3403 !be_is_wol_excluded(adapter)) ? true : false;
3296} 3404}
3297 3405
3298static int be_get_config(struct be_adapter *adapter) 3406u32 be_get_fw_log_level(struct be_adapter *adapter)
3299{ 3407{
3408 struct be_dma_mem extfat_cmd;
3409 struct be_fat_conf_params *cfgs;
3300 int status; 3410 int status;
3411 u32 level = 0;
3412 int j;
3413
3414 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3415 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3416 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3417 &extfat_cmd.dma);
3418
3419 if (!extfat_cmd.va) {
3420 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3421 __func__);
3422 goto err;
3423 }
3424
3425 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3426 if (!status) {
3427 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3428 sizeof(struct be_cmd_resp_hdr));
3429 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3430 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3431 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3432 }
3433 }
3434 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3435 extfat_cmd.dma);
3436err:
3437 return level;
3438}
3439static int be_get_initial_config(struct be_adapter *adapter)
3440{
3441 int status;
3442 u32 level;
3301 3443
3302 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num, 3444 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3303 &adapter->function_mode, &adapter->function_caps); 3445 &adapter->function_mode, &adapter->function_caps);
@@ -3335,10 +3477,13 @@ static int be_get_config(struct be_adapter *adapter)
3335 if (be_is_wol_supported(adapter)) 3477 if (be_is_wol_supported(adapter))
3336 adapter->wol = true; 3478 adapter->wol = true;
3337 3479
3480 level = be_get_fw_log_level(adapter);
3481 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3482
3338 return 0; 3483 return 0;
3339} 3484}
3340 3485
3341static int be_dev_family_check(struct be_adapter *adapter) 3486static int be_dev_type_check(struct be_adapter *adapter)
3342{ 3487{
3343 struct pci_dev *pdev = adapter->pdev; 3488 struct pci_dev *pdev = adapter->pdev;
3344 u32 sli_intf = 0, if_type; 3489 u32 sli_intf = 0, if_type;
@@ -3371,6 +3516,9 @@ static int be_dev_family_check(struct be_adapter *adapter)
3371 default: 3516 default:
3372 adapter->generation = 0; 3517 adapter->generation = 0;
3373 } 3518 }
3519
3520 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3521 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3374 return 0; 3522 return 0;
3375} 3523}
3376 3524
@@ -3514,6 +3662,14 @@ reschedule:
3514 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 3662 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3515} 3663}
3516 3664
3665static bool be_reset_required(struct be_adapter *adapter)
3666{
3667 u32 reg;
3668
3669 pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
3670 return reg;
3671}
3672
3517static int __devinit be_probe(struct pci_dev *pdev, 3673static int __devinit be_probe(struct pci_dev *pdev,
3518 const struct pci_device_id *pdev_id) 3674 const struct pci_device_id *pdev_id)
3519{ 3675{
@@ -3539,7 +3695,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
3539 adapter->pdev = pdev; 3695 adapter->pdev = pdev;
3540 pci_set_drvdata(pdev, adapter); 3696 pci_set_drvdata(pdev, adapter);
3541 3697
3542 status = be_dev_family_check(adapter); 3698 status = be_dev_type_check(adapter);
3543 if (status) 3699 if (status)
3544 goto free_netdev; 3700 goto free_netdev;
3545 3701
@@ -3557,13 +3713,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
3557 } 3713 }
3558 } 3714 }
3559 3715
3560 status = be_sriov_enable(adapter);
3561 if (status)
3562 goto free_netdev;
3563
3564 status = be_ctrl_init(adapter); 3716 status = be_ctrl_init(adapter);
3565 if (status) 3717 if (status)
3566 goto disable_sriov; 3718 goto free_netdev;
3567 3719
3568 if (lancer_chip(adapter)) { 3720 if (lancer_chip(adapter)) {
3569 status = lancer_wait_ready(adapter); 3721 status = lancer_wait_ready(adapter);
@@ -3590,9 +3742,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
3590 if (status) 3742 if (status)
3591 goto ctrl_clean; 3743 goto ctrl_clean;
3592 3744
3593 status = be_cmd_reset_function(adapter); 3745 if (be_reset_required(adapter)) {
3594 if (status) 3746 status = be_cmd_reset_function(adapter);
3595 goto ctrl_clean; 3747 if (status)
3748 goto ctrl_clean;
3749 }
3596 3750
3597 /* The INTR bit may be set in the card when probed by a kdump kernel 3751 /* The INTR bit may be set in the card when probed by a kdump kernel
3598 * after a crash. 3752 * after a crash.
@@ -3604,7 +3758,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
3604 if (status) 3758 if (status)
3605 goto ctrl_clean; 3759 goto ctrl_clean;
3606 3760
3607 status = be_get_config(adapter); 3761 status = be_get_initial_config(adapter);
3608 if (status) 3762 if (status)
3609 goto stats_clean; 3763 goto stats_clean;
3610 3764
@@ -3633,8 +3787,6 @@ stats_clean:
3633 be_stats_cleanup(adapter); 3787 be_stats_cleanup(adapter);
3634ctrl_clean: 3788ctrl_clean:
3635 be_ctrl_cleanup(adapter); 3789 be_ctrl_cleanup(adapter);
3636disable_sriov:
3637 be_sriov_disable(adapter);
3638free_netdev: 3790free_netdev:
3639 free_netdev(netdev); 3791 free_netdev(netdev);
3640 pci_set_drvdata(pdev, NULL); 3792 pci_set_drvdata(pdev, NULL);
@@ -3749,6 +3901,11 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3749 3901
3750 pci_disable_device(pdev); 3902 pci_disable_device(pdev);
3751 3903
3904 /* The error could cause the FW to trigger a flash debug dump.
3905 * Resetting the card while flash dump is in progress
3906 * can cause it not to recover; wait for it to finish
3907 */
3908 ssleep(30);
3752 return PCI_ERS_RESULT_NEED_RESET; 3909 return PCI_ERS_RESULT_NEED_RESET;
3753} 3910}
3754 3911
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 1637b9862292..9d71c9cc300b 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -545,9 +545,6 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
545 /* Reset the chip to erase previous misconfiguration. */ 545 /* Reset the chip to erase previous misconfiguration. */
546 iowrite32(0x00000001, ioaddr + BCR); 546 iowrite32(0x00000001, ioaddr + BCR);
547 547
548 dev->base_addr = (unsigned long)ioaddr;
549 dev->irq = irq;
550
551 /* Make certain the descriptor lists are aligned. */ 548 /* Make certain the descriptor lists are aligned. */
552 np = netdev_priv(dev); 549 np = netdev_priv(dev);
553 np->mem = ioaddr; 550 np->mem = ioaddr;
@@ -832,11 +829,13 @@ static int netdev_open(struct net_device *dev)
832{ 829{
833 struct netdev_private *np = netdev_priv(dev); 830 struct netdev_private *np = netdev_priv(dev);
834 void __iomem *ioaddr = np->mem; 831 void __iomem *ioaddr = np->mem;
835 int i; 832 const int irq = np->pci_dev->irq;
833 int rc, i;
836 834
837 iowrite32(0x00000001, ioaddr + BCR); /* Reset */ 835 iowrite32(0x00000001, ioaddr + BCR); /* Reset */
838 836
839 if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev)) 837 rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
838 if (rc)
840 return -EAGAIN; 839 return -EAGAIN;
841 840
842 for (i = 0; i < 3; i++) 841 for (i = 0; i < 3; i++)
@@ -924,8 +923,7 @@ static int netdev_open(struct net_device *dev)
924 np->reset_timer.data = (unsigned long) dev; 923 np->reset_timer.data = (unsigned long) dev;
925 np->reset_timer.function = reset_timer; 924 np->reset_timer.function = reset_timer;
926 np->reset_timer_armed = 0; 925 np->reset_timer_armed = 0;
927 926 return rc;
928 return 0;
929} 927}
930 928
931 929
@@ -1910,7 +1908,7 @@ static int netdev_close(struct net_device *dev)
1910 del_timer_sync(&np->timer); 1908 del_timer_sync(&np->timer);
1911 del_timer_sync(&np->reset_timer); 1909 del_timer_sync(&np->reset_timer);
1912 1910
1913 free_irq(dev->irq, dev); 1911 free_irq(np->pci_dev->irq, dev);
1914 1912
1915 /* Free all the skbuffs in the Rx queue. */ 1913 /* Free all the skbuffs in the Rx queue. */
1916 for (i = 0; i < RX_RING_SIZE; i++) { 1914 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index a12b3f5bc025..7fa0227c9c02 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -1161,6 +1161,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
1161 .set_settings = fec_enet_set_settings, 1161 .set_settings = fec_enet_set_settings,
1162 .get_drvinfo = fec_enet_get_drvinfo, 1162 .get_drvinfo = fec_enet_get_drvinfo,
1163 .get_link = ethtool_op_get_link, 1163 .get_link = ethtool_op_get_link,
1164 .get_ts_info = ethtool_op_get_ts_info,
1164}; 1165};
1165 1166
1166static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1167static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 7b34d8c698da..97f947b3d94a 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -811,6 +811,7 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
811 .get_link = ethtool_op_get_link, 811 .get_link = ethtool_op_get_link,
812 .get_msglevel = mpc52xx_fec_get_msglevel, 812 .get_msglevel = mpc52xx_fec_get_msglevel,
813 .set_msglevel = mpc52xx_fec_set_msglevel, 813 .set_msglevel = mpc52xx_fec_set_msglevel,
814 .get_ts_info = ethtool_op_get_ts_info,
814}; 815};
815 816
816 817
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index e4e6cd2c5f82..2b7633f766d9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -963,6 +963,7 @@ static const struct ethtool_ops fs_ethtool_ops = {
963 .get_msglevel = fs_get_msglevel, 963 .get_msglevel = fs_get_msglevel,
964 .set_msglevel = fs_set_msglevel, 964 .set_msglevel = fs_set_msglevel,
965 .get_regs = fs_get_regs, 965 .get_regs = fs_get_regs,
966 .get_ts_info = ethtool_op_get_ts_info,
966}; 967};
967 968
968static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 969static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e7bed5303997..1adb0245b9dd 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -136,7 +136,7 @@ static void gfar_netpoll(struct net_device *dev);
136int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 136int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
137static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 137static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
138static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 138static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
139 int amount_pull); 139 int amount_pull, struct napi_struct *napi);
140void gfar_halt(struct net_device *dev); 140void gfar_halt(struct net_device *dev);
141static void gfar_halt_nodisable(struct net_device *dev); 141static void gfar_halt_nodisable(struct net_device *dev);
142void gfar_start(struct net_device *dev); 142void gfar_start(struct net_device *dev);
@@ -2675,12 +2675,12 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2675/* gfar_process_frame() -- handle one incoming packet if skb 2675/* gfar_process_frame() -- handle one incoming packet if skb
2676 * isn't NULL. */ 2676 * isn't NULL. */
2677static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 2677static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2678 int amount_pull) 2678 int amount_pull, struct napi_struct *napi)
2679{ 2679{
2680 struct gfar_private *priv = netdev_priv(dev); 2680 struct gfar_private *priv = netdev_priv(dev);
2681 struct rxfcb *fcb = NULL; 2681 struct rxfcb *fcb = NULL;
2682 2682
2683 int ret; 2683 gro_result_t ret;
2684 2684
2685 /* fcb is at the beginning if exists */ 2685 /* fcb is at the beginning if exists */
2686 fcb = (struct rxfcb *)skb->data; 2686 fcb = (struct rxfcb *)skb->data;
@@ -2719,9 +2719,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2719 __vlan_hwaccel_put_tag(skb, fcb->vlctl); 2719 __vlan_hwaccel_put_tag(skb, fcb->vlctl);
2720 2720
2721 /* Send the packet up the stack */ 2721 /* Send the packet up the stack */
2722 ret = netif_receive_skb(skb); 2722 ret = napi_gro_receive(napi, skb);
2723 2723
2724 if (NET_RX_DROP == ret) 2724 if (GRO_DROP == ret)
2725 priv->extra_stats.kernel_dropped++; 2725 priv->extra_stats.kernel_dropped++;
2726 2726
2727 return 0; 2727 return 0;
@@ -2783,7 +2783,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2783 skb_put(skb, pkt_len); 2783 skb_put(skb, pkt_len);
2784 rx_queue->stats.rx_bytes += pkt_len; 2784 rx_queue->stats.rx_bytes += pkt_len;
2785 skb_record_rx_queue(skb, rx_queue->qindex); 2785 skb_record_rx_queue(skb, rx_queue->qindex);
2786 gfar_process_frame(dev, skb, amount_pull); 2786 gfar_process_frame(dev, skb, amount_pull,
2787 &rx_queue->grp->napi);
2787 2788
2788 } else { 2789 } else {
2789 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2790 netif_warn(priv, rx_err, dev, "Missing skb!\n");
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4c9f8d487dbb..2136c7ff5e6d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1210,4 +1210,7 @@ struct filer_table {
1210 struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20]; 1210 struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
1211}; 1211};
1212 1212
1213/* The gianfar_ptp module will set this variable */
1214extern int gfar_phc_index;
1215
1213#endif /* __GIANFAR_H */ 1216#endif /* __GIANFAR_H */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8d74efd04bb9..8a025570d97e 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -26,6 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/netdevice.h> 27#include <linux/netdevice.h>
28#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
29#include <linux/net_tstamp.h>
29#include <linux/skbuff.h> 30#include <linux/skbuff.h>
30#include <linux/spinlock.h> 31#include <linux/spinlock.h>
31#include <linux/mm.h> 32#include <linux/mm.h>
@@ -1739,6 +1740,34 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1739 return ret; 1740 return ret;
1740} 1741}
1741 1742
1743int gfar_phc_index = -1;
1744
1745static int gfar_get_ts_info(struct net_device *dev,
1746 struct ethtool_ts_info *info)
1747{
1748 struct gfar_private *priv = netdev_priv(dev);
1749
1750 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
1751 info->so_timestamping =
1752 SOF_TIMESTAMPING_RX_SOFTWARE |
1753 SOF_TIMESTAMPING_SOFTWARE;
1754 info->phc_index = -1;
1755 return 0;
1756 }
1757 info->so_timestamping =
1758 SOF_TIMESTAMPING_TX_HARDWARE |
1759 SOF_TIMESTAMPING_RX_HARDWARE |
1760 SOF_TIMESTAMPING_RAW_HARDWARE;
1761 info->phc_index = gfar_phc_index;
1762 info->tx_types =
1763 (1 << HWTSTAMP_TX_OFF) |
1764 (1 << HWTSTAMP_TX_ON);
1765 info->rx_filters =
1766 (1 << HWTSTAMP_FILTER_NONE) |
1767 (1 << HWTSTAMP_FILTER_ALL);
1768 return 0;
1769}
1770
1742const struct ethtool_ops gfar_ethtool_ops = { 1771const struct ethtool_ops gfar_ethtool_ops = {
1743 .get_settings = gfar_gsettings, 1772 .get_settings = gfar_gsettings,
1744 .set_settings = gfar_ssettings, 1773 .set_settings = gfar_ssettings,
@@ -1761,4 +1790,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
1761#endif 1790#endif
1762 .set_rxnfc = gfar_set_nfc, 1791 .set_rxnfc = gfar_set_nfc,
1763 .get_rxnfc = gfar_get_nfc, 1792 .get_rxnfc = gfar_get_nfc,
1793 .get_ts_info = gfar_get_ts_info,
1764}; 1794};
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 5fd620bec15c..c08e5d40fecb 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -515,6 +515,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
515 err = PTR_ERR(etsects->clock); 515 err = PTR_ERR(etsects->clock);
516 goto no_clock; 516 goto no_clock;
517 } 517 }
518 gfar_phc_clock = ptp_clock_index(etsects->clock);
518 519
519 dev_set_drvdata(&dev->dev, etsects); 520 dev_set_drvdata(&dev->dev, etsects);
520 521
@@ -538,6 +539,7 @@ static int gianfar_ptp_remove(struct platform_device *dev)
538 gfar_write(&etsects->regs->tmr_temask, 0); 539 gfar_write(&etsects->regs->tmr_temask, 0);
539 gfar_write(&etsects->regs->tmr_ctrl, 0); 540 gfar_write(&etsects->regs->tmr_ctrl, 0);
540 541
542 gfar_phc_clock = -1;
541 ptp_clock_unregister(etsects->clock); 543 ptp_clock_unregister(etsects->clock);
542 iounmap(etsects->regs); 544 iounmap(etsects->regs);
543 release_resource(etsects->rsrc); 545 release_resource(etsects->rsrc);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 17a46e76123f..9ac14f804851 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -116,10 +116,10 @@ static struct ucc_geth_info ugeth_primary_info = {
116 .maxGroupAddrInHash = 4, 116 .maxGroupAddrInHash = 4,
117 .maxIndAddrInHash = 4, 117 .maxIndAddrInHash = 4,
118 .prel = 7, 118 .prel = 7,
119 .maxFrameLength = 1518, 119 .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
120 .minFrameLength = 64, 120 .minFrameLength = 64,
121 .maxD1Length = 1520, 121 .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
122 .maxD2Length = 1520, 122 .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */
123 .vlantype = 0x8100, 123 .vlantype = 0x8100,
124 .ecamptr = ((uint32_t) NULL), 124 .ecamptr = ((uint32_t) NULL),
125 .eventRegMask = UCCE_OTHER, 125 .eventRegMask = UCCE_OTHER,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 2e395a2566b8..f71b3e7b12de 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -877,7 +877,7 @@ struct ucc_geth_hardware_statistics {
877 877
878/* Driver definitions */ 878/* Driver definitions */
879#define TX_BD_RING_LEN 0x10 879#define TX_BD_RING_LEN 0x10
880#define RX_BD_RING_LEN 0x10 880#define RX_BD_RING_LEN 0x20
881 881
882#define TX_RING_MOD_MASK(size) (size-1) 882#define TX_RING_MOD_MASK(size) (size-1)
883#define RX_RING_MOD_MASK(size) (size-1) 883#define RX_RING_MOD_MASK(size) (size-1)
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index a97257f91a3d..37b035306013 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -415,6 +415,7 @@ static const struct ethtool_ops uec_ethtool_ops = {
415 .get_ethtool_stats = uec_get_ethtool_stats, 415 .get_ethtool_stats = uec_get_ethtool_stats,
416 .get_wol = uec_get_wol, 416 .get_wol = uec_get_wol,
417 .set_wol = uec_set_wol, 417 .set_wol = uec_set_wol,
418 .get_ts_info = ethtool_op_get_ts_info,
418}; 419};
419 420
420void uec_set_ethtool_ops(struct net_device *netdev) 421void uec_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/fujitsu/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c
index 3d94797c8f9b..4b80dc4531ad 100644
--- a/drivers/net/ethernet/fujitsu/at1700.c
+++ b/drivers/net/ethernet/fujitsu/at1700.c
@@ -27,7 +27,7 @@
27 ATI provided their EEPROM configuration code header file. 27 ATI provided their EEPROM configuration code header file.
28 Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes. 28 Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes.
29 29
30 MCA bus (AT1720) support by Rene Schmit <rene@bss.lu> 30 MCA bus (AT1720) support (now deleted) by Rene Schmit <rene@bss.lu>
31 31
32 Bugs: 32 Bugs:
33 The MB86965 has a design flaw that makes all probes unreliable. Not 33 The MB86965 has a design flaw that makes all probes unreliable. Not
@@ -38,7 +38,6 @@
38#include <linux/errno.h> 38#include <linux/errno.h>
39#include <linux/netdevice.h> 39#include <linux/netdevice.h>
40#include <linux/etherdevice.h> 40#include <linux/etherdevice.h>
41#include <linux/mca-legacy.h>
42#include <linux/module.h> 41#include <linux/module.h>
43#include <linux/kernel.h> 42#include <linux/kernel.h>
44#include <linux/types.h> 43#include <linux/types.h>
@@ -79,24 +78,6 @@ static unsigned at1700_probe_list[] __initdata = {
79 0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0 78 0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
80}; 79};
81 80
82/*
83 * MCA
84 */
85#ifdef CONFIG_MCA_LEGACY
86static int at1700_ioaddr_pattern[] __initdata = {
87 0x00, 0x04, 0x01, 0x05, 0x02, 0x06, 0x03, 0x07
88};
89
90static int at1700_mca_probe_list[] __initdata = {
91 0x400, 0x1400, 0x2400, 0x3400, 0x4400, 0x5400, 0x6400, 0x7400, 0
92};
93
94static int at1700_irq_pattern[] __initdata = {
95 0x00, 0x00, 0x00, 0x30, 0x70, 0xb0, 0x00, 0x00,
96 0x00, 0xf0, 0x34, 0x74, 0xb4, 0x00, 0x00, 0xf4, 0x00
97};
98#endif
99
100/* use 0 for production, 1 for verification, >2 for debug */ 81/* use 0 for production, 1 for verification, >2 for debug */
101#ifndef NET_DEBUG 82#ifndef NET_DEBUG
102#define NET_DEBUG 1 83#define NET_DEBUG 1
@@ -114,7 +95,6 @@ struct net_local {
114 uint tx_queue_ready:1; /* Tx queue is ready to be sent. */ 95 uint tx_queue_ready:1; /* Tx queue is ready to be sent. */
115 uint rx_started:1; /* Packets are Rxing. */ 96 uint rx_started:1; /* Packets are Rxing. */
116 uchar tx_queue; /* Number of packet on the Tx queue. */ 97 uchar tx_queue; /* Number of packet on the Tx queue. */
117 char mca_slot; /* -1 means ISA */
118 ushort tx_queue_len; /* Current length of the Tx queue. */ 98 ushort tx_queue_len; /* Current length of the Tx queue. */
119}; 99};
120 100
@@ -166,21 +146,6 @@ static void set_rx_mode(struct net_device *dev);
166static void net_tx_timeout (struct net_device *dev); 146static void net_tx_timeout (struct net_device *dev);
167 147
168 148
169#ifdef CONFIG_MCA_LEGACY
170struct at1720_mca_adapters_struct {
171 char* name;
172 int id;
173};
174/* rEnE : maybe there are others I don't know off... */
175
176static struct at1720_mca_adapters_struct at1720_mca_adapters[] __initdata = {
177 { "Allied Telesys AT1720AT", 0x6410 },
178 { "Allied Telesys AT1720BT", 0x6413 },
179 { "Allied Telesys AT1720T", 0x6416 },
180 { NULL, 0 },
181};
182#endif
183
184/* Check for a network adaptor of this type, and return '0' iff one exists. 149/* Check for a network adaptor of this type, and return '0' iff one exists.
185 If dev->base_addr == 0, probe all likely locations. 150 If dev->base_addr == 0, probe all likely locations.
186 If dev->base_addr == 1, always return failure. 151 If dev->base_addr == 1, always return failure.
@@ -194,11 +159,6 @@ static int irq;
194 159
195static void cleanup_card(struct net_device *dev) 160static void cleanup_card(struct net_device *dev)
196{ 161{
197#ifdef CONFIG_MCA_LEGACY
198 struct net_local *lp = netdev_priv(dev);
199 if (lp->mca_slot >= 0)
200 mca_mark_as_unused(lp->mca_slot);
201#endif
202 free_irq(dev->irq, NULL); 162 free_irq(dev->irq, NULL);
203 release_region(dev->base_addr, AT1700_IO_EXTENT); 163 release_region(dev->base_addr, AT1700_IO_EXTENT);
204} 164}
@@ -273,7 +233,7 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr)
273 static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15}; 233 static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
274 static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15}; 234 static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
275 unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0; 235 unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
276 int slot, ret = -ENODEV; 236 int ret = -ENODEV;
277 struct net_local *lp = netdev_priv(dev); 237 struct net_local *lp = netdev_priv(dev);
278 238
279 if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME)) 239 if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME))
@@ -288,64 +248,6 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr)
288 ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5), 248 ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5),
289 read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl)); 249 read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl));
290#endif 250#endif
291
292#ifdef CONFIG_MCA_LEGACY
293 /* rEnE (rene@bss.lu): got this from 3c509 driver source , adapted for AT1720 */
294
295 /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, heavily
296 modified by Chris Beauregard (cpbeaure@csclub.uwaterloo.ca)
297 to support standard MCA probing. */
298
299 /* redone for multi-card detection by ZP Gu (zpg@castle.net) */
300 /* now works as a module */
301
302 if (MCA_bus) {
303 int j;
304 int l_i;
305 u_char pos3, pos4;
306
307 for (j = 0; at1720_mca_adapters[j].name != NULL; j ++) {
308 slot = 0;
309 while (slot != MCA_NOTFOUND) {
310
311 slot = mca_find_unused_adapter( at1720_mca_adapters[j].id, slot );
312 if (slot == MCA_NOTFOUND) break;
313
314 /* if we get this far, an adapter has been detected and is
315 enabled */
316
317 pos3 = mca_read_stored_pos( slot, 3 );
318 pos4 = mca_read_stored_pos( slot, 4 );
319
320 for (l_i = 0; l_i < 8; l_i++)
321 if (( pos3 & 0x07) == at1700_ioaddr_pattern[l_i])
322 break;
323 ioaddr = at1700_mca_probe_list[l_i];
324
325 for (irq = 0; irq < 0x10; irq++)
326 if (((((pos4>>4) & 0x0f) | (pos3 & 0xf0)) & 0xff) == at1700_irq_pattern[irq])
327 break;
328
329 /* probing for a card at a particular IO/IRQ */
330 if ((dev->irq && dev->irq != irq) ||
331 (dev->base_addr && dev->base_addr != ioaddr)) {
332 slot++; /* probing next slot */
333 continue;
334 }
335
336 dev->irq = irq;
337
338 /* claim the slot */
339 mca_set_adapter_name( slot, at1720_mca_adapters[j].name );
340 mca_mark_as_used(slot);
341
342 goto found;
343 }
344 }
345 /* if we get here, we didn't find an MCA adapter - try ISA */
346 }
347#endif
348 slot = -1;
349 /* We must check for the EEPROM-config boards first, else accessing 251 /* We must check for the EEPROM-config boards first, else accessing
350 IOCONFIG0 will move the board! */ 252 IOCONFIG0 will move the board! */
351 if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr && 253 if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr &&
@@ -360,11 +262,7 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr)
360 goto err_out; 262 goto err_out;
361 } 263 }
362 264
363#ifdef CONFIG_MCA_LEGACY 265 /* Reset the internal state machines. */
364found:
365#endif
366
367 /* Reset the internal state machines. */
368 outb(0, ioaddr + RESET); 266 outb(0, ioaddr + RESET);
369 267
370 if (is_at1700) { 268 if (is_at1700) {
@@ -380,11 +278,11 @@ found:
380 break; 278 break;
381 } 279 }
382 if (i == 8) { 280 if (i == 8) {
383 goto err_mca; 281 goto err_out;
384 } 282 }
385 } else { 283 } else {
386 if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr) 284 if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr)
387 goto err_mca; 285 goto err_out;
388 irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03]; 286 irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03];
389 } 287 }
390 } 288 }
@@ -464,23 +362,17 @@ found:
464 spin_lock_init(&lp->lock); 362 spin_lock_init(&lp->lock);
465 363
466 lp->jumpered = is_fmv18x; 364 lp->jumpered = is_fmv18x;
467 lp->mca_slot = slot;
468 /* Snarf the interrupt vector now. */ 365 /* Snarf the interrupt vector now. */
469 ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev); 366 ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev);
470 if (ret) { 367 if (ret) {
471 printk(KERN_ERR "AT1700 at %#3x is unusable due to a " 368 printk(KERN_ERR "AT1700 at %#3x is unusable due to a "
472 "conflict on IRQ %d.\n", 369 "conflict on IRQ %d.\n",
473 ioaddr, irq); 370 ioaddr, irq);
474 goto err_mca; 371 goto err_out;
475 } 372 }
476 373
477 return 0; 374 return 0;
478 375
479err_mca:
480#ifdef CONFIG_MCA_LEGACY
481 if (slot >= 0)
482 mca_mark_as_unused(slot);
483#endif
484err_out: 376err_out:
485 release_region(ioaddr, AT1700_IO_EXTENT); 377 release_region(ioaddr, AT1700_IO_EXTENT);
486 return ret; 378 return ret;
diff --git a/drivers/net/ethernet/i825xx/3c523.c b/drivers/net/ethernet/i825xx/3c523.c
deleted file mode 100644
index 8451ecd4c1ec..000000000000
--- a/drivers/net/ethernet/i825xx/3c523.c
+++ /dev/null
@@ -1,1312 +0,0 @@
1/*
2 net-3-driver for the 3c523 Etherlink/MC card (i82586 Ethernet chip)
3
4
5 This is an extension to the Linux operating system, and is covered by the
6 same GNU General Public License that covers that work.
7
8 Copyright 1995, 1996 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca)
9
10 This is basically Michael Hipp's ni52 driver, with a new probing
11 algorithm and some minor changes to the 82586 CA and reset routines.
12 Thanks a lot Michael for a really clean i82586 implementation! Unless
13 otherwise documented in ni52.c, any bugs are mine.
14
15 Contrary to the Ethernet-HOWTO, this isn't based on the 3c507 driver in
16 any way. The ni52 is a lot easier to modify.
17
18 sources:
19 ni52.c
20
21 Crynwr packet driver collection was a great reference for my first
22 attempt at this sucker. The 3c507 driver also helped, until I noticed
23 that ni52.c was a lot nicer.
24
25 EtherLink/MC: Micro Channel Ethernet Adapter Technical Reference
26 Manual, courtesy of 3Com CardFacts, documents the 3c523-specific
27 stuff. Information on CardFacts is found in the Ethernet HOWTO.
28 Also see <a href="http://www.3com.com/">
29
30 Microprocessor Communications Support Chips, T.J. Byers, ISBN
31 0-444-01224-9, has a section on the i82586. It tells you just enough
32 to know that you really don't want to learn how to program the chip.
33
34 The original device probe code was stolen from ps2esdi.c
35
36 Known Problems:
37 Since most of the code was stolen from ni52.c, you'll run across the
38 same bugs in the 0.62 version of ni52.c, plus maybe a few because of
39 the 3c523 idiosynchacies. The 3c523 has 16K of RAM though, so there
40 shouldn't be the overrun problem that the 8K ni52 has.
41
42 This driver is for a 16K adapter. It should work fine on the 64K
43 adapters, but it will only use one of the 4 banks of RAM. Modifying
44 this for the 64K version would require a lot of heinous bank
45 switching, which I'm sure not interested in doing. If you try to
46 implement a bank switching version, you'll basically have to remember
47 what bank is enabled and do a switch every time you access a memory
48 location that's not current. You'll also have to remap pointers on
49 the driver side, because it only knows about 16K of the memory.
50 Anyone desperate or masochistic enough to try?
51
52 It seems to be stable now when multiple transmit buffers are used. I
53 can't see any performance difference, but then I'm working on a 386SX.
54
55 Multicast doesn't work. It doesn't even pretend to work. Don't use
56 it. Don't compile your kernel with multicast support. I don't know
57 why.
58
59 Features:
60 This driver is useable as a loadable module. If you try to specify an
61 IRQ or a IO address (via insmod 3c523.o irq=xx io=0xyyy), it will
62 search the MCA slots until it finds a 3c523 with the specified
63 parameters.
64
65 This driver does support multiple ethernet cards when used as a module
66 (up to MAX_3C523_CARDS, the default being 4)
67
68 This has been tested with both BNC and TP versions, internal and
69 external transceivers. Haven't tested with the 64K version (that I
70 know of).
71
72 History:
73 Jan 1st, 1996
74 first public release
75 Feb 4th, 1996
76 update to 1.3.59, incorporated multicast diffs from ni52.c
77 Feb 15th, 1996
78 added shared irq support
79 Apr 1999
80 added support for multiple cards when used as a module
81 added option to disable multicast as is causes problems
82 Ganesh Sittampalam <ganesh.sittampalam@magdalen.oxford.ac.uk>
83 Stuart Adamson <stuart.adamson@compsoc.net>
84 Nov 2001
85 added support for ethtool (jgarzik)
86
87 $Header: /fsys2/home/chrisb/linux-1.3.59-MCA/drivers/net/RCS/3c523.c,v 1.1 1996/02/05 01:53:46 chrisb Exp chrisb $
88 */
89
90#define DRV_NAME "3c523"
91#define DRV_VERSION "17-Nov-2001"
92
93#include <linux/init.h>
94#include <linux/netdevice.h>
95#include <linux/etherdevice.h>
96#include <linux/module.h>
97#include <linux/kernel.h>
98#include <linux/string.h>
99#include <linux/errno.h>
100#include <linux/ioport.h>
101#include <linux/skbuff.h>
102#include <linux/interrupt.h>
103#include <linux/delay.h>
104#include <linux/mca-legacy.h>
105#include <linux/ethtool.h>
106#include <linux/bitops.h>
107#include <linux/jiffies.h>
108
109#include <asm/uaccess.h>
110#include <asm/processor.h>
111#include <asm/io.h>
112
113#include "3c523.h"
114
115/*************************************************************************/
116#define DEBUG /* debug on */
117#define SYSBUSVAL 0 /* 1 = 8 Bit, 0 = 16 bit - 3c523 only does 16 bit */
118#undef ELMC_MULTICAST /* Disable multicast support as it is somewhat seriously broken at the moment */
119
120#define make32(ptr16) (p->memtop + (short) (ptr16) )
121#define make24(ptr32) ((char *) (ptr32) - p->base)
122#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop ))
123
124/*************************************************************************/
125/*
126 Tables to which we can map values in the configuration registers.
127 */
128static int irq_table[] __initdata = {
129 12, 7, 3, 9
130};
131
132static int csr_table[] __initdata = {
133 0x300, 0x1300, 0x2300, 0x3300
134};
135
136static int shm_table[] __initdata = {
137 0x0c0000, 0x0c8000, 0x0d0000, 0x0d8000
138};
139
140/******************* how to calculate the buffers *****************************
141
142
143 * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
144 * --------------- in a different (more stable?) mode. Only in this mode it's
145 * possible to configure the driver with 'NO_NOPCOMMANDS'
146
147sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
148sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
149sizeof(rfd) = 24; sizeof(rbd) = 12;
150sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
151sizeof(nop_cmd) = 8;
152
153 * if you don't know the driver, better do not change this values: */
154
155#define RECV_BUFF_SIZE 1524 /* slightly oversized */
156#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
157#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
158#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
159#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
160
161#if (NUM_XMIT_BUFFS == 1)
162#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
163#endif
164
165/**************************************************************************/
166
/* Coarse delay in units of ~32 ms.  The argument is parenthesized and the
 * body wrapped in do { } while (0) so the macro behaves as one statement
 * in if/else bodies. */
#define DELAY(x) do { mdelay(32 * (x)); } while (0)

/* a much shorter delay: ~16 us.
 * Bug fix: the old form was "#define DELAY_16(); { udelay(16) ; }" - the
 * stray semicolon after the macro name became part of the expansion, so
 * "if (cond) DELAY_16();" would have executed the delay unconditionally
 * (the leading ';' terminated the if). */
#define DELAY_16() do { udelay(16); } while (0)

/* Busy-wait (up to 1024 iterations of ~16 us) for the i82586 to clear the
 * SCB command field; on timeout, log and reset the chip.  Relies on 'p'
 * (struct priv *) and 'dev' (struct net_device *) being in the caller's
 * scope. */
#define WAIT_4_SCB_CMD() do { int i; \
	for (i = 0; i < 1024; i++) { \
		if (!p->scb->cmd) break; \
		DELAY_16(); \
		if (i == 1023) { \
			pr_warning("%s:%d: scb_cmd timed out .. resetting i82586\n",\
				dev->name, __LINE__); \
			elmc_id_reset586(); } } } while (0)
181
182static irqreturn_t elmc_interrupt(int irq, void *dev_id);
183static int elmc_open(struct net_device *dev);
184static int elmc_close(struct net_device *dev);
185static netdev_tx_t elmc_send_packet(struct sk_buff *, struct net_device *);
186static struct net_device_stats *elmc_get_stats(struct net_device *dev);
187static void elmc_timeout(struct net_device *dev);
188#ifdef ELMC_MULTICAST
189static void set_multicast_list(struct net_device *dev);
190#endif
191static const struct ethtool_ops netdev_ethtool_ops;
192
193/* helper-functions */
194static int init586(struct net_device *dev);
195static int check586(struct net_device *dev, unsigned long where, unsigned size);
196static void alloc586(struct net_device *dev);
197static void startrecv586(struct net_device *dev);
198static void *alloc_rfa(struct net_device *dev, void *ptr);
199static void elmc_rcv_int(struct net_device *dev);
200static void elmc_xmt_int(struct net_device *dev);
201static void elmc_rnr_int(struct net_device *dev);
202
/* Per-device private state, kept in netdev_priv(dev).  The descriptor
 * pointers below address the card's shared-memory window, hence the
 * volatile qualifiers - the i82586 updates these structures behind the
 * CPU's back. */
struct priv {
	unsigned long base;	/* host addr such that make24(ptr) = ptr - base yields the card's 24-bit address */
	char *memtop;		/* top of the shared-mem window; basis for make16()/make32() offset conversion */
	unsigned long mapped_start;	/* Start of ioremap */
	volatile struct rfd_struct *rfd_last, *rfd_top, *rfd_first;	/* receive frame descriptor ring */
	volatile struct scp_struct *scp;	/* volatile is important */
	volatile struct iscp_struct *iscp;	/* volatile is important */
	volatile struct scb_struct *scb;	/* volatile is important */
	volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
#if (NUM_XMIT_BUFFS == 1)
	/* single-buffer mode keeps two command slots (ping-pong) */
	volatile struct transmit_cmd_struct *xmit_cmds[2];
	volatile struct nop_cmd_struct *nop_cmds[2];
#else
	volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
	volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
#endif
	volatile int nop_point, num_recv_buffs;
	volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];	/* raw transmit data buffers */
	volatile int xmit_count, xmit_last;
	volatile int slot;	/* MCA slot the adapter was found in (see do_elmc_probe) */
};
224
225#define elmc_attn586() {elmc_do_attn586(dev->base_addr,ELMC_CTRL_INTE);}
226#define elmc_reset586() {elmc_do_reset586(dev->base_addr,ELMC_CTRL_INTE);}
227
228/* with interrupts disabled - this will clear the interrupt bit in the
229 3c523 control register, and won't put it back. This effectively
230 disables interrupts on the card. */
231#define elmc_id_attn586() {elmc_do_attn586(dev->base_addr,0);}
232#define elmc_id_reset586() {elmc_do_reset586(dev->base_addr,0);}
233
234/*************************************************************************/
235/*
236 Do a Channel Attention on the 3c523. This is extremely board dependent.
237 */
238static void elmc_do_attn586(int ioaddr, int ints)
239{
240 /* the 3c523 requires a minimum of 500 ns. The delays here might be
241 a little too large, and hence they may cut the performance of the
242 card slightly. If someone who knows a little more about Linux
243 timing would care to play with these, I'd appreciate it. */
244
245 /* this bit masking stuff is crap. I'd rather have separate
246 registers with strobe triggers for each of these functions. <sigh>
247 Ya take what ya got. */
248
249 outb(ELMC_CTRL_RST | 0x3 | ELMC_CTRL_CA | ints, ioaddr + ELMC_CTRL);
250 DELAY_16(); /* > 500 ns */
251 outb(ELMC_CTRL_RST | 0x3 | ints, ioaddr + ELMC_CTRL);
252}
253
254/*************************************************************************/
255/*
256 Reset the 82586 on the 3c523. Also very board dependent.
257 */
258static void elmc_do_reset586(int ioaddr, int ints)
259{
260 /* toggle the RST bit low then high */
261 outb(0x3 | ELMC_CTRL_LBK, ioaddr + ELMC_CTRL);
262 DELAY_16(); /* > 500 ns */
263 outb(ELMC_CTRL_RST | ELMC_CTRL_LBK | 0x3, ioaddr + ELMC_CTRL);
264
265 elmc_do_attn586(ioaddr, ints);
266}
267
268/**********************************************
269 * close device
270 */
271
/* ndo_stop callback: quiesce the queue, stop the chip (a reset with
 * interrupts left disabled is the only way to stop the receiver) and
 * release the IRQ acquired in elmc_open().  Always returns 0. */
static int elmc_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	elmc_id_reset586();	/* the hard way to stop the receiver */
	free_irq(dev->irq, dev);
	return 0;
}
279
280/**********************************************
281 * open device
282 */
283
/* ndo_open callback: grab the (shared) IRQ, re-initialize the i82586's
 * shared-memory structures and start the receiver/transmit queue.
 * Returns 0 on success or the request_irq() error code. */
static int elmc_open(struct net_device *dev)
{
	int ret;

	elmc_id_attn586();	/* disable interrupts */

	ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq);
		elmc_id_reset586();
		return ret;
	}
	alloc586(dev);
	/* NOTE(review): init586() reports failure by returning 1, but the
	 * result is ignored here - the device is started regardless. */
	init586(dev);
	startrecv586(dev);
	netif_start_queue(dev);
	return 0;		/* most done by init */
}
303
304/**********************************************
305 * Check to see if there's an 82586 out there.
306 */
307
/* Probe for a live i82586 behind the shared-memory window at bus address
 * 'where' of length 'size'.  The ISCP is placed at each of two candidate
 * locations, 'busy' is set and the chip is reset/kicked; a real 82586
 * clears 'busy' after reading the ISCP.  Returns 1 if the chip responded
 * at both locations, 0 if it failed to respond.  Side effect: seeds
 * p->base/p->memtop/p->scp (do_elmc_probe() re-derives them afterwards). */
static int __init check586(struct net_device *dev, unsigned long where, unsigned size)
{
	struct priv *p = netdev_priv(dev);
	char *iscp_addrs[2];
	int i = 0;

	/* base is chosen so that make24() maps host pointers into the
	 * chip's 24-bit address space ending at the top of the window */
	p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000;
	p->memtop = isa_bus_to_virt((unsigned long)where) + size;
	p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
	memset((char *) p->scp, 0, sizeof(struct scp_struct));
	p->scp->sysbus = SYSBUSVAL;	/* 1 = 8Bit-Bus, 0 = 16 Bit */

	/* try the ISCP at the bottom of the window and just below the SCP */
	iscp_addrs[0] = isa_bus_to_virt((unsigned long)where);
	iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct);

	for (i = 0; i < 2; i++) {
		p->iscp = (struct iscp_struct *) iscp_addrs[i];
		memset((char *) p->iscp, 0, sizeof(struct iscp_struct));

		p->scp->iscp = make24(p->iscp);
		p->iscp->busy = 1;

		elmc_id_reset586();

		/* reset586 does an implicit CA */

		/* apparently, you sometimes have to kick the 82586 twice... */
		elmc_id_attn586();
		DELAY(1);

		if (p->iscp->busy) {	/* i82586 clears 'busy' after successful init */
			return 0;
		}
	}
	return 1;
}
344
345/******************************************************************
346 * set iscp at the right place, called by elmc_probe and open586.
347 */
348
/* Lay out and initialize the SCP/ISCP/SCB structures in shared memory and
 * hand them to the chip (reset + CA makes the 82586 re-read the ISCP and
 * clear 'busy').  Called from do_elmc_probe() and elmc_open(); logs an
 * error if the chip fails to acknowledge. */
static void alloc586(struct net_device *dev)
{
	struct priv *p = netdev_priv(dev);

	elmc_id_reset586();
	DELAY(2);

	/* SCP lives at the chip's architectural default address; the SCB is
	 * placed at the bottom of the shared-memory window, the ISCP just
	 * below the SCP */
	p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
	p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start);
	p->iscp = (struct iscp_struct *) ((char *) p->scp - sizeof(struct iscp_struct));

	memset((char *) p->iscp, 0, sizeof(struct iscp_struct));
	memset((char *) p->scp, 0, sizeof(struct scp_struct));

	p->scp->iscp = make24(p->iscp);
	p->scp->sysbus = SYSBUSVAL;
	p->iscp->scb_offset = make16(p->scb);

	p->iscp->busy = 1;
	elmc_id_reset586();
	elmc_id_attn586();

	DELAY(2);

	/* the chip clears 'busy' once it has consumed the ISCP */
	if (p->iscp->busy)
		pr_err("%s: Init-Problems (alloc).\n", dev->name);

	memset((char *) p->scb, 0, sizeof(struct scb_struct));
}
378
379/*****************************************************************/
380
381static int elmc_getinfo(char *buf, int slot, void *d)
382{
383 int len = 0;
384 struct net_device *dev = d;
385
386 if (dev == NULL)
387 return len;
388
389 len += sprintf(buf + len, "Revision: 0x%x\n",
390 inb(dev->base_addr + ELMC_REVISION) & 0xf);
391 len += sprintf(buf + len, "IRQ: %d\n", dev->irq);
392 len += sprintf(buf + len, "IO Address: %#lx-%#lx\n", dev->base_addr,
393 dev->base_addr + ELMC_IO_EXTENT);
394 len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
395 dev->mem_end - 1);
396 len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ?
397 "External" : "Internal");
398 len += sprintf(buf + len, "Device: %s\n", dev->name);
399 len += sprintf(buf + len, "Hardware Address: %pM\n",
400 dev->dev_addr);
401
402 return len;
403} /* elmc_getinfo() */
404
/* Standard net_device callbacks.  The multicast hook is compiled out
 * unless ELMC_MULTICAST is defined (it is #undef'd above because the
 * feature is broken). */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= elmc_open,
	.ndo_stop		= elmc_close,
	.ndo_get_stats		= elmc_get_stats,
	.ndo_start_xmit		= elmc_send_packet,
	.ndo_tx_timeout		= elmc_timeout,
#ifdef ELMC_MULTICAST
	.ndo_set_rx_mode	= set_multicast_list,
#endif
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
418
419/*****************************************************************/
420
/* Scan the MCA bus for a 3c523, claim its I/O region, read its POS-based
 * configuration (IRQ, CSR base, shared-memory base, transceiver), verify
 * the i82586 responds, and register the net_device.  Returns 0 on
 * success; -ENODEV/-ENXIO when no (matching) adapter is found; other
 * negative errno on later failures. */
static int __init do_elmc_probe(struct net_device *dev)
{
	static int slot;
	int base_addr = dev->base_addr;
	int irq = dev->irq;
	u_char status = 0;
	u_char revision = 0;
	int i = 0;
	unsigned int size = 0;
	int retval;
	struct priv *pr = netdev_priv(dev);

	if (MCA_bus == 0) {
		return -ENODEV;
	}
	/* search through the slots for the 3c523. */
	slot = mca_find_adapter(ELMC_MCA_ID, 0);
	while (slot != -1) {
		status = mca_read_stored_pos(slot, 2);

		/* decode IRQ and I/O base from the stored POS register */
		dev->irq=irq_table[(status & ELMC_STATUS_IRQ_SELECT) >> 6];
		dev->base_addr=csr_table[(status & ELMC_STATUS_CSR_SELECT) >> 1];

		/*
		   If we're trying to match a specified irq or IO address,
		   we'll reject a match unless it's what we're looking for.
		   Also reject it if the card is already in use.
		 */

		if ((irq && irq != dev->irq) ||
		    (base_addr && base_addr != dev->base_addr)) {
			slot = mca_find_adapter(ELMC_MCA_ID, slot + 1);
			continue;
		}
		if (!request_region(dev->base_addr, ELMC_IO_EXTENT, DRV_NAME)) {
			slot = mca_find_adapter(ELMC_MCA_ID, slot + 1);
			continue;
		}

		/* found what we're looking for... */
		break;
	}

	/* we didn't find any 3c523 in the slots we checked for */
	/* NOTE(review): loop above exits on slot == -1; this test presumes
	 * MCA_NOTFOUND == -1 (defined in mca-legacy.h) */
	if (slot == MCA_NOTFOUND)
		return (base_addr || irq) ? -ENXIO : -ENODEV;

	mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC");
	mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev);

	/* if we get this far, adapter has been found - carry on */
	pr_info("%s: 3c523 adapter found in slot %d\n", dev->name, slot + 1);

	/* Now we extract configuration info from the card.
	   The 3c523 provides information in two of the POS registers, but
	   the second one is only needed if we want to tell the card what IRQ
	   to use.  I suspect that whoever sets the thing up initially would
	   prefer we don't screw with those things.

	   Note that we read the status info when we found the card...

	   See 3c523.h for more details.
	 */

	/* revision is stored in the first 4 bits of the revision register */
	revision = inb(dev->base_addr + ELMC_REVISION) & 0xf;

	/* according to docs, we read the interrupt and write it back to
	   the IRQ select register, since the POST might not configure the IRQ
	   properly. */
	switch (dev->irq) {
	case 3:
		mca_write_pos(slot, 3, 0x04);
		break;
	case 7:
		mca_write_pos(slot, 3, 0x02);
		break;
	case 9:
		mca_write_pos(slot, 3, 0x08);
		break;
	case 12:
		mca_write_pos(slot, 3, 0x01);
		break;
	}

	pr->slot = slot;

	pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
		dev->base_addr);

	/* Determine if we're using the on-board transceiver (i.e. coax) or
	   an external one.  The information is pretty much useless, but I
	   guess it's worth brownie points. */
	dev->if_port = (status & ELMC_STATUS_DISABLE_THIN);

	/* The 3c523 has a 24K chunk of memory.  The first 16K is the
	   shared memory, while the last 8K is for the EtherStart BIOS ROM.
	   Which we don't care much about here.  We'll just tell Linux that
	   we're using 16K.  MCA won't permit address space conflicts caused
	   by not mapping the other 8K. */
	dev->mem_start = shm_table[(status & ELMC_STATUS_MEMORY_SELECT) >> 3];

	/* We're using MCA, so it's a given that the information about memory
	   size is correct.  The Crynwr drivers do something like this. */

	elmc_id_reset586();	/* seems like a good idea before checking it... */

	size = 0x4000;		/* check for 16K mem */
	if (!check586(dev, dev->mem_start, size)) {
		pr_err("%s: memprobe, Can't find memory at 0x%lx!\n", dev->name,
		       dev->mem_start);
		retval = -ENODEV;
		goto err_out;
	}
	dev->mem_end = dev->mem_start + size;	/* set mem_end showed by 'ifconfig' */

	pr->memtop = isa_bus_to_virt(dev->mem_start) + size;
	pr->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000;
	alloc586(dev);

	elmc_id_reset586();	/* make sure it doesn't generate spurious ints */

	/* set number of receive-buffs according to memsize */
	pr->num_recv_buffs = NUM_RECV_BUFFS_16;

	/* dump all the assorted information */
	pr_info("%s: IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->name,
		dev->irq, dev->if_port ? "ex" : "in",
		dev->mem_start, dev->mem_end - 1);

	/* The hardware address for the 3c523 is stored in the first six
	   bytes of the IO address. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = inb(dev->base_addr + i);

	pr_info("%s: hardware address %pM\n",
		dev->name, dev->dev_addr);

	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = HZ;
	dev->ethtool_ops = &netdev_ethtool_ops;

	/* note that we haven't actually requested the IRQ from the kernel.
	   That gets done in elmc_open().  I'm not sure that's such a good idea,
	   but it works, so I'll go with it. */

#ifndef ELMC_MULTICAST
	dev->flags&=~IFF_MULTICAST;	/* Multicast doesn't work */
#endif

	retval = register_netdev(dev);
	if (retval)
		goto err_out;

	return 0;
err_out:
	/* undo the procfn registration and the I/O region claim */
	mca_set_adapter_procfn(slot, NULL, NULL);
	release_region(dev->base_addr, ELMC_IO_EXTENT);
	return retval;
}
581
582#ifdef MODULE
/* Module unload path: drop the MCA /proc hook registered in
 * do_elmc_probe() and release the adapter's I/O region. */
static void cleanup_card(struct net_device *dev)
{
	mca_set_adapter_procfn(((struct priv *)netdev_priv(dev))->slot,
			       NULL, NULL);
	release_region(dev->base_addr, ELMC_IO_EXTENT);
}
589#else
/* Built-in (non-module) probe entry: allocate an ethernet net_device for
 * interface 'unit', apply any boot-time overrides, and run the MCA probe.
 * Returns the registered device or an ERR_PTR on failure. */
struct net_device * __init elmc_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct priv));
	int err;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);	/* pick up io=/irq= kernel parameters */

	err = do_elmc_probe(dev);
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
609#endif
610
611/**********************************************
612 * init the chip (elmc-interrupt should be disabled?!)
613 * needs a correct 'allocated' memory
614 */
615
/* Bring up the i82586: run the CONFIGURE, IA-SETUP and TDR (cable check)
 * commands, then carve the remaining shared memory into nop/xmit command
 * blocks, the receive frame area and the transmit buffers, and finally
 * start the command unit on the nop loop.  Returns 0 on success, 1 on
 * failure.  Expects alloc586() to have set up scb/iscp/scp already
 * (see elmc_open()). */
static int init586(struct net_device *dev)
{
	void *ptr;
	unsigned long s;
	int i, result = 0;
	struct priv *p = netdev_priv(dev);
	volatile struct configure_cmd_struct *cfg_cmd;
	volatile struct iasetup_cmd_struct *ias_cmd;
	volatile struct tdr_cmd_struct *tdr_cmd;
	volatile struct mcsetup_cmd_struct *mc_cmd;
	struct netdev_hw_addr *ha;
	int num_addrs = netdev_mc_count(dev);

	/* command blocks are built just past the SCB in shared memory */
	ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));

	cfg_cmd = (struct configure_cmd_struct *) ptr;	/* configure-command */
	cfg_cmd->cmd_status = 0;
	cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST;
	cfg_cmd->cmd_link = 0xffff;

	cfg_cmd->byte_cnt = 0x0a;	/* number of cfg bytes */
	cfg_cmd->fifo = 0x08;	/* fifo-limit (8=tx:32/rx:64) */
	cfg_cmd->sav_bf = 0x40;	/* hold or discard bad recv frames (bit 7) */
	cfg_cmd->adr_len = 0x2e;	/* addr_len |!src_insert |pre-len |loopback */
	cfg_cmd->priority = 0x00;
	cfg_cmd->ifs = 0x60;
	cfg_cmd->time_low = 0x00;
	cfg_cmd->time_high = 0xf2;
	cfg_cmd->promisc = 0;
	if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC))
		cfg_cmd->promisc = 1;
	cfg_cmd->carr_coll = 0x00;

	p->scb->cbl_offset = make16(cfg_cmd);

	p->scb->cmd = CUC_START;	/* cmd.-unit start */
	elmc_id_attn586();

	/* busy-wait up to ~0.3 s for the chip to complete the command */
	s = jiffies;		/* warning: only active with interrupts on !! */
	while (!(cfg_cmd->cmd_status & STAT_COMPL)) {
		if (time_after(jiffies, s + 30*HZ/100))
			break;
	}

	if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) {
		pr_warning("%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status);
		return 1;
	}
	/*
	 * individual address setup
	 */
	ias_cmd = (struct iasetup_cmd_struct *) ptr;

	ias_cmd->cmd_status = 0;
	ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST;
	ias_cmd->cmd_link = 0xffff;

	memcpy((char *) &ias_cmd->iaddr, (char *) dev->dev_addr, ETH_ALEN);

	p->scb->cbl_offset = make16(ias_cmd);

	p->scb->cmd = CUC_START;	/* cmd.-unit start */
	elmc_id_attn586();

	s = jiffies;
	while (!(ias_cmd->cmd_status & STAT_COMPL)) {
		if (time_after(jiffies, s + 30*HZ/100))
			break;
	}

	if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) {
		pr_warning("%s (elmc): individual address setup command failed: %04x\n",
			dev->name, ias_cmd->cmd_status);
		return 1;
	}
	/*
	 * TDR, wire check .. e.g. no resistor e.t.c
	 */
	tdr_cmd = (struct tdr_cmd_struct *) ptr;

	tdr_cmd->cmd_status = 0;
	tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST;
	tdr_cmd->cmd_link = 0xffff;
	tdr_cmd->status = 0;

	p->scb->cbl_offset = make16(tdr_cmd);

	p->scb->cmd = CUC_START;	/* cmd.-unit start */
	elmc_attn586();

	s = jiffies;
	while (!(tdr_cmd->cmd_status & STAT_COMPL)) {
		if (time_after(jiffies, s + 30*HZ/100)) {
			pr_warning("%s: %d Problems while running the TDR.\n", dev->name, __LINE__);
			result = 1;
			break;
		}
	}

	/* TDR failure is only reported, not fatal */
	if (!result) {
		DELAY(2);	/* wait for result */
		result = tdr_cmd->status;

		p->scb->cmd = p->scb->status & STAT_MASK;
		elmc_id_attn586();	/* ack the interrupts */

		if (result & TDR_LNK_OK) {
			/* empty */
		} else if (result & TDR_XCVR_PRB) {
			pr_warning("%s: TDR: Transceiver problem!\n", dev->name);
		} else if (result & TDR_ET_OPN) {
			pr_warning("%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
		} else if (result & TDR_ET_SRT) {
			if (result & TDR_TIMEMASK)	/* time == 0 -> strange :-) */
				pr_warning("%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
		} else {
			pr_warning("%s: TDR: Unknown status %04x\n", dev->name, result);
		}
	}
	/*
	 * ack interrupts
	 */
	p->scb->cmd = p->scb->status & STAT_MASK;
	elmc_id_attn586();

	/*
	 * alloc nop/xmit-cmds
	 */
#if (NUM_XMIT_BUFFS == 1)
	/* each nop links to itself so the CU idles on it */
	for (i = 0; i < 2; i++) {
		p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
		p->nop_cmds[i]->cmd_cmd = CMD_NOP;
		p->nop_cmds[i]->cmd_status = 0;
		p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
		ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
	}
	p->xmit_cmds[0] = (struct transmit_cmd_struct *) ptr;	/* transmit cmd/buff 0 */
	ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
#else
	for (i = 0; i < NUM_XMIT_BUFFS; i++) {
		p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
		p->nop_cmds[i]->cmd_cmd = CMD_NOP;
		p->nop_cmds[i]->cmd_status = 0;
		p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
		ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
		p->xmit_cmds[i] = (struct transmit_cmd_struct *) ptr;	/*transmit cmd/buff 0 */
		ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
	}
#endif

	ptr = alloc_rfa(dev, (void *) ptr);	/* init receive-frame-area */

	/*
	 * Multicast setup
	 */

	if (num_addrs) {
		/* I don't understand this: do we really need memory after the init? */
		/* cap the list at what fits between ptr and the ISCP (6 bytes/addr) */
		int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
		if (len <= 0) {
			pr_err("%s: Ooooops, no memory for MC-Setup!\n", dev->name);
		} else {
			if (len < num_addrs) {
				num_addrs = len;
				pr_warning("%s: Sorry, can only apply %d MC-Address(es).\n",
				       dev->name, num_addrs);
			}
			mc_cmd = (struct mcsetup_cmd_struct *) ptr;
			mc_cmd->cmd_status = 0;
			mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
			mc_cmd->cmd_link = 0xffff;
			mc_cmd->mc_cnt = num_addrs * 6;
			i = 0;
			netdev_for_each_mc_addr(ha, dev)
				memcpy((char *) mc_cmd->mc_list[i++],
				       ha->addr, 6);
			p->scb->cbl_offset = make16(mc_cmd);
			p->scb->cmd = CUC_START;
			elmc_id_attn586();
			s = jiffies;
			while (!(mc_cmd->cmd_status & STAT_COMPL)) {
				if (time_after(jiffies, s + 30*HZ/100))
					break;
			}
			if (!(mc_cmd->cmd_status & STAT_COMPL)) {
				pr_warning("%s: Can't apply multicast-address-list.\n", dev->name);
			}
		}
	}
	/*
	 * alloc xmit-buffs / init xmit_cmds
	 */
	for (i = 0; i < NUM_XMIT_BUFFS; i++) {
		p->xmit_cbuffs[i] = (char *) ptr;	/* char-buffs */
		ptr = (char *) ptr + XMIT_BUFF_SIZE;
		p->xmit_buffs[i] = (struct tbd_struct *) ptr;	/* TBD */
		ptr = (char *) ptr + sizeof(struct tbd_struct);
		/* everything must fit below the ISCP at the top of the window */
		if ((void *) ptr > (void *) p->iscp) {
			pr_err("%s: not enough shared-mem for your configuration!\n", dev->name);
			return 1;
		}
		memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct));
		memset((char *) (p->xmit_buffs[i]), 0, sizeof(struct tbd_struct));
		p->xmit_cmds[i]->cmd_status = STAT_COMPL;
		p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT;
		p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
		p->xmit_buffs[i]->next = 0xffff;
		p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
	}

	p->xmit_count = 0;
	p->xmit_last = 0;
#ifndef NO_NOPCOMMANDS
	p->nop_point = 0;
#endif

	/*
	 * 'start transmitter' (nop-loop)
	 */
#ifndef NO_NOPCOMMANDS
	p->scb->cbl_offset = make16(p->nop_cmds[0]);
	p->scb->cmd = CUC_START;
	elmc_id_attn586();
	WAIT_4_SCB_CMD();
#else
	p->xmit_cmds[0]->cmd_link = 0xffff;
	p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT;
#endif

	return 0;
}
847
848/******************************************************
849 * This is a helper routine for elmc_rnr_int() and init586().
850 * It sets up the Receive Frame Area (RFA).
851 */
852
/* Build the Receive Frame Area at 'ptr' in shared memory: a circular ring
 * of num_recv_buffs RFDs, a matching ring of RBDs, and one RECV_BUFF_SIZE
 * data buffer per RBD.  The last RFD carries RFD_SUSP so the RU suspends
 * instead of overrunning.  Returns the first free address past the RFA.
 * Used by init586() and elmc_rnr_int(). */
static void *alloc_rfa(struct net_device *dev, void *ptr)
{
	volatile struct rfd_struct *rfd = (struct rfd_struct *) ptr;
	volatile struct rbd_struct *rbd;
	int i;
	struct priv *p = netdev_priv(dev);

	memset((char *) rfd, 0, sizeof(struct rfd_struct) * p->num_recv_buffs);
	p->rfd_first = rfd;

	/* link the RFDs into a circle */
	for (i = 0; i < p->num_recv_buffs; i++) {
		rfd[i].next = make16(rfd + (i + 1) % p->num_recv_buffs);
	}
	rfd[p->num_recv_buffs - 1].last = RFD_SUSP;	/* RU suspend */

	ptr = (void *) (rfd + p->num_recv_buffs);

	rbd = (struct rbd_struct *) ptr;
	ptr = (void *) (rbd + p->num_recv_buffs);

	/* clr descriptors */
	memset((char *) rbd, 0, sizeof(struct rbd_struct) * p->num_recv_buffs);

	/* link the RBDs into a circle and attach a data buffer to each */
	for (i = 0; i < p->num_recv_buffs; i++) {
		rbd[i].next = make16((rbd + (i + 1) % p->num_recv_buffs));
		rbd[i].size = RECV_BUFF_SIZE;
		rbd[i].buffer = make24(ptr);
		ptr = (char *) ptr + RECV_BUFF_SIZE;
	}

	p->rfd_top = p->rfd_first;
	p->rfd_last = p->rfd_first + p->num_recv_buffs - 1;

	p->scb->rfa_offset = make16(p->rfd_first);
	p->rfd_first->rbd_offset = make16(rbd);

	return ptr;
}
891
892
893/**************************************************
894 * Interrupt Handler ...
895 */
896
/* Shared-IRQ handler: identify our interrupt via the ELMC_CTRL INT bit
 * (reading the register also clears it), then process and acknowledge
 * every pending SCB status condition - command complete, frame received,
 * CU left active, RU not ready - until the status field drains. */
static irqreturn_t
elmc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned short stat;
	struct priv *p;

	if (!netif_running(dev)) {
		/* The 3c523 has this habit of generating interrupts during the
		   reset.  I'm not sure if the ni52 has this same problem, but it's
		   really annoying if we haven't finished initializing it.  I was
		   hoping all the elmc_id_* commands would disable this, but I
		   might have missed a few. */

		elmc_id_attn586();	/* ack inter. and disable any more */
		return IRQ_HANDLED;
	} else if (!(ELMC_CTRL_INT & inb(dev->base_addr + ELMC_CTRL))) {
		/* wasn't this device */
		return IRQ_NONE;
	}
	/* reading ELMC_CTRL also clears the INT bit. */

	p = netdev_priv(dev);

	while ((stat = p->scb->status & STAT_MASK))
	{
		p->scb->cmd = stat;
		elmc_attn586();	/* ack inter. */

		if (stat & STAT_CX) {
			/* command with I-bit set complete */
			elmc_xmt_int(dev);
		}
		if (stat & STAT_FR) {
			/* received a frame */
			elmc_rcv_int(dev);
		}
#ifndef NO_NOPCOMMANDS
		if (stat & STAT_CNA) {
			/* CU went 'not ready' */
			if (netif_running(dev)) {
				pr_warning("%s: oops! CU has left active state. stat: %04x/%04x.\n",
					dev->name, (int) stat, (int) p->scb->status);
			}
		}
#endif

		if (stat & STAT_RNR) {
			/* RU went 'not ready' */

			if (p->scb->status & RU_SUSPEND) {
				/* special case: RU_SUSPEND */
				/* the ring was set up with RFD_SUSP (see alloc_rfa),
				   so just resume the receive unit */

				WAIT_4_SCB_CMD();
				p->scb->cmd = RUC_RESUME;
				elmc_attn586();
			} else {
				pr_warning("%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n",
					dev->name, (int) stat, (int) p->scb->status);
				elmc_rnr_int(dev);
			}
		}
		WAIT_4_SCB_CMD();	/* wait for ack. (elmc_xmt_int can be faster than ack!!) */
		if (p->scb->cmd) {	/* timed out? */
			break;
		}
	}
	return IRQ_HANDLED;
}
966
967/*******************************************************
968 * receive-interrupt
969 */
970
/*
 * elmc_rcv_int - receive-interrupt service.
 *
 * Walks the receive frame descriptor (RFD) ring from rfd_top, copying
 * each completed frame out of shared card memory into a fresh skb and
 * handing it to the network stack.  Each consumed RFD is recycled to
 * the ring tail and carries the RU suspend mark along with it, so the
 * receive unit never overruns unprocessed descriptors.
 */
static void elmc_rcv_int(struct net_device *dev)
{
	int status;
	unsigned short totlen;
	struct sk_buff *skb;
	struct rbd_struct *rbd;
	struct priv *p = netdev_priv(dev);

	for (; (status = p->rfd_top->status) & STAT_COMPL;) {
		rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);

		if (status & STAT_OK) {		/* frame received without error? */
			if ((totlen = rbd->status) & RBD_LAST) {	/* the first and the last buffer? */
				totlen &= RBD_MASK;	/* length of this frame */
				rbd->status = 0;
				skb = netdev_alloc_skb(dev, totlen + 2);
				if (skb != NULL) {
					skb_reserve(skb, 2);	/* 16 byte alignment */
					skb_put(skb,totlen);
					/* copy out of the shared memory window */
					skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += totlen;
				} else {
					dev->stats.rx_dropped++;
				}
			} else {
				/* frame spans more than one RBD; this driver only
				 * handles single-buffer frames, so drop it */
				pr_warning("%s: received oversized frame.\n", dev->name);
				dev->stats.rx_dropped++;
			}
		} else {	/* frame !(ok), only with 'save-bad-frames' */
			pr_warning("%s: oops! rfd-error-status: %04x\n", dev->name, status);
			dev->stats.rx_errors++;
		}
		/* recycle this RFD: it becomes the new suspended tail,
		 * and the old tail's suspend mark is cleared */
		p->rfd_top->status = 0;
		p->rfd_top->last = RFD_SUSP;
		p->rfd_last->last = 0;		/* delete RU_SUSP */
		p->rfd_last = p->rfd_top;
		p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next);	/* step to next RFD */
	}
}
1013
1014/**********************************************************
1015 * handle 'Receiver went not ready'.
1016 */
1017
/*
 * elmc_rnr_int - handle 'Receiver went not ready'.
 *
 * Aborts the stuck receive unit, rebuilds the whole receive frame
 * area from scratch and restarts the RU.  Any frames pending in the
 * old ring are lost (counted as rx_errors by the caller path).
 */
static void elmc_rnr_int(struct net_device *dev)
{
	struct priv *p = netdev_priv(dev);

	dev->stats.rx_errors++;

	WAIT_4_SCB_CMD();	/* wait for the last cmd */
	p->scb->cmd = RUC_ABORT;	/* usually the RU is in the 'no resource'-state .. abort it now. */
	elmc_attn586();
	WAIT_4_SCB_CMD();	/* wait for accept cmd. */

	/* rebuild the RFD/RBD rings in place and restart reception */
	alloc_rfa(dev, (char *) p->rfd_first);
	startrecv586(dev);	/* restart RU */

	pr_warning("%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->status);

}
1035
1036/**********************************************************
1037 * handle xmit - interrupt
1038 */
1039
/*
 * elmc_xmt_int - transmit-complete interrupt service.
 *
 * Decodes the status word of the last issued transmit command,
 * updates the interface statistics accordingly, advances the
 * transmit-buffer index (multi-buffer builds only) and wakes the
 * queue so the stack may submit the next packet.
 */
static void elmc_xmt_int(struct net_device *dev)
{
	int status;
	struct priv *p = netdev_priv(dev);

	status = p->xmit_cmds[p->xmit_last]->cmd_status;
	if (!(status & STAT_COMPL)) {
		pr_warning("%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
	}
	if (status & STAT_OK) {
		dev->stats.tx_packets++;
		/* low nibble of the status word holds the collision count */
		dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
	} else {
		dev->stats.tx_errors++;
		/* classify the failure from the i82586 transmit status bits */
		if (status & TCMD_LATECOLL) {
			pr_warning("%s: late collision detected.\n", dev->name);
			dev->stats.collisions++;
		} else if (status & TCMD_NOCARRIER) {
			dev->stats.tx_carrier_errors++;
			pr_warning("%s: no carrier detected.\n", dev->name);
		} else if (status & TCMD_LOSTCTS) {
			pr_warning("%s: loss of CTS detected.\n", dev->name);
		} else if (status & TCMD_UNDERRUN) {
			dev->stats.tx_fifo_errors++;
			pr_warning("%s: DMA underrun detected.\n", dev->name);
		} else if (status & TCMD_MAXCOLL) {
			pr_warning("%s: Max. collisions exceeded.\n", dev->name);
			dev->stats.collisions += 16;
		}
	}

#if (NUM_XMIT_BUFFS != 1)
	/* advance to the next transmit buffer, wrapping at the ring end */
	if ((++p->xmit_last) == NUM_XMIT_BUFFS) {
		p->xmit_last = 0;
	}
#endif

	netif_wake_queue(dev);
}
1079
1080/***********************************************************
1081 * (re)start the receiver
1082 */
1083
/*
 * startrecv586 - (re)start the i82586 receive unit.
 *
 * Points the SCB at the first receive frame descriptor and issues
 * RUC_START.  Note the final wait has no timeout: the hardware is
 * expected to acknowledge the command.
 */
static void startrecv586(struct net_device *dev)
{
	struct priv *p = netdev_priv(dev);

	p->scb->rfa_offset = make16(p->rfd_first);
	p->scb->cmd = RUC_START;
	elmc_attn586();		/* start cmd. */
	WAIT_4_SCB_CMD();	/* wait for accept cmd. (no timeout!!) */
}
1093
1094/******************************************************
1095 * timeout
1096 */
1097
/*
 * elmc_timeout - transmit watchdog.
 *
 * If the command unit still claims to be active, abort it and
 * restart the CU on the current NOP command, then wake the queue.
 * Otherwise assume the card is wedged and do a full close/open
 * cycle to reinitialise it.
 */
static void elmc_timeout(struct net_device *dev)
{
	struct priv *p = netdev_priv(dev);
	/* COMMAND-UNIT active? */
	if (p->scb->status & CU_ACTIVE) {
		pr_debug("%s: strange ... timeout with CU active?!?\n", dev->name);
		pr_debug("%s: X0: %04x N0: %04x N1: %04x %d\n", dev->name,
			 (int)p->xmit_cmds[0]->cmd_status,
			 (int)p->nop_cmds[0]->cmd_status,
			 (int)p->nop_cmds[1]->cmd_status, (int)p->nop_point);
		/* abort the CU, then restart it on the current NOP command */
		p->scb->cmd = CUC_ABORT;
		elmc_attn586();
		WAIT_4_SCB_CMD();
		p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
		p->scb->cmd = CUC_START;
		elmc_attn586();
		WAIT_4_SCB_CMD();
		netif_wake_queue(dev);
	} else {
		pr_debug("%s: xmitter timed out, try to restart! stat: %04x\n",
			 dev->name, p->scb->status);
		pr_debug("%s: command-stats: %04x %04x\n", dev->name,
			 p->xmit_cmds[0]->cmd_status, p->xmit_cmds[1]->cmd_status);
		/* full reinitialisation: tear down and bring the card back up */
		elmc_close(dev);
		elmc_open(dev);
	}
}
1125
1126/******************************************************
1127 * send frame
1128 */
1129
/*
 * elmc_send_packet - hard_start_xmit handler.
 *
 * Copies the frame into the card's shared-memory transmit buffer
 * (zero-padding short frames to ETH_ZLEN) and kicks the command
 * unit.  Three compile-time strategies exist:
 *  - single buffer without NOP commands: start the CU directly,
 *    retrying up to 16 times;
 *  - single buffer with NOP chaining: splice the transmit command
 *    into the self-linked NOP loop the CU is spinning on;
 *  - multiple buffers: ring of transmit commands linked via NOPs.
 */
static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	int len;
	int i;
#ifndef NO_NOPCOMMANDS
	int next_nop;
#endif
	struct priv *p = netdev_priv(dev);

	netif_stop_queue(dev);

	/* pad runt frames up to the ethernet minimum */
	len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;

	if (len != skb->len)
		memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
	skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);

#if (NUM_XMIT_BUFFS == 1)
#ifdef NO_NOPCOMMANDS
	p->xmit_buffs[0]->size = TBD_LAST | len;
	/* the CU sometimes ignores a start; retry a bounded number of times */
	for (i = 0; i < 16; i++) {
		p->scb->cbl_offset = make16(p->xmit_cmds[0]);
		p->scb->cmd = CUC_START;
		p->xmit_cmds[0]->cmd_status = 0;
		elmc_attn586();
		if (!i) {
			/* skb contents already copied; free it on the first pass */
			dev_kfree_skb(skb);
		}
		WAIT_4_SCB_CMD();
		if ((p->scb->status & CU_ACTIVE)) {	/* test it, because CU sometimes doesn't start immediately */
			break;
		}
		if (p->xmit_cmds[0]->cmd_status) {
			break;
		}
		if (i == 15) {
			pr_warning("%s: Can't start transmit-command.\n", dev->name);
		}
	}
#else
	/* splice the transmit command between the two alternating NOPs */
	next_nop = (p->nop_point + 1) & 0x1;
	p->xmit_buffs[0]->size = TBD_LAST | len;

	p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
	    = make16((p->nop_cmds[next_nop]));
	p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;

	/* linking the xmit command into the chain the CU is looping on
	 * is what actually triggers transmission */
	p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
	p->nop_point = next_nop;
	dev_kfree_skb(skb);
#endif
#else
	/* NOTE(review): this multi-buffer branch uses next_nop, which is only
	 * declared #ifndef NO_NOPCOMMANDS — presumably these configs are never
	 * combined; confirm before changing build options. */
	p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
	if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) {
		next_nop = 0;
	}
	p->xmit_cmds[p->xmit_count]->cmd_status = 0;
	p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link
	    = make16((p->nop_cmds[next_nop]));
	p->nop_cmds[next_nop]->cmd_status = 0;
	p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
	p->xmit_count = next_nop;
	/* keep the queue open while ring slots remain */
	if (p->xmit_count != p->xmit_last)
		netif_wake_queue(dev);
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}
1198
1199/*******************************************
1200 * Someone wanna have the statistics
1201 */
1202
1203static struct net_device_stats *elmc_get_stats(struct net_device *dev)
1204{
1205 struct priv *p = netdev_priv(dev);
1206 unsigned short crc, aln, rsc, ovrn;
1207
1208 crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */
1209 p->scb->crc_errs -= crc;
1210 aln = p->scb->aln_errs;
1211 p->scb->aln_errs -= aln;
1212 rsc = p->scb->rsc_errs;
1213 p->scb->rsc_errs -= rsc;
1214 ovrn = p->scb->ovrn_errs;
1215 p->scb->ovrn_errs -= ovrn;
1216
1217 dev->stats.rx_crc_errors += crc;
1218 dev->stats.rx_fifo_errors += ovrn;
1219 dev->stats.rx_frame_errors += aln;
1220 dev->stats.rx_dropped += rsc;
1221
1222 return &dev->stats;
1223}
1224
1225/********************************************************
1226 * Set MC list ..
1227 */
1228
1229#ifdef ELMC_MULTICAST
/*
 * set_multicast_list - rebuild the card configuration for a new
 * multicast/promiscuous setting.
 *
 * Implemented as a full re-init: tear the 586 down and bring it back
 * up with the new filter state.  NOTE(review): this uses the legacy
 * dev->start flag and is only built when ELMC_MULTICAST is defined —
 * presumably dead code in this configuration; confirm before relying
 * on it.
 */
static void set_multicast_list(struct net_device *dev)
{
	if (!dev->start) {
		/* without a running interface, promiscuous doesn't work */
		return;
	}
	dev->start = 0;
	alloc586(dev);
	init586(dev);
	startrecv586(dev);
	dev->start = 1;
}
1242#endif
1243
1244static void netdev_get_drvinfo(struct net_device *dev,
1245 struct ethtool_drvinfo *info)
1246{
1247 strcpy(info->driver, DRV_NAME);
1248 strcpy(info->version, DRV_VERSION);
1249 sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
1250}
1251
/* Only get_drvinfo is implemented; all other ethtool operations fall
 * back to the kernel defaults. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
};
1255
1256#ifdef MODULE
1257
1258/* Increase if needed ;) */
1259#define MAX_3C523_CARDS 4
1260
1261static struct net_device *dev_elmc[MAX_3C523_CARDS];
1262static int irq[MAX_3C523_CARDS];
1263static int io[MAX_3C523_CARDS];
1264module_param_array(irq, int, NULL, 0);
1265module_param_array(io, int, NULL, 0);
1266MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)");
1267MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)");
1268MODULE_LICENSE("GPL");
1269
/*
 * init_module - module entry point.
 *
 * Probes for up to MAX_3C523_CARDS boards.  For each slot either a
 * card is found and registered, or the netdev is freed again.  An
 * io[] entry of 0 means "autoprobe"; a failed probe at io==0 ends the
 * scan, while a failed probe at an explicit address just logs a
 * warning and continues.  Returns 0 if at least one card was found,
 * -ENXIO otherwise.
 */
int __init init_module(void)
{
	int this_dev,found = 0;

	/* Loop until we either can't find any more cards, or we have MAX_3C523_CARDS */
	for(this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
		struct net_device *dev = alloc_etherdev(sizeof(struct priv));
		if (!dev)
			break;
		dev->irq=irq[this_dev];
		dev->base_addr=io[this_dev];
		if (do_elmc_probe(dev) == 0) {
			dev_elmc[this_dev] = dev;
			found++;
			continue;
		}
		/* probe failed: release the netdev before deciding how to go on */
		free_netdev(dev);
		if (io[this_dev]==0)
			break;
		pr_warning("3c523.c: No 3c523 card found at io=%#x\n",io[this_dev]);
	}

	if(found==0) {
		if (io[0]==0)
			pr_notice("3c523.c: No 3c523 cards found\n");
		return -ENXIO;
	} else return 0;
}
1298
1299void __exit cleanup_module(void)
1300{
1301 int this_dev;
1302 for (this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
1303 struct net_device *dev = dev_elmc[this_dev];
1304 if (dev) {
1305 unregister_netdev(dev);
1306 cleanup_card(dev);
1307 free_netdev(dev);
1308 }
1309 }
1310}
1311
1312#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/3c523.h b/drivers/net/ethernet/i825xx/3c523.h
deleted file mode 100644
index 6956441687b9..000000000000
--- a/drivers/net/ethernet/i825xx/3c523.h
+++ /dev/null
@@ -1,355 +0,0 @@
1#ifndef _3c523_INCLUDE_
2#define _3c523_INCLUDE_
3/*
4 This is basically a hacked version of ni52.h, for the 3c523
5 Etherlink/MC.
6*/
7
8/*
9 * Intel i82586 Ethernet definitions
10 *
11 * This is an extension to the Linux operating system, and is covered by the
12 * same GNU General Public License that covers that work.
13 *
14 * Copyright 1995 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca)
15 *
16 * See 3c523.c for details.
17 *
18 * $Header: /home/chrisb/linux-1.2.13-3c523/drivers/net/RCS/3c523.h,v 1.6 1996/01/20 05:09:00 chrisb Exp chrisb $
19 */
20
21/*
22 * where to find the System Configuration Pointer (SCP)
23 */
24#define SCP_DEFAULT_ADDRESS 0xfffff4
25
26
27/*
28 * System Configuration Pointer Struct
29 */
30
struct scp_struct
{
	unsigned short zero_dum0;	/* has to be zero */
	unsigned char sysbus;	/* 0=16Bit,1=8Bit */
	unsigned char zero_dum1;	/* has to be zero for 586 */
	unsigned short zero_dum2;	/* reserved, must be zero */
	unsigned short zero_dum3;	/* reserved, must be zero */
	char *iscp;		/* pointer to the iscp-block */
};
40
41
42/*
43 * Intermediate System Configuration Pointer (ISCP)
44 */
struct iscp_struct
{
	unsigned char busy;	/* 586 clears after successful init */
	unsigned char zero_dummy;	/* has to be zero */
	unsigned short scb_offset;	/* pointeroffset to the scb_base */
	char *scb_base;		/* base-address of all 16-bit offsets */
};
52
53/*
54 * System Control Block (SCB)
55 */
/* The error counters at the end are sampled and decremented by the
 * driver (see elmc_get_stats) rather than simply cleared. */
struct scb_struct
{
	unsigned short status;	/* status word */
	unsigned short cmd;	/* command word */
	unsigned short cbl_offset;	/* pointeroffset, command block list */
	unsigned short rfa_offset;	/* pointeroffset, receive frame area */
	unsigned short crc_errs;	/* CRC-Error counter */
	unsigned short aln_errs;	/* alignmenterror counter */
	unsigned short rsc_errs;	/* Resourceerror counter */
	unsigned short ovrn_errs;	/* OVerrunerror counter */
};
67
68/*
69 * possible command values for the command word
70 */
71#define RUC_MASK 0x0070 /* mask for RU commands */
72#define RUC_NOP 0x0000 /* NOP-command */
73#define RUC_START 0x0010 /* start RU */
74#define RUC_RESUME 0x0020 /* resume RU after suspend */
75#define RUC_SUSPEND 0x0030 /* suspend RU */
76#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
77
78#define CUC_MASK 0x0700 /* mask for CU command */
79#define CUC_NOP 0x0000 /* NOP-command */
80#define CUC_START 0x0100 /* start execution of 1. cmd on the CBL */
81#define CUC_RESUME 0x0200 /* resume after suspend */
82#define CUC_SUSPEND 0x0300 /* Suspend CU */
83#define CUC_ABORT 0x0400 /* abort command operation immediately */
84
85#define ACK_MASK 0xf000 /* mask for ACK command */
86#define ACK_CX 0x8000 /* acknowledges STAT_CX */
87#define ACK_FR 0x4000 /* ack. STAT_FR */
88#define ACK_CNA 0x2000 /* ack. STAT_CNA */
89#define ACK_RNR 0x1000 /* ack. STAT_RNR */
90
91/*
92 * possible status values for the status word
93 */
94#define STAT_MASK 0xf000 /* mask for cause of interrupt */
95#define STAT_CX 0x8000 /* CU finished cmd with its I bit set */
96#define STAT_FR 0x4000 /* RU finished receiving a frame */
97#define STAT_CNA 0x2000 /* CU left active state */
98#define STAT_RNR 0x1000 /* RU left ready state */
99
100#define CU_STATUS 0x700 /* CU status, 0=idle */
101#define CU_SUSPEND 0x100 /* CU is suspended */
102#define CU_ACTIVE 0x200 /* CU is active */
103
104#define RU_STATUS 0x70 /* RU status, 0=idle */
105#define RU_SUSPEND 0x10 /* RU suspended */
106#define RU_NOSPACE 0x20 /* RU no resources */
107#define RU_READY 0x40 /* RU is ready */
108
109/*
110 * Receive Frame Descriptor (RFD)
111 */
/* One entry of the receive descriptor ring; 'last' carries the
 * RFD_LAST/RFD_SUSP marks that the driver moves around the ring. */
struct rfd_struct
{
	unsigned short status;	/* status word */
	unsigned short last;	/* Bit15,Last Frame on List / Bit14,suspend */
	unsigned short next;	/* linkoffset to next RFD */
	unsigned short rbd_offset;	/* pointeroffset to RBD-buffer */
	unsigned char dest[6];	/* ethernet-address, destination */
	unsigned char source[6];	/* ethernet-address, source */
	unsigned short length;	/* 802.3 frame-length */
	unsigned short zero_dummy;	/* dummy */
};
123
124#define RFD_LAST 0x8000 /* last: last rfd in the list */
125#define RFD_SUSP 0x4000 /* last: suspend RU after */
126#define RFD_ERRMASK 0x0fe1 /* status: errormask */
127#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA */
128#define RFD_RNR 0x0200 /* status: receiver out of resources */
129
130/*
131 * Receive Buffer Descriptor (RBD)
132 */
/* One entry of the receive buffer ring; status carries RBD_LAST/
 * RBD_USED flags plus the byte count masked by RBD_MASK. */
struct rbd_struct
{
	unsigned short status;	/* status word,number of used bytes in buff */
	unsigned short next;	/* pointeroffset to next RBD */
	char *buffer;		/* receive buffer address pointer */
	unsigned short size;	/* size of this buffer */
	unsigned short zero_dummy;	/* dummy */
};
141
142#define RBD_LAST 0x8000 /* last buffer */
143#define RBD_USED 0x4000 /* this buffer has data */
144#define RBD_MASK 0x3fff /* size-mask for length */
145
146/*
147 * Statusvalues for Commands/RFD
148 */
149#define STAT_COMPL 0x8000 /* status: frame/command is complete */
150#define STAT_BUSY 0x4000 /* status: frame/command is busy */
151#define STAT_OK 0x2000 /* status: frame/command is ok */
152
153/*
154 * Action-Commands
155 */
156#define CMD_NOP 0x0000 /* NOP */
157#define CMD_IASETUP 0x0001 /* initial address setup command */
158#define CMD_CONFIGURE 0x0002 /* configure command */
159#define CMD_MCSETUP 0x0003 /* MC setup command */
160#define CMD_XMIT 0x0004 /* transmit command */
161#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
162#define CMD_DUMP 0x0006 /* dump command */
163#define CMD_DIAGNOSE 0x0007 /* diagnose command */
164
165/*
166 * Action command bits
167 */
168#define CMD_LAST 0x8000 /* indicates last command in the CBL */
169#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
170#define CMD_INT 0x2000 /* generate interrupt after execution */
171
172/*
173 * NOP - command
174 */
/* Minimal action command header, shared by all command structs below. */
struct nop_cmd_struct
{
	unsigned short cmd_status;	/* status of this command */
	unsigned short cmd_cmd;	/* the command itself (+bits) */
	unsigned short cmd_link;	/* offsetpointer to next command */
};
181
182/*
183 * IA Setup command
184 */
struct iasetup_cmd_struct
{
	unsigned short cmd_status;	/* command status */
	unsigned short cmd_cmd;	/* CMD_IASETUP plus command bits */
	unsigned short cmd_link;	/* offset of next command */
	unsigned char iaddr[6];	/* individual (station) ethernet address */
};
192
193/*
194 * Configure command
195 */
struct configure_cmd_struct
{
	unsigned short cmd_status;	/* command status */
	unsigned short cmd_cmd;	/* CMD_CONFIGURE plus command bits */
	unsigned short cmd_link;	/* offset of next command */
	unsigned char byte_cnt;	/* size of the config-cmd */
	unsigned char fifo;	/* fifo/recv monitor */
	unsigned char sav_bf;	/* save bad frames (bit7=1)*/
	unsigned char adr_len;	/* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
	unsigned char priority;	/* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
	unsigned char ifs;	/* inter frame spacing */
	unsigned char time_low;	/* slot time low */
	unsigned char time_high;	/* slot time high(0-2) and max. retries(4-7) */
	unsigned char promisc;	/* promisc-mode(0) , et al (1-7) */
	unsigned char carr_coll;	/* carrier(0-3)/collision(4-7) stuff */
	unsigned char fram_len;	/* minimal frame len */
	unsigned char dummy;	/* dummy */
};
214
215/*
216 * Multicast Setup command
217 */
/*
 * Multicast Setup command.
 *
 * The address list trails the fixed header; mc_cnt gives its length
 * in bytes.  Declared as a C99 flexible array member rather than the
 * old GNU zero-length array `mc_list[0][6]` — same size and layout,
 * but standard-conforming and friendlier to bounds checkers.
 */
struct mcsetup_cmd_struct
{
	unsigned short cmd_status;	/* command status */
	unsigned short cmd_cmd;	/* CMD_MCSETUP plus command bits */
	unsigned short cmd_link;	/* offset of next command */
	unsigned short mc_cnt;	/* number of bytes in the MC-List */
	unsigned char mc_list[][6];	/* trailing 6-byte address entries */
};
226
227/*
228 * transmit command
229 */
struct transmit_cmd_struct
{
	unsigned short cmd_status;	/* command status (TCMD_* bits on completion) */
	unsigned short cmd_cmd;	/* CMD_XMIT plus command bits */
	unsigned short cmd_link;	/* offset of next command */
	unsigned short tbd_offset;	/* pointeroffset to TBD */
	unsigned char dest[6];	/* destination address of the frame */
	unsigned short length;	/* user defined: 802.3 length / Ether type */
};
239
240#define TCMD_ERRMASK 0x0fa0
241#define TCMD_MAXCOLLMASK 0x000f
242#define TCMD_MAXCOLL 0x0020
243#define TCMD_HEARTBEAT 0x0040
244#define TCMD_DEFERRED 0x0080
245#define TCMD_UNDERRUN 0x0100
246#define TCMD_LOSTCTS 0x0200
247#define TCMD_NOCARRIER 0x0400
248#define TCMD_LATECOLL 0x0800
249
/* Time-domain-reflectometer (cable test) command; result arrives in
 * 'status' as the TDR_* bits plus a time field. */
struct tdr_cmd_struct
{
	unsigned short cmd_status;	/* command status */
	unsigned short cmd_cmd;	/* CMD_TDR plus command bits */
	unsigned short cmd_link;	/* offset of next command */
	unsigned short status;	/* TDR result word */
};
257
258#define TDR_LNK_OK 0x8000 /* No link problem identified */
259#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
260#define TDR_ET_OPN 0x2000 /* open, no correct termination */
261#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
262#define TDR_TIMEMASK 0x07ff /* mask for the time field */
263
264/*
265 * Transmit Buffer Descriptor (TBD)
266 */
struct tbd_struct
{
	unsigned short size;	/* size + EOF-Flag(15) */
	unsigned short next;	/* pointeroffset to next TBD */
	char *buffer;		/* pointer to buffer */
};
273
274#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
275
276/*************************************************************************/
277/*
278Verbatim from the Crynwyr stuff:
279
280 The 3c523 responds with adapter code 0x6042 at slot
281registers xxx0 and xxx1. The setup register is at xxx2 and
282contains the following bits:
283
2840: card enable
2852,1: csr address select
286 00 = 0300
287 01 = 1300
288 10 = 2300
289 11 = 3300
2904,3: shared memory address select
291 00 = 0c0000
292 01 = 0c8000
293 10 = 0d0000
294 11 = 0d8000
2955: set to disable on-board thinnet
2967,6: (read-only) shows selected irq
297 00 = 12
298 01 = 7
299 10 = 3
300 11 = 9
301
302The interrupt-select register is at xxx3 and uses one bit per irq.
303
3040: int 12
3051: int 7
3062: int 3
3073: int 9
308
309 Again, the documentation stresses that the setup register
310should never be written. The interrupt-select register may be
311written with the value corresponding to bits 7.6 in
312the setup register to insure corret setup.
313*/
314
315/* Offsets from the base I/O address. */
316#define ELMC_SA 0 /* first 6 bytes are IEEE network address */
317#define ELMC_CTRL 6 /* control & status register */
318#define ELMC_REVISION 7 /* revision register, first 4 bits only */
319#define ELMC_IO_EXTENT 8
320
321/* these are the bit selects for the port register 2 */
322#define ELMC_STATUS_ENABLED 0x01
323#define ELMC_STATUS_CSR_SELECT 0x06
324#define ELMC_STATUS_MEMORY_SELECT 0x18
325#define ELMC_STATUS_DISABLE_THIN 0x20
326#define ELMC_STATUS_IRQ_SELECT 0xc0
327
328/* this is the card id used in the detection code. You might recognize
329it from @6042.adf */
330#define ELMC_MCA_ID 0x6042
331
332/*
333 The following define the bits for the control & status register
334
335 The bank select registers can be used if more than 16K of memory is
336 on the card. For some stupid reason, bank 3 is the one for the
337 bottom 16K, and the card defaults to bank 0. So we have to set the
338 bank to 3 before the card will even think of operating. To get bank
339 3, set BS0 and BS1 to high (of course...)
340*/
341#define ELMC_CTRL_BS0 0x01 /* RW bank select */
342#define ELMC_CTRL_BS1 0x02 /* RW bank select */
343#define ELMC_CTRL_INTE 0x04 /* RW interrupt enable, assert high */
344#define ELMC_CTRL_INT 0x08 /* R interrupt active, assert high */
345/*#define ELMC_CTRL_* 0x10*/ /* reserved */
346#define ELMC_CTRL_LBK 0x20 /* RW loopback enable, assert high */
347#define ELMC_CTRL_CA 0x40 /* RW channel attention, assert high */
348#define ELMC_CTRL_RST 0x80 /* RW 82586 reset, assert low */
349
350/* some handy compound bits */
351
352/* normal operation should have bank 3 and RST high, ints enabled */
353#define ELMC_NORMAL (ELMC_CTRL_INTE|ELMC_CTRL_RST|0x3)
354
355#endif /* _3c523_INCLUDE_ */
diff --git a/drivers/net/ethernet/i825xx/3c527.c b/drivers/net/ethernet/i825xx/3c527.c
deleted file mode 100644
index 278e791afe00..000000000000
--- a/drivers/net/ethernet/i825xx/3c527.c
+++ /dev/null
@@ -1,1660 +0,0 @@
1/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
2 *
3 * (c) Copyright 1998 Red Hat Software Inc
4 * Written by Alan Cox.
5 * Further debugging by Carl Drougge.
6 * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
7 * Heavily modified by Richard Procter <rnp@paradise.net.nz>
8 *
9 * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
10 * (for the MCA stuff) written by Wim Dumon.
11 *
12 * Thanks to 3Com for making this possible by providing me with the
13 * documentation.
14 *
15 * This software may be used and distributed according to the terms
16 * of the GNU General Public License, incorporated herein by reference.
17 *
18 */
19
20#define DRV_NAME "3c527"
21#define DRV_VERSION "0.7-SMP"
22#define DRV_RELDATE "2003/09/21"
23
24static const char *version =
25DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
26
27/**
28 * DOC: Traps for the unwary
29 *
30 * The diagram (Figure 1-1) and the POS summary disagree with the
31 * "Interrupt Level" section in the manual.
32 *
33 * The manual contradicts itself when describing the minimum number
34 * buffers in the 'configure lists' command.
35 * My card accepts a buffer config of 4/4.
36 *
37 * Setting the SAV BP bit does not save bad packets, but
38 * only enables RX on-card stats collection.
39 *
40 * The documentation in places seems to miss things. In actual fact
41 * I've always eventually found everything is documented, it just
42 * requires careful study.
43 *
44 * DOC: Theory Of Operation
45 *
46 * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
47 * amount of on board intelligence that housekeeps a somewhat dumber
48 * Intel NIC. For performance we want to keep the transmit queue deep
49 * as the card can transmit packets while fetching others from main
50 * memory by bus master DMA. Transmission and reception are driven by
51 * circular buffer queues.
52 *
53 * The mailboxes can be used for controlling how the card traverses
54 * its buffer rings, but are used only for initial setup in this
55 * implementation. The exec mailbox allows a variety of commands to
56 * be executed. Each command must complete before the next is
57 * executed. Primarily we use the exec mailbox for controlling the
58 * multicast lists. We have to do a certain amount of interesting
59 * hoop jumping as the multicast list changes can occur in interrupt
60 * state when the card has an exec command pending. We defer such
61 * events until the command completion interrupt.
62 *
63 * A copy break scheme (taken from 3c59x.c) is employed whereby
64 * received frames exceeding a configurable length are passed
65 * directly to the higher networking layers without incuring a copy,
66 * in what amounts to a time/space trade-off.
67 *
68 * The card also keeps a large amount of statistical information
69 * on-board. In a perfect world, these could be used safely at no
70 * cost. However, lacking information to the contrary, processing
71 * them without races would involve so much extra complexity as to
72 * make it unworthwhile to do so. In the end, a hybrid SW/HW
73 * implementation was made necessary --- see mc32_update_stats().
74 *
75 * DOC: Notes
76 *
77 * It should be possible to use two or more cards, but at this stage
78 * only by loading two copies of the same module.
79 *
80 * The on-board 82586 NIC has trouble receiving multiple
81 * back-to-back frames and so is likely to drop packets from fast
82 * senders.
83**/
84
85#include <linux/module.h>
86
87#include <linux/errno.h>
88#include <linux/netdevice.h>
89#include <linux/etherdevice.h>
90#include <linux/if_ether.h>
91#include <linux/init.h>
92#include <linux/kernel.h>
93#include <linux/types.h>
94#include <linux/fcntl.h>
95#include <linux/interrupt.h>
96#include <linux/mca-legacy.h>
97#include <linux/ioport.h>
98#include <linux/in.h>
99#include <linux/skbuff.h>
100#include <linux/slab.h>
101#include <linux/string.h>
102#include <linux/wait.h>
103#include <linux/ethtool.h>
104#include <linux/completion.h>
105#include <linux/bitops.h>
106#include <linux/semaphore.h>
107
108#include <asm/uaccess.h>
109#include <asm/io.h>
110#include <asm/dma.h>
111
112#include "3c527.h"
113
114MODULE_LICENSE("GPL");
115
116/*
117 * The name of the card. Is used for messages and in the requests for
118 * io regions, irqs and dma channels
119 */
120static const char* cardname = DRV_NAME;
121
122/* use 0 for production, 1 for verification, >2 for debug */
123#ifndef NET_DEBUG
124#define NET_DEBUG 2
125#endif
126
127static unsigned int mc32_debug = NET_DEBUG;
128
129/* The number of low I/O ports used by the ethercard. */
130#define MC32_IO_EXTENT 8
131
132/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
133#define TX_RING_LEN 32 /* Typically the card supports 37 */
134#define RX_RING_LEN 8 /* " " " */
135
136/* Copy break point, see above for details.
137 * Setting to > 1512 effectively disables this feature. */
138#define RX_COPYBREAK 200 /* Value from 3c59x.c */
139
140/* Issue the 82586 workaround command - this is for "busy lans", but
141 * basically means for all lans now days - has a performance (latency)
142 * cost, but best set. */
143static const int WORKAROUND_82586=1;
144
145/* Pointers to buffers and their on-card records */
/* Pointers to buffers and their on-card records: pairs a host skb
 * with the card-resident descriptor it is attached to. */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* on-card descriptor (card memory) */
	struct sk_buff *skb;		/* host buffer backing the descriptor */
};
151
152/* Information that needs to be kept for each board. */
/* Information that needs to be kept for each board: per-device private
 * state, reached via netdev_priv(). */
struct mc32_local
{
	int slot;			/* MCA slot the adapter occupies */

	u32 base;			/* card shared-memory base */
	volatile struct mc32_mailbox *rx_box;
	volatile struct mc32_mailbox *tx_box;
	volatile struct mc32_mailbox *exec_box;
	volatile struct mc32_stats *stats;	/* Start of on-card statistics */
	u16 tx_chain;			/* Transmit list start offset */
	u16 rx_chain;			/* Receive list start offset */
	u16 tx_len;			/* Transmit list count */
	u16 rx_len;			/* Receive list count */

	u16 xceiver_desired_state;	/* HALTED or RUNNING */
	u16 cmd_nonblocking;		/* Thread is uninterested in command result */
	u16 mc_reload_wait;		/* A multicast load request is pending */
	u32 mc_list_valid;		/* True when the mclist is set */

	struct mc32_ring_desc tx_ring[TX_RING_LEN];	/* Host Transmit ring */
	struct mc32_ring_desc rx_ring[RX_RING_LEN];	/* Host Receive ring */

	atomic_t tx_count;		/* buffers left */
	atomic_t tx_ring_head;		/* index to tx en-queue end */
	u16 tx_ring_tail;		/* index to tx de-queue end */

	u16 rx_ring_tail;		/* index to rx de-queue end */

	struct semaphore cmd_mutex;	/* Serialises issuing of execute commands */
	struct completion execution_cmd;	/* Card has completed an execute command */
	struct completion xceiver_cmd;	/* Card has completed a tx or rx command */
};
185
186/* The station (ethernet) address prefix, used for a sanity check. */
187#define SA_ADDR0 0x02
188#define SA_ADDR1 0x60
189#define SA_ADDR2 0xAC
190
/* POS adapter id / display name pair for the MCA probe table. */
struct mca_adapters_t {
	unsigned int id;	/* MCA POS adapter id */
	char *name;		/* human-readable adapter name */
};

/* Supported adapters; terminated by a zero id / NULL name entry. */
static const struct mca_adapters_t mc32_adapters[] = {
	{ 0x0041, "3COM EtherLink MC/32" },
	{ 0x8EF5, "IBM High Performance Lan Adapter" },
	{ 0x0000, NULL }
};
201
202
203/* Macros for ring index manipulations */
204static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
205static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
206
207static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
208
209
210/* Index to functions, as function prototypes. */
211static int mc32_probe1(struct net_device *dev, int ioaddr);
212static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
213static int mc32_open(struct net_device *dev);
214static void mc32_timeout(struct net_device *dev);
215static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
216 struct net_device *dev);
217static irqreturn_t mc32_interrupt(int irq, void *dev_id);
218static int mc32_close(struct net_device *dev);
219static struct net_device_stats *mc32_get_stats(struct net_device *dev);
220static void mc32_set_multicast_list(struct net_device *dev);
221static void mc32_reset_multicast_list(struct net_device *dev);
222static const struct ethtool_ops netdev_ethtool_ops;
223
/*
 * cleanup_card - undo everything mc32_probe1 acquired for one board:
 * release the MCA slot, the IRQ and the I/O region.
 */
static void cleanup_card(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	unsigned slot = lp->slot;
	mca_mark_as_unused(slot);
	mca_set_adapter_name(slot, NULL);
	free_irq(dev->irq, dev);
	release_region(dev->base_addr, MC32_IO_EXTENT);
}
233
234/**
235 * mc32_probe - Search for supported boards
236 * @unit: interface number to use
237 *
238 * Because MCA bus is a real bus and we can scan for cards we could do a
239 * single scan for all boards here. Right now we use the passed in device
240 * structure and scan for only one board. This needs fixing for modules
241 * in particular.
242 */
243
244struct net_device *__init mc32_probe(int unit)
245{
246 struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
247 static int current_mca_slot = -1;
248 int i;
249 int err;
250
251 if (!dev)
252 return ERR_PTR(-ENOMEM);
253
254 if (unit >= 0)
255 sprintf(dev->name, "eth%d", unit);
256
257 /* Do not check any supplied i/o locations.
258 POS registers usually don't fail :) */
259
260 /* MCA cards have POS registers.
261 Autodetecting MCA cards is extremely simple.
262 Just search for the card. */
263
264 for(i = 0; (mc32_adapters[i].name != NULL); i++) {
265 current_mca_slot =
266 mca_find_unused_adapter(mc32_adapters[i].id, 0);
267
268 if(current_mca_slot != MCA_NOTFOUND) {
269 if(!mc32_probe1(dev, current_mca_slot))
270 {
271 mca_set_adapter_name(current_mca_slot,
272 mc32_adapters[i].name);
273 mca_mark_as_used(current_mca_slot);
274 err = register_netdev(dev);
275 if (err) {
276 cleanup_card(dev);
277 free_netdev(dev);
278 dev = ERR_PTR(err);
279 }
280 return dev;
281 }
282
283 }
284 }
285 free_netdev(dev);
286 return ERR_PTR(-ENODEV);
287}
288
/* Network stack entry points; the card firmware does most of the heavy
 * lifting, so the generic eth_* helpers suffice for MTU/MAC handling. */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= mc32_open,
	.ndo_stop		= mc32_close,
	.ndo_start_xmit		= mc32_send_packet,
	.ndo_get_stats		= mc32_get_stats,
	.ndo_set_rx_mode	= mc32_set_multicast_list,
	.ndo_tx_timeout		= mc32_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
300
301/**
302 * mc32_probe1 - Check a given slot for a board and test the card
303 * @dev: Device structure to fill in
304 * @slot: The MCA bus slot being used by this card
305 *
306 * Decode the slot data and configure the card structures. Having done this we
307 * can reset the card and configure it. The card does a full self test cycle
308 * in firmware so we have to wait for it to return and post us either a
309 * failure case or some addresses we use to find the board internals.
310 */
311
312static int __init mc32_probe1(struct net_device *dev, int slot)
313{
314 static unsigned version_printed;
315 int i, err;
316 u8 POS;
317 u32 base;
318 struct mc32_local *lp = netdev_priv(dev);
319 static const u16 mca_io_bases[] = {
320 0x7280,0x7290,
321 0x7680,0x7690,
322 0x7A80,0x7A90,
323 0x7E80,0x7E90
324 };
325 static const u32 mca_mem_bases[] = {
326 0x00C0000,
327 0x00C4000,
328 0x00C8000,
329 0x00CC000,
330 0x00D0000,
331 0x00D4000,
332 0x00D8000,
333 0x00DC000
334 };
335 static const char * const failures[] = {
336 "Processor instruction",
337 "Processor data bus",
338 "Processor data bus",
339 "Processor data bus",
340 "Adapter bus",
341 "ROM checksum",
342 "Base RAM",
343 "Extended RAM",
344 "82586 internal loopback",
345 "82586 initialisation failure",
346 "Adapter list configuration error"
347 };
348
349 /* Time to play MCA games */
350
351 if (mc32_debug && version_printed++ == 0)
352 pr_debug("%s", version);
353
354 pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot);
355
356 POS = mca_read_stored_pos(slot, 2);
357
358 if(!(POS&1))
359 {
360 pr_cont("disabled.\n");
361 return -ENODEV;
362 }
363
364 /* Fill in the 'dev' fields. */
365 dev->base_addr = mca_io_bases[(POS>>1)&7];
366 dev->mem_start = mca_mem_bases[(POS>>4)&7];
367
368 POS = mca_read_stored_pos(slot, 4);
369 if(!(POS&1))
370 {
371 pr_cont("memory window disabled.\n");
372 return -ENODEV;
373 }
374
375 POS = mca_read_stored_pos(slot, 5);
376
377 i=(POS>>4)&3;
378 if(i==3)
379 {
380 pr_cont("invalid memory window.\n");
381 return -ENODEV;
382 }
383
384 i*=16384;
385 i+=16384;
386
387 dev->mem_end=dev->mem_start + i;
388
389 dev->irq = ((POS>>2)&3)+9;
390
391 if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
392 {
393 pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr);
394 return -EBUSY;
395 }
396
397 pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
398 dev->base_addr, dev->irq, dev->mem_start, i/1024);
399
400
401 /* We ought to set the cache line size here.. */
402
403
404 /*
405 * Go PROM browsing
406 */
407
408 /* Retrieve and print the ethernet address. */
409 for (i = 0; i < 6; i++)
410 {
411 mca_write_pos(slot, 6, i+12);
412 mca_write_pos(slot, 7, 0);
413
414 dev->dev_addr[i] = mca_read_pos(slot,3);
415 }
416
417 pr_info("%s: Address %pM ", dev->name, dev->dev_addr);
418
419 mca_write_pos(slot, 6, 0);
420 mca_write_pos(slot, 7, 0);
421
422 POS = mca_read_stored_pos(slot, 4);
423
424 if(POS&2)
425 pr_cont(": BNC port selected.\n");
426 else
427 pr_cont(": AUI port selected.\n");
428
429 POS=inb(dev->base_addr+HOST_CTRL);
430 POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
431 POS&=~HOST_CTRL_INTE;
432 outb(POS, dev->base_addr+HOST_CTRL);
433 /* Reset adapter */
434 udelay(100);
435 /* Reset off */
436 POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
437 outb(POS, dev->base_addr+HOST_CTRL);
438
439 udelay(300);
440
441 /*
442 * Grab the IRQ
443 */
444
445 err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev);
446 if (err) {
447 release_region(dev->base_addr, MC32_IO_EXTENT);
448 pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
449 goto err_exit_ports;
450 }
451
452 memset(lp, 0, sizeof(struct mc32_local));
453 lp->slot = slot;
454
455 i=0;
456
457 base = inb(dev->base_addr);
458
459 while(base == 0xFF)
460 {
461 i++;
462 if(i == 1000)
463 {
464 pr_err("%s: failed to boot adapter.\n", dev->name);
465 err = -ENODEV;
466 goto err_exit_irq;
467 }
468 udelay(1000);
469 if(inb(dev->base_addr+2)&(1<<5))
470 base = inb(dev->base_addr);
471 }
472
473 if(base>0)
474 {
475 if(base < 0x0C)
476 pr_err("%s: %s%s.\n", dev->name, failures[base-1],
477 base<0x0A?" test failure":"");
478 else
479 pr_err("%s: unknown failure %d.\n", dev->name, base);
480 err = -ENODEV;
481 goto err_exit_irq;
482 }
483
484 base=0;
485 for(i=0;i<4;i++)
486 {
487 int n=0;
488
489 while(!(inb(dev->base_addr+2)&(1<<5)))
490 {
491 n++;
492 udelay(50);
493 if(n>100)
494 {
495 pr_err("%s: mailbox read fail (%d).\n", dev->name, i);
496 err = -ENODEV;
497 goto err_exit_irq;
498 }
499 }
500
501 base|=(inb(dev->base_addr)<<(8*i));
502 }
503
504 lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
505
506 base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
507
508 lp->base = dev->mem_start+base;
509
510 lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
511 lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
512
513 lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
514
515 /*
516 * Descriptor chains (card relative)
517 */
518
519 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
520 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
521 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
522 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
523
524 sema_init(&lp->cmd_mutex, 0);
525 init_completion(&lp->execution_cmd);
526 init_completion(&lp->xceiver_cmd);
527
528 pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
529 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
530
531 dev->netdev_ops = &netdev_ops;
532 dev->watchdog_timeo = HZ*5; /* Board does all the work */
533 dev->ethtool_ops = &netdev_ethtool_ops;
534
535 return 0;
536
537err_exit_irq:
538 free_irq(dev->irq, dev);
539err_exit_ports:
540 release_region(dev->base_addr, MC32_IO_EXTENT);
541 return err;
542}
543
544
545/**
546 * mc32_ready_poll - wait until we can feed it a command
547 * @dev: The device to wait for
548 *
549 * Wait until the card becomes ready to accept a command via the
550 * command register. This tells us nothing about the completion
551 * status of any pending commands and takes very little time at all.
552 */
553
554static inline void mc32_ready_poll(struct net_device *dev)
555{
556 int ioaddr = dev->base_addr;
557 while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
558}
559
560
561/**
562 * mc32_command_nowait - send a command non blocking
563 * @dev: The 3c527 to issue the command to
564 * @cmd: The command word to write to the mailbox
565 * @data: A data block if the command expects one
566 * @len: Length of the data block
567 *
568 * Send a command from interrupt state. If there is a command
569 * currently being executed then we return an error of -1. It
570 * simply isn't viable to wait around as commands may be
571 * slow. This can theoretically be starved on SMP, but it's hard
572 * to see a realistic situation. We do not wait for the command
573 * to complete --- we rely on the interrupt handler to tidy up
574 * after us.
575 */
576
577static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
578{
579 struct mc32_local *lp = netdev_priv(dev);
580 int ioaddr = dev->base_addr;
581 int ret = -1;
582
583 if (down_trylock(&lp->cmd_mutex) == 0)
584 {
585 lp->cmd_nonblocking=1;
586 lp->exec_box->mbox=0;
587 lp->exec_box->mbox=cmd;
588 memcpy((void *)lp->exec_box->data, data, len);
589 barrier(); /* the memcpy forgot the volatile so be sure */
590
591 /* Send the command */
592 mc32_ready_poll(dev);
593 outb(1<<6, ioaddr+HOST_CMD);
594
595 ret = 0;
596
597 /* Interrupt handler will signal mutex on completion */
598 }
599
600 return ret;
601}
602
603
604/**
605 * mc32_command - send a command and sleep until completion
606 * @dev: The 3c527 card to issue the command to
607 * @cmd: The command word to write to the mailbox
608 * @data: A data block if the command expects one
609 * @len: Length of the data block
610 *
611 * Sends exec commands in a user context. This permits us to wait around
612 * for the replies and also to wait for the command buffer to complete
613 * from a previous command before we execute our command. After our
614 * command completes we will attempt any pending multicast reload
615 * we blocked off by hogging the exec buffer.
616 *
617 * You feed the card a command, you wait, it interrupts you get a
618 * reply. All well and good. The complication arises because you use
619 * commands for filter list changes which come in at bh level from things
620 * like IPV6 group stuff.
621 */
622
623static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
624{
625 struct mc32_local *lp = netdev_priv(dev);
626 int ioaddr = dev->base_addr;
627 int ret = 0;
628
629 down(&lp->cmd_mutex);
630
631 /*
632 * My Turn
633 */
634
635 lp->cmd_nonblocking=0;
636 lp->exec_box->mbox=0;
637 lp->exec_box->mbox=cmd;
638 memcpy((void *)lp->exec_box->data, data, len);
639 barrier(); /* the memcpy forgot the volatile so be sure */
640
641 mc32_ready_poll(dev);
642 outb(1<<6, ioaddr+HOST_CMD);
643
644 wait_for_completion(&lp->execution_cmd);
645
646 if(lp->exec_box->mbox&(1<<13))
647 ret = -1;
648
649 up(&lp->cmd_mutex);
650
651 /*
652 * A multicast set got blocked - try it now
653 */
654
655 if(lp->mc_reload_wait)
656 {
657 mc32_reset_multicast_list(dev);
658 }
659
660 return ret;
661}
662
663
664/**
665 * mc32_start_transceiver - tell board to restart tx/rx
666 * @dev: The 3c527 card to issue the command to
667 *
668 * This may be called from the interrupt state, where it is used
669 * to restart the rx ring if the card runs out of rx buffers.
670 *
671 * We must first check if it's ok to (re)start the transceiver. See
672 * mc32_close for details.
673 */
674
static void mc32_start_transceiver(struct net_device *dev) {

	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Ignore RX overflow on device closure */
	if (lp->xceiver_desired_state==HALTED)
		return;

	/* Give the card the offset to the post-EOL-bit RX descriptor */
	/* (the descriptor after the last one we marked with CONTROL_EOL) */
	mc32_ready_poll(dev);
	lp->rx_box->mbox=0;
	lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
	outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);

	/* Restart TX as well; must wait for command readiness again first. */
	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);   /* card ignores this on RX restart */

	/* We are not interrupted on start completion */
}
696
697
698/**
699 * mc32_halt_transceiver - tell board to stop tx/rx
700 * @dev: The 3c527 card to issue the command to
701 *
702 * We issue the commands to halt the card's transceiver. In fact,
703 * after some experimenting we now simply tell the card to
704 * suspend. When issuing aborts occasionally odd things happened.
705 *
706 * We then sleep until the card has notified us that both rx and
707 * tx have been suspended.
708 */
709
static void mc32_halt_transceiver(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Suspend RX, then sleep until the IRQ handler signals completion. */
	mc32_ready_poll(dev);
	lp->rx_box->mbox=0;
	outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);

	/* Same again for TX; each suspend raises its own completion. */
	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);
}
725
726
727/**
728 * mc32_load_rx_ring - load the ring of receive buffers
729 * @dev: 3c527 to build the ring for
730 *
731 * This initialises the on-card and driver datastructures to
732 * the point where mc32_start_transceiver() can be called.
733 *
734 * The card sets up the receive ring for us. We are required to use the
735 * ring it provides, although the size of the ring is configurable.
736 *
737 * We allocate an sk_buff for each ring entry in turn and
738 * initialise its house-keeping info. At the same time, we read
739 * each 'next' pointer in our rx_ring array. This reduces slow
740 * shared-memory reads and makes it easy to access predecessor
741 * descriptors.
742 *
743 * We then set the end-of-list bit for the last entry so that the
744 * card will know when it has run out of buffers.
745 */
746
747static int mc32_load_rx_ring(struct net_device *dev)
748{
749 struct mc32_local *lp = netdev_priv(dev);
750 int i;
751 u16 rx_base;
752 volatile struct skb_header *p;
753
754 rx_base=lp->rx_chain;
755
756 for(i=0; i<RX_RING_LEN; i++) {
757 lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
758 if (lp->rx_ring[i].skb==NULL) {
759 for (;i>=0;i--)
760 kfree_skb(lp->rx_ring[i].skb);
761 return -ENOBUFS;
762 }
763 skb_reserve(lp->rx_ring[i].skb, 18);
764
765 p=isa_bus_to_virt(lp->base+rx_base);
766
767 p->control=0;
768 p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
769 p->status=0;
770 p->length=1532;
771
772 lp->rx_ring[i].p=p;
773 rx_base=p->next;
774 }
775
776 lp->rx_ring[i-1].p->control |= CONTROL_EOL;
777
778 lp->rx_ring_tail=0;
779
780 return 0;
781}
782
783
784/**
785 * mc32_flush_rx_ring - free the ring of receive buffers
786 * @lp: Local data of 3c527 to flush the rx ring of
787 *
788 * Free the buffer for each ring slot. This may be called
789 * before mc32_load_rx_ring(), eg. on error in mc32_open().
790 * Requires rx skb pointers to point to a valid skb, or NULL.
791 */
792
793static void mc32_flush_rx_ring(struct net_device *dev)
794{
795 struct mc32_local *lp = netdev_priv(dev);
796 int i;
797
798 for(i=0; i < RX_RING_LEN; i++)
799 {
800 if (lp->rx_ring[i].skb) {
801 dev_kfree_skb(lp->rx_ring[i].skb);
802 lp->rx_ring[i].skb = NULL;
803 }
804 lp->rx_ring[i].p=NULL;
805 }
806}
807
808
809/**
810 * mc32_load_tx_ring - load transmit ring
811 * @dev: The 3c527 card to issue the command to
812 *
813 * This sets up the host transmit data-structures.
814 *
815 * First, we obtain from the card it's current position in the tx
816 * ring, so that we will know where to begin transmitting
817 * packets.
818 *
819 * Then, we read the 'next' pointers from the on-card tx ring into
820 * our tx_ring array to reduce slow shared-mem reads. Finally, we
821 * intitalise the tx house keeping variables.
822 *
823 */
824
825static void mc32_load_tx_ring(struct net_device *dev)
826{
827 struct mc32_local *lp = netdev_priv(dev);
828 volatile struct skb_header *p;
829 int i;
830 u16 tx_base;
831
832 tx_base=lp->tx_box->data[0];
833
834 for(i=0 ; i<TX_RING_LEN ; i++)
835 {
836 p=isa_bus_to_virt(lp->base+tx_base);
837 lp->tx_ring[i].p=p;
838 lp->tx_ring[i].skb=NULL;
839
840 tx_base=p->next;
841 }
842
843 /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
844 /* see mc32_tx_ring */
845
846 atomic_set(&lp->tx_count, TX_RING_LEN-1);
847 atomic_set(&lp->tx_ring_head, 0);
848 lp->tx_ring_tail=0;
849}
850
851
852/**
853 * mc32_flush_tx_ring - free transmit ring
854 * @lp: Local data of 3c527 to flush the tx ring of
855 *
856 * If the ring is non-empty, zip over the it, freeing any
857 * allocated skb_buffs. The tx ring house-keeping variables are
858 * then reset. Requires rx skb pointers to point to a valid skb,
859 * or NULL.
860 */
861
862static void mc32_flush_tx_ring(struct net_device *dev)
863{
864 struct mc32_local *lp = netdev_priv(dev);
865 int i;
866
867 for (i=0; i < TX_RING_LEN; i++)
868 {
869 if (lp->tx_ring[i].skb)
870 {
871 dev_kfree_skb(lp->tx_ring[i].skb);
872 lp->tx_ring[i].skb = NULL;
873 }
874 }
875
876 atomic_set(&lp->tx_count, 0);
877 atomic_set(&lp->tx_ring_head, 0);
878 lp->tx_ring_tail=0;
879}
880
881
882/**
883 * mc32_open - handle 'up' of card
884 * @dev: device to open
885 *
886 * The user is trying to bring the card into ready state. This requires
887 * a brief dialogue with the card. Firstly we enable interrupts and then
888 * 'indications'. Without these enabled the card doesn't bother telling
889 * us what it has done. This had me puzzled for a week.
890 *
891 * We configure the number of card descriptors, then load the network
892 * address and multicast filters. Turn on the workaround mode. This
893 * works around a bug in the 82586 - it asks the firmware to do
894 * so. It has a performance (latency) hit but is needed on busy
895 * [read most] lans. We load the ring with buffers then we kick it
896 * all off.
897 */
898
static int mc32_open(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct mc32_local *lp = netdev_priv(dev);
	u8 one=1;
	u8 regs;
	u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};

	/*
	 *	Interrupts enabled
	 */

	regs=inb(ioaddr+HOST_CTRL);
	regs|=HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/*
	 *	Allow ourselves to issue commands
	 *	(cmd_mutex was initialised held - count 0 - in mc32_probe1)
	 */

	up(&lp->cmd_mutex);


	/*
	 *	Send the indications on command
	 */

	mc32_command(dev, 4, &one, 2);

	/*
	 *	Poke it to make sure it's really dead.
	 */

	mc32_halt_transceiver(dev);
	mc32_flush_tx_ring(dev);

	/*
	 *	Ask card to set up on-card descriptors to our spec
	 */

	if(mc32_command(dev, 8, descnumbuffs, 4)) {
		pr_info("%s: %s rejected our buffer configuration!\n",
			dev->name, cardname);
		mc32_close(dev);
		return -ENOBUFS;
	}

	/* Report new configuration */
	mc32_command(dev, 6, NULL, 0);

	/* Re-read the chain offsets: the card may have moved them. */
	lp->tx_chain 		= lp->exec_box->data[8];   /* Transmit list start offset */
	lp->rx_chain 		= lp->exec_box->data[10];  /* Receive list start offset */
	lp->tx_len 		= lp->exec_box->data[9];   /* Transmit list count */
	lp->rx_len 		= lp->exec_box->data[11];  /* Receive list count */

	/* Set Network Address */
	mc32_command(dev, 1, dev->dev_addr, 6);

	/* Set the filters */
	mc32_set_multicast_list(dev);

	if (WORKAROUND_82586) {
		u16 zero_word=0;
		mc32_command(dev, 0x0D, &zero_word, 2);   /* 82586 bug workaround on */
	}

	mc32_load_tx_ring(dev);

	if(mc32_load_rx_ring(dev))
	{
		mc32_close(dev);
		return -ENOBUFS;
	}

	lp->xceiver_desired_state = RUNNING;

	/* And finally, set the ball rolling... */
	mc32_start_transceiver(dev);

	netif_start_queue(dev);

	return 0;
}
982
983
984/**
985 * mc32_timeout - handle a timeout from the network layer
986 * @dev: 3c527 that timed out
987 *
988 * Handle a timeout on transmit from the 3c527. This normally means
989 * bad things as the hardware handles cable timeouts and mess for
990 * us.
991 *
992 */
993
994static void mc32_timeout(struct net_device *dev)
995{
996 pr_warning("%s: transmit timed out?\n", dev->name);
997 /* Try to restart the adaptor. */
998 netif_wake_queue(dev);
999}
1000
1001
1002/**
1003 * mc32_send_packet - queue a frame for transmit
1004 * @skb: buffer to transmit
1005 * @dev: 3c527 to send it out of
1006 *
1007 * Transmit a buffer. This normally means throwing the buffer onto
1008 * the transmit queue as the queue is quite large. If the queue is
1009 * full then we set tx_busy and return. Once the interrupt handler
1010 * gets messages telling it to reclaim transmit queue entries, we will
1011 * clear tx_busy and the kernel will start calling this again.
1012 *
1013 * We do not disable interrupts or acquire any locks; this can
1014 * run concurrently with mc32_tx_ring(), and the function itself
1015 * is serialised at a higher layer. However, similarly for the
1016 * card itself, we must ensure that we update tx_ring_head only
1017 * after we've established a valid packet on the tx ring (and
1018 * before we let the card "see" it, to prevent it racing with the
1019 * irq handler).
1020 *
1021 */
1022
static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);

	volatile struct skb_header *p, *np;

	/* Assume the ring may be full; re-enable the queue below if not. */
	netif_stop_queue(dev);

	if(atomic_read(&lp->tx_count)==0) {
		return NETDEV_TX_BUSY;
	}

	/* skb_padto frees the skb itself on failure - nothing to clean up. */
	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return NETDEV_TX_OK;
	}

	atomic_dec(&lp->tx_count);

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;

	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p;

	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	np->length      = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
	np->data	= isa_virt_to_bus(skb->data);
	np->status	= 0;
	np->control     = CONTROL_EOP | CONTROL_EOL;
	/* Descriptor must be fully written before the card can see it. */
	wmb();

	/*
	 *	The new frame has been setup; we can now
	 *	let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head);
	/* Clearing EOL on the previous descriptor releases ours to the card. */
	p->control     &= ~CONTROL_EOL;

	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}
1072
1073
1074/**
1075 * mc32_update_stats - pull off the on board statistics
1076 * @dev: 3c527 to service
1077 *
1078 *
1079 * Query and reset the on-card stats. There's the small possibility
1080 * of a race here, which would result in an underestimation of
1081 * actual errors. As such, we'd prefer to keep all our stats
1082 * collection in software. As a rule, we do. However it can't be
1083 * used for rx errors and collisions as, by default, the card discards
1084 * bad rx packets.
1085 *
1086 * Setting the SAV BP in the rx filter command supposedly
1087 * stops this behaviour. However, testing shows that it only seems to
1088 * enable the collation of on-card rx statistics --- the driver
1089 * never sees an RX descriptor with an error status set.
1090 *
1091 */
1092
1093static void mc32_update_stats(struct net_device *dev)
1094{
1095 struct mc32_local *lp = netdev_priv(dev);
1096 volatile struct mc32_stats *st = lp->stats;
1097
1098 u32 rx_errors=0;
1099
1100 rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors;
1101 st->rx_crc_errors=0;
1102 rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors;
1103 st->rx_overrun_errors=0;
1104 rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
1105 st->rx_alignment_errors=0;
1106 rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
1107 st->rx_tooshort_errors=0;
1108 rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
1109 st->rx_outofresource_errors=0;
1110 dev->stats.rx_errors=rx_errors;
1111
1112 /* Number of packets which saw one collision */
1113 dev->stats.collisions+=st->dataC[10];
1114 st->dataC[10]=0;
1115
1116 /* Number of packets which saw 2--15 collisions */
1117 dev->stats.collisions+=st->dataC[11];
1118 st->dataC[11]=0;
1119}
1120
1121
1122/**
1123 * mc32_rx_ring - process the receive ring
1124 * @dev: 3c527 that needs its receive ring processing
1125 *
1126 *
1127 * We have received one or more indications from the card that a
1128 * receive has completed. The buffer ring thus contains dirty
1129 * entries. We walk the ring by iterating over the circular rx_ring
1130 * array, starting at the next dirty buffer (which happens to be the
1131 * one we finished up at last time around).
1132 *
1133 * For each completed packet, we will either copy it and pass it up
1134 * the stack or, if the packet is near MTU sized, we allocate
1135 * another buffer and flip the old one up the stack.
1136 *
1137 * We must succeed in keeping a buffer on the ring. If necessary we
1138 * will toss a received packet rather than lose a ring entry. Once
1139 * the first uncompleted descriptor is found, we move the
1140 * End-Of-List bit to include the buffers just processed.
1141 *
1142 */
1143
static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x=0;

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	do
	{
		p=lp->rx_ring[rx_ring_tail].p;

		if(!(p->status & (1<<7))) { /* Not COMPLETED */
			break;
		}
		if(p->status & (1<<6)) /* COMPLETED_OK */
		{

			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK) &&
			    ((newskb = netdev_alloc_skb(dev, 1532)) != NULL))
			{
				/* Flip the filled skb up the stack, park a fresh one. */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				skb_reserve(newskb,18);
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=isa_virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame: copy out, keep the ring buffer. */
				skb = netdev_alloc_skb(dev, length + 2);

				if(skb==NULL) {
					dev->stats.rx_dropped++;
					goto dropped;
				}

				skb_reserve(skb,2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Recycle the descriptor whether delivered, errored or dropped. */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
	while(x++<48); /* bound the work done per invocation */

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL;

		lp->rx_ring_tail=rx_ring_tail;
	}
}
1219
1220
1221/**
1222 * mc32_tx_ring - process completed transmits
1223 * @dev: 3c527 that needs its transmit ring processing
1224 *
1225 *
1226 * This operates in a similar fashion to mc32_rx_ring. We iterate
1227 * over the transmit ring. For each descriptor which has been
1228 * processed by the card, we free its associated buffer and note
1229 * any errors. This continues until the transmit ring is emptied
1230 * or we reach a descriptor that hasn't yet been processed by the
1231 * card.
1232 *
1233 */
1234
static void mc32_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */

	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
	{
		u16 t;

		t=next_tx(lp->tx_ring_tail);
		np=lp->tx_ring[t].p;

		if(!(np->status & (1<<7)))
		{
			/* Not COMPLETED */
			break;
		}
		dev->stats.tx_packets++;
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			dev->stats.tx_errors++;

			/* Low nibble of status encodes the failure reason. */
			switch(np->status&0x0F)
			{
			case 1:
				dev->stats.tx_aborted_errors++;
				break; /* Max collisions */
			case 2:
				dev->stats.tx_fifo_errors++;
				break;
			case 3:
				dev->stats.tx_carrier_errors++;
				break;
			case 4:
				dev->stats.tx_window_errors++;
				break; /* CTS Lost */
			case 5:
				dev->stats.tx_aborted_errors++;
				break; /* Transmit timeout */
			}
		}
		/* Packets are sent in order - this is
		   basically a FIFO queue of buffers matching
		   the card ring */
		dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
		atomic_inc(&lp->tx_count);
		netif_wake_queue(dev);

		lp->tx_ring_tail=t;
	}

}
1296
1297
1298/**
1299 * mc32_interrupt - handle an interrupt from a 3c527
1300 * @irq: Interrupt number
1301 * @dev_id: 3c527 that requires servicing
1302 * @regs: Registers (unused)
1303 *
1304 *
1305 * An interrupt is raised whenever the 3c527 writes to the command
1306 * register. This register contains the message it wishes to send us
1307 * packed into a single byte field. We keep reading status entries
1308 * until we have processed all the control items, but simply count
1309 * transmit and receive reports. When all reports are in we empty the
1310 * transceiver rings as appropriate. This saves the overhead of
1311 * multiple command requests.
1312 *
1313 * Because MCA is level-triggered, we shouldn't miss indications.
1314 * Therefore, we needn't ask the card to suspend interrupts within
1315 * this handler. The card receives an implicit acknowledgment of the
1316 * current interrupt when we read the command register.
1317 *
1318 */
1319
static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;
	int rx_event = 0;
	int tx_event = 0;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* See whats cooking */

	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		/* Reading the command register acknowledges the interrupt. */
		status=inb(ioaddr+HOST_CMD);

		pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
			(status&7), (status>>3)&7, (status>>6)&1,
			(status>>7)&1, boguscount);

		/* Bits 0-2: transmit event code. */
		switch(status&7)
		{
		case 0:
			break;
		case 6: /* TX fail */
		case 2:	/* TX ok */
			tx_event = 1;
			break;
		case 3: /* Halt */
		case 4: /* Abort */
			complete(&lp->xceiver_cmd);
			break;
		default:
			pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;
		/* Bits 3-5: receive event code. */
		switch(status&7)
		{
		case 0:
			break;
		case 2:	/* RX */
			rx_event=1;
			break;
		case 3: /* Halt */
		case 4: /* Abort */
			complete(&lp->xceiver_cmd);
			break;
		case 6:
			/* Out of RX buffers stat */
			/* Must restart rx */
			dev->stats.rx_dropped++;
			mc32_rx_ring(dev);
			mc32_start_transceiver(dev);
			break;
		default:
			pr_notice("%s: strange rx ack %d\n",
			       dev->name, status&7);
		}
		status>>=3;
		/* Bit 6: an exec command finished. */
		if(status&1)
		{
			/*
			 * No thread is waiting: we need to tidy
			 * up ourself.
			 */

			if (lp->cmd_nonblocking) {
				up(&lp->cmd_mutex);
				if (lp->mc_reload_wait)
					mc32_reset_multicast_list(dev);
			}
			else complete(&lp->execution_cmd);
		}
		/* Bit 7: a statistics counter is about to overflow. */
		if(status&2)
		{
			/*
			 * We get interrupted once per
			 * counter that is about to overflow.
			 */

			mc32_update_stats(dev);
		}
	}


	/*
	 *	Process the transmit and receive rings
	 */

	if(tx_event)
		mc32_tx_ring(dev);

	if(rx_event)
		mc32_rx_ring(dev);

	return IRQ_HANDLED;
}
1418
1419
1420/**
1421 * mc32_close - user configuring the 3c527 down
1422 * @dev: 3c527 card to shut down
1423 *
1424 * The 3c527 is a bus mastering device. We must be careful how we
1425 * shut it down. It may also be running shared interrupt so we have
1426 * to be sure to silence it properly
1427 *
1428 * We indicate that the card is closing to the rest of the
1429 * driver. Otherwise, it is possible that the card may run out
1430 * of receive buffers and restart the transceiver while we're
1431 * trying to close it.
1432 *
1433 * We abort any receive and transmits going on and then wait until
1434 * any pending exec commands have completed in other code threads.
1435 * In theory we can't get here while that is true, in practice I am
1436 * paranoid
1437 *
1438 * We turn off the interrupt enable for the board to be sure it can't
1439 * intefere with other devices.
1440 */
1441
static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	/* Flag the close to the rest of the driver so nothing restarts the
	   transceiver while we are tearing it down. */
	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 * Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */

	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */

	/* Mask the board's interrupt enable so it cannot interfere with
	   other devices (the IRQ line may be shared). */
	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	/* Pull the final counters off the card before it goes quiet. */
	mc32_update_stats(dev);

	return 0;
}
1480
1481
1482/**
1483 * mc32_get_stats - hand back stats to network layer
1484 * @dev: The 3c527 card to handle
1485 *
1486 * We've collected all the stats we can in software already. Now
1487 * it's time to update those kept on-card and return the lot.
1488 *
1489 */
1490
1491static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1492{
1493 mc32_update_stats(dev);
1494 return &dev->stats;
1495}
1496
1497
1498/**
1499 * do_mc32_set_multicast_list - attempt to update multicasts
1500 * @dev: 3c527 device to load the list on
1501 * @retry: indicates this is not the first call.
1502 *
1503 *
1504 * Actually set or clear the multicast filter for this adaptor. The
1505 * locking issues are handled by this routine. We have to track
1506 * state as it may take multiple calls to get the command sequence
1507 * completed. We just keep trying to schedule the loads until we
1508 * manage to process them all.
1509 *
1510 * num_addrs == -1 Promiscuous mode, receive all packets
1511 *
1512 * num_addrs == 0 Normal mode, clear multicast list
1513 *
1514 * num_addrs > 0 Multicast mode, receive normal and MC packets,
1515 * and do best-effort filtering.
1516 *
1517 * See mc32_update_stats() regards setting the SAV BP bit.
1518 *
1519 */
1520
1521static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1522{
1523 struct mc32_local *lp = netdev_priv(dev);
1524 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
1525
1526 if ((dev->flags&IFF_PROMISC) ||
1527 (dev->flags&IFF_ALLMULTI) ||
1528 netdev_mc_count(dev) > 10)
1529 /* Enable promiscuous mode */
1530 filt |= 1;
1531 else if (!netdev_mc_empty(dev))
1532 {
1533 unsigned char block[62];
1534 unsigned char *bp;
1535 struct netdev_hw_addr *ha;
1536
1537 if(retry==0)
1538 lp->mc_list_valid = 0;
1539 if(!lp->mc_list_valid)
1540 {
1541 block[1]=0;
1542 block[0]=netdev_mc_count(dev);
1543 bp=block+2;
1544
1545 netdev_for_each_mc_addr(ha, dev) {
1546 memcpy(bp, ha->addr, 6);
1547 bp+=6;
1548 }
1549 if(mc32_command_nowait(dev, 2, block,
1550 2+6*netdev_mc_count(dev))==-1)
1551 {
1552 lp->mc_reload_wait = 1;
1553 return;
1554 }
1555 lp->mc_list_valid=1;
1556 }
1557 }
1558
1559 if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
1560 {
1561 lp->mc_reload_wait = 1;
1562 }
1563 else {
1564 lp->mc_reload_wait = 0;
1565 }
1566}
1567
1568
/**
 * mc32_set_multicast_list - queue multicast list update
 * @dev: The 3c527 to use
 *
 * Entry point used by the network core when the address lists change.
 * Begins a fresh load (retry == 0), overriding any load already in
 * progress.
 */

static void mc32_set_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev, 0);
}
1582
1583
/**
 * mc32_reset_multicast_list - reset multicast list
 * @dev: The 3c527 to use
 *
 * Continue a previously started multicast load (retry == 1). Called
 * from the interrupt handler when the command engine frees up; if this
 * step fails to complete it will be rescheduled the same way.
 */

static void mc32_reset_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev, 1);
}
1597
1598static void netdev_get_drvinfo(struct net_device *dev,
1599 struct ethtool_drvinfo *info)
1600{
1601 strcpy(info->driver, DRV_NAME);
1602 strcpy(info->version, DRV_VERSION);
1603 sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
1604}
1605
1606static u32 netdev_get_msglevel(struct net_device *dev)
1607{
1608 return mc32_debug;
1609}
1610
1611static void netdev_set_msglevel(struct net_device *dev, u32 level)
1612{
1613 mc32_debug = level;
1614}
1615
/* ethtool operations: driver identification and debug-level control. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
1621
1622#ifdef MODULE
1623
1624static struct net_device *this_device;
1625
1626/**
1627 * init_module - entry point
1628 *
1629 * Probe and locate a 3c527 card. This really should probe and locate
1630 * all the 3c527 cards in the machine not just one of them. Yes you can
1631 * insmod multiple modules for now but it's a hack.
1632 */
1633
1634int __init init_module(void)
1635{
1636 this_device = mc32_probe(-1);
1637 if (IS_ERR(this_device))
1638 return PTR_ERR(this_device);
1639 return 0;
1640}
1641
1642/**
1643 * cleanup_module - free resources for an unload
1644 *
1645 * Unloading time. We release the MCA bus resources and the interrupt
1646 * at which point everything is ready to unload. The card must be stopped
1647 * at this point or we would not have been called. When we unload we
1648 * leave the card stopped but not totally shut down. When the card is
1649 * initialized it must be rebooted or the rings reloaded before any
1650 * transmit operations are allowed to start scribbling into memory.
1651 */
1652
1653void __exit cleanup_module(void)
1654{
1655 unregister_netdev(this_device);
1656 cleanup_card(this_device);
1657 free_netdev(this_device);
1658}
1659
1660#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/3c527.h b/drivers/net/ethernet/i825xx/3c527.h
deleted file mode 100644
index d693b8d15cde..000000000000
--- a/drivers/net/ethernet/i825xx/3c527.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * 3COM "EtherLink MC/32" Descriptions
3 */
4
5/*
6 * Registers
7 */
8
9#define HOST_CMD 0
10#define HOST_CMD_START_RX (1<<3)
11#define HOST_CMD_SUSPND_RX (3<<3)
12#define HOST_CMD_RESTRT_RX (5<<3)
13
14#define HOST_CMD_SUSPND_TX 3
15#define HOST_CMD_RESTRT_TX 5
16
17
18#define HOST_STATUS 2
19#define HOST_STATUS_CRR (1<<6)
20#define HOST_STATUS_CWR (1<<5)
21
22
23#define HOST_CTRL 6
24#define HOST_CTRL_ATTN (1<<7)
25#define HOST_CTRL_RESET (1<<6)
26#define HOST_CTRL_INTE (1<<2)
27
28#define HOST_RAMPAGE 8
29
30#define HALTED 0
31#define RUNNING 1
32
33struct mc32_mailbox
34{
35 u16 mbox;
36 u16 data[1];
37} __packed;
38
39struct skb_header
40{
41 u8 status;
42 u8 control;
43 u16 next; /* Do not change! */
44 u16 length;
45 u32 data;
46} __packed;
47
48struct mc32_stats
49{
50 /* RX Errors */
51 u32 rx_crc_errors;
52 u32 rx_alignment_errors;
53 u32 rx_overrun_errors;
54 u32 rx_tooshort_errors;
55 u32 rx_toolong_errors;
56 u32 rx_outofresource_errors;
57
58 u32 rx_discarded; /* via card pattern match filter */
59
60 /* TX Errors */
61 u32 tx_max_collisions;
62 u32 tx_carrier_errors;
63 u32 tx_underrun_errors;
64 u32 tx_cts_errors;
65 u32 tx_timeout_errors;
66
67 /* various cruft */
68 u32 dataA[6];
69 u16 dataB[5];
70 u32 dataC[14];
71} __packed;
72
73#define STATUS_MASK 0x0F
74#define COMPLETED (1<<7)
75#define COMPLETED_OK (1<<6)
76#define BUFFER_BUSY (1<<5)
77
78#define CONTROL_EOP (1<<7) /* End Of Packet */
79#define CONTROL_EOL (1<<6) /* End of List */
80
81#define MCA_MC32_ID 0x0041 /* Our MCA ident */
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
index ca1ae985c6df..fed5080a6b62 100644
--- a/drivers/net/ethernet/i825xx/Kconfig
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -43,28 +43,6 @@ config EL16
43 To compile this driver as a module, choose M here. The module 43 To compile this driver as a module, choose M here. The module
44 will be called 3c507. 44 will be called 3c507.
45 45
46config ELMC
47 tristate "3c523 \"EtherLink/MC\" support"
48 depends on MCA_LEGACY
49 ---help---
50 If you have a network (Ethernet) card of this type, say Y and read
51 the Ethernet-HOWTO, available from
52 <http://www.tldp.org/docs.html#howto>.
53
54 To compile this driver as a module, choose M here. The module
55 will be called 3c523.
56
57config ELMC_II
58 tristate "3c527 \"EtherLink/MC 32\" support (EXPERIMENTAL)"
59 depends on MCA && MCA_LEGACY
60 ---help---
61 If you have a network (Ethernet) card of this type, say Y and read
62 the Ethernet-HOWTO, available from
63 <http://www.tldp.org/docs.html#howto>.
64
65 To compile this driver as a module, choose M here. The module
66 will be called 3c527.
67
68config ARM_ETHER1 46config ARM_ETHER1
69 tristate "Acorn Ether1 support" 47 tristate "Acorn Ether1 support"
70 depends on ARM && ARCH_ACORN 48 depends on ARM && ARCH_ACORN
diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile
index f68a3694968a..6adff85e8ecc 100644
--- a/drivers/net/ethernet/i825xx/Makefile
+++ b/drivers/net/ethernet/i825xx/Makefile
@@ -7,8 +7,6 @@ obj-$(CONFIG_EEXPRESS) += eexpress.o
7obj-$(CONFIG_EEXPRESS_PRO) += eepro.o 7obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
8obj-$(CONFIG_ELPLUS) += 3c505.o 8obj-$(CONFIG_ELPLUS) += 3c505.o
9obj-$(CONFIG_EL16) += 3c507.o 9obj-$(CONFIG_EL16) += 3c507.o
10obj-$(CONFIG_ELMC) += 3c523.o
11obj-$(CONFIG_ELMC_II) += 3c527.o
12obj-$(CONFIG_LP486E) += lp486e.o 10obj-$(CONFIG_LP486E) += lp486e.o
13obj-$(CONFIG_NI52) += ni52.o 11obj-$(CONFIG_NI52) += ni52.o
14obj-$(CONFIG_SUN3_82586) += sun3_82586.o 12obj-$(CONFIG_SUN3_82586) += sun3_82586.o
diff --git a/drivers/net/ethernet/i825xx/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c
index cc2e66ad4436..7a6a2f04c5b1 100644
--- a/drivers/net/ethernet/i825xx/eexpress.c
+++ b/drivers/net/ethernet/i825xx/eexpress.c
@@ -9,7 +9,7 @@
9 * Many modifications, and currently maintained, by 9 * Many modifications, and currently maintained, by
10 * Philip Blundell <philb@gnu.org> 10 * Philip Blundell <philb@gnu.org>
11 * Added the Compaq LTE Alan Cox <alan@lxorguk.ukuu.org.uk> 11 * Added the Compaq LTE Alan Cox <alan@lxorguk.ukuu.org.uk>
12 * Added MCA support Adam Fritzler 12 * Added MCA support Adam Fritzler (now deleted)
13 * 13 *
14 * Note - this driver is experimental still - it has problems on faster 14 * Note - this driver is experimental still - it has problems on faster
15 * machines. Someone needs to sit down and go through it line by line with 15 * machines. Someone needs to sit down and go through it line by line with
@@ -111,7 +111,6 @@
111#include <linux/netdevice.h> 111#include <linux/netdevice.h>
112#include <linux/etherdevice.h> 112#include <linux/etherdevice.h>
113#include <linux/skbuff.h> 113#include <linux/skbuff.h>
114#include <linux/mca-legacy.h>
115#include <linux/spinlock.h> 114#include <linux/spinlock.h>
116#include <linux/bitops.h> 115#include <linux/bitops.h>
117#include <linux/jiffies.h> 116#include <linux/jiffies.h>
@@ -227,16 +226,6 @@ static unsigned short start_code[] = {
227/* maps irq number to EtherExpress magic value */ 226/* maps irq number to EtherExpress magic value */
228static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 }; 227static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 };
229 228
230#ifdef CONFIG_MCA_LEGACY
231/* mapping of the first four bits of the second POS register */
232static unsigned short mca_iomap[] = {
233 0x270, 0x260, 0x250, 0x240, 0x230, 0x220, 0x210, 0x200,
234 0x370, 0x360, 0x350, 0x340, 0x330, 0x320, 0x310, 0x300
235};
236/* bits 5-7 of the second POS register */
237static char mca_irqmap[] = { 12, 9, 3, 4, 5, 10, 11, 15 };
238#endif
239
240/* 229/*
241 * Prototypes for Linux interface 230 * Prototypes for Linux interface
242 */ 231 */
@@ -340,53 +329,6 @@ static int __init do_express_probe(struct net_device *dev)
340 329
341 dev->if_port = 0xff; /* not set */ 330 dev->if_port = 0xff; /* not set */
342 331
343#ifdef CONFIG_MCA_LEGACY
344 if (MCA_bus) {
345 int slot = 0;
346
347 /*
348 * Only find one card at a time. Subsequent calls
349 * will find others, however, proper multicard MCA
350 * probing and setup can't be done with the
351 * old-style Space.c init routines. -- ASF
352 */
353 while (slot != MCA_NOTFOUND) {
354 int pos0, pos1;
355
356 slot = mca_find_unused_adapter(0x628B, slot);
357 if (slot == MCA_NOTFOUND)
358 break;
359
360 pos0 = mca_read_stored_pos(slot, 2);
361 pos1 = mca_read_stored_pos(slot, 3);
362 ioaddr = mca_iomap[pos1&0xf];
363
364 dev->irq = mca_irqmap[(pos1>>4)&0x7];
365
366 /*
367 * XXX: Transceiver selection is done
368 * differently on the MCA version.
369 * How to get it to select something
370 * other than external/AUI is currently
371 * unknown. This code is just for looks. -- ASF
372 */
373 if ((pos0 & 0x7) == 0x1)
374 dev->if_port = AUI;
375 else if ((pos0 & 0x7) == 0x5) {
376 if (pos1 & 0x80)
377 dev->if_port = BNC;
378 else
379 dev->if_port = TPE;
380 }
381
382 mca_set_adapter_name(slot, "Intel EtherExpress 16 MCA");
383 mca_set_adapter_procfn(slot, NULL, dev);
384 mca_mark_as_used(slot);
385
386 break;
387 }
388 }
389#endif
390 if (ioaddr&0xfe00) { 332 if (ioaddr&0xfe00) {
391 if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) 333 if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress"))
392 return -EBUSY; 334 return -EBUSY;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3516e17a399d..f4d2da0db1b1 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -290,16 +290,18 @@ static void ehea_update_bcmc_registrations(void)
290 290
291 arr[i].adh = adapter->handle; 291 arr[i].adh = adapter->handle;
292 arr[i].port_id = port->logical_port_id; 292 arr[i].port_id = port->logical_port_id;
293 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | 293 arr[i].reg_type = EHEA_BCMC_MULTICAST |
294 EHEA_BCMC_MULTICAST |
295 EHEA_BCMC_UNTAGGED; 294 EHEA_BCMC_UNTAGGED;
295 if (mc_entry->macaddr == 0)
296 arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
296 arr[i++].macaddr = mc_entry->macaddr; 297 arr[i++].macaddr = mc_entry->macaddr;
297 298
298 arr[i].adh = adapter->handle; 299 arr[i].adh = adapter->handle;
299 arr[i].port_id = port->logical_port_id; 300 arr[i].port_id = port->logical_port_id;
300 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | 301 arr[i].reg_type = EHEA_BCMC_MULTICAST |
301 EHEA_BCMC_MULTICAST |
302 EHEA_BCMC_VLANID_ALL; 302 EHEA_BCMC_VLANID_ALL;
303 if (mc_entry->macaddr == 0)
304 arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
303 arr[i++].macaddr = mc_entry->macaddr; 305 arr[i++].macaddr = mc_entry->macaddr;
304 num_registrations -= 2; 306 num_registrations -= 2;
305 } 307 }
@@ -1838,8 +1840,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1838 u64 hret; 1840 u64 hret;
1839 u8 reg_type; 1841 u8 reg_type;
1840 1842
1841 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST 1843 reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
1842 | EHEA_BCMC_UNTAGGED; 1844 if (mc_mac_addr == 0)
1845 reg_type |= EHEA_BCMC_SCOPE_ALL;
1843 1846
1844 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1847 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1845 port->logical_port_id, 1848 port->logical_port_id,
@@ -1847,8 +1850,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1847 if (hret) 1850 if (hret)
1848 goto out; 1851 goto out;
1849 1852
1850 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST 1853 reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
1851 | EHEA_BCMC_VLANID_ALL; 1854 if (mc_mac_addr == 0)
1855 reg_type |= EHEA_BCMC_SCOPE_ALL;
1852 1856
1853 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1857 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1854 port->logical_port_id, 1858 port->logical_port_id,
@@ -1898,7 +1902,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
1898 netdev_err(dev, 1902 netdev_err(dev,
1899 "failed enabling IFF_ALLMULTI\n"); 1903 "failed enabling IFF_ALLMULTI\n");
1900 } 1904 }
1901 } else 1905 } else {
1902 if (!enable) { 1906 if (!enable) {
1903 /* Disable ALLMULTI */ 1907 /* Disable ALLMULTI */
1904 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC); 1908 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
@@ -1908,6 +1912,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
1908 netdev_err(dev, 1912 netdev_err(dev,
1909 "failed disabling IFF_ALLMULTI\n"); 1913 "failed disabling IFF_ALLMULTI\n");
1910 } 1914 }
1915 }
1911} 1916}
1912 1917
1913static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) 1918static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
@@ -1941,11 +1946,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
1941 struct netdev_hw_addr *ha; 1946 struct netdev_hw_addr *ha;
1942 int ret; 1947 int ret;
1943 1948
1944 if (port->promisc) { 1949 ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
1945 ehea_promiscuous(dev, 1);
1946 return;
1947 }
1948 ehea_promiscuous(dev, 0);
1949 1950
1950 if (dev->flags & IFF_ALLMULTI) { 1951 if (dev->flags & IFF_ALLMULTI) {
1951 ehea_allmulti(dev, 1); 1952 ehea_allmulti(dev, 1);
@@ -2463,6 +2464,7 @@ static int ehea_down(struct net_device *dev)
2463 return 0; 2464 return 0;
2464 2465
2465 ehea_drop_multicast_list(dev); 2466 ehea_drop_multicast_list(dev);
2467 ehea_allmulti(dev, 0);
2466 ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 2468 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2467 2469
2468 ehea_free_interrupts(dev); 2470 ehea_free_interrupts(dev);
@@ -3261,6 +3263,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
3261 struct ehea_adapter *adapter; 3263 struct ehea_adapter *adapter;
3262 const u64 *adapter_handle; 3264 const u64 *adapter_handle;
3263 int ret; 3265 int ret;
3266 int i;
3264 3267
3265 if (!dev || !dev->dev.of_node) { 3268 if (!dev || !dev->dev.of_node) {
3266 pr_err("Invalid ibmebus device probed\n"); 3269 pr_err("Invalid ibmebus device probed\n");
@@ -3314,17 +3317,9 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
3314 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, 3317 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3315 (unsigned long)adapter); 3318 (unsigned long)adapter);
3316 3319
3317 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3318 ehea_interrupt_neq, IRQF_DISABLED,
3319 "ehea_neq", adapter);
3320 if (ret) {
3321 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3322 goto out_kill_eq;
3323 }
3324
3325 ret = ehea_create_device_sysfs(dev); 3320 ret = ehea_create_device_sysfs(dev);
3326 if (ret) 3321 if (ret)
3327 goto out_free_irq; 3322 goto out_kill_eq;
3328 3323
3329 ret = ehea_setup_ports(adapter); 3324 ret = ehea_setup_ports(adapter);
3330 if (ret) { 3325 if (ret) {
@@ -3332,15 +3327,30 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
3332 goto out_rem_dev_sysfs; 3327 goto out_rem_dev_sysfs;
3333 } 3328 }
3334 3329
3330 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3331 ehea_interrupt_neq, IRQF_DISABLED,
3332 "ehea_neq", adapter);
3333 if (ret) {
3334 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3335 goto out_shutdown_ports;
3336 }
3337
3338 /* Handle any events that might be pending. */
3339 tasklet_hi_schedule(&adapter->neq_tasklet);
3340
3335 ret = 0; 3341 ret = 0;
3336 goto out; 3342 goto out;
3337 3343
3344out_shutdown_ports:
3345 for (i = 0; i < EHEA_MAX_PORTS; i++)
3346 if (adapter->port[i]) {
3347 ehea_shutdown_single_port(adapter->port[i]);
3348 adapter->port[i] = NULL;
3349 }
3350
3338out_rem_dev_sysfs: 3351out_rem_dev_sysfs:
3339 ehea_remove_device_sysfs(dev); 3352 ehea_remove_device_sysfs(dev);
3340 3353
3341out_free_irq:
3342 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3343
3344out_kill_eq: 3354out_kill_eq:
3345 ehea_destroy_eq(adapter->neq); 3355 ehea_destroy_eq(adapter->neq);
3346 3356
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
index 52c456ec4d6c..8364815c32ff 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -450,7 +450,7 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
450 void *cb_addr); 450 void *cb_addr);
451 451
452#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63) 452#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63)
453#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63) 453#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(60, 63)
454#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63) 454#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63)
455#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63) 455#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63)
456 456
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 76213162fbe3..79b07ec6726f 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_INTEL
7 default y 7 default y
8 depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \ 8 depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
9 ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \ 9 ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
10 GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \ 10 GSC || BVME6000 || MVME16x || \
11 (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \ 11 (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
12 EXPERIMENTAL 12 EXPERIMENTAL
13 ---help--- 13 ---help---
@@ -120,6 +120,17 @@ config IGB_DCA
120 driver. DCA is a method for warming the CPU cache before data 120 driver. DCA is a method for warming the CPU cache before data
121 is used, with the intent of lessening the impact of cache misses. 121 is used, with the intent of lessening the impact of cache misses.
122 122
123config IGB_PTP
124 bool "PTP Hardware Clock (PHC)"
125 default y
126 depends on IGB && PTP_1588_CLOCK
127 ---help---
128 Say Y here if you want to use PTP Hardware Clock (PHC) in the
129 driver. Only the basic clock operations have been implemented.
130
131 Every timestamp and clock read operations must consult the
132 overflow counter to form a correct time value.
133
123config IGBVF 134config IGBVF
124 tristate "Intel(R) 82576 Virtual Function Ethernet support" 135 tristate "Intel(R) 82576 Virtual Function Ethernet support"
125 depends on PCI 136 depends on PCI
@@ -182,6 +193,14 @@ config IXGBE
182 To compile this driver as a module, choose M here. The module 193 To compile this driver as a module, choose M here. The module
183 will be called ixgbe. 194 will be called ixgbe.
184 195
196config IXGBE_HWMON
197 bool "Intel(R) 10GbE PCI Express adapters HWMON support"
198 default y
199 depends on IXGBE && HWMON && !(IXGBE=y && HWMON=m)
200 ---help---
201 Say Y if you want to expose the thermal sensor data on some of
202 our cards, via a hwmon sysfs interface.
203
185config IXGBE_DCA 204config IXGBE_DCA
186 bool "Direct Cache Access (DCA) Support" 205 bool "Direct Cache Access (DCA) Support"
187 default y 206 default y
@@ -201,6 +220,17 @@ config IXGBE_DCB
201 220
202 If unsure, say N. 221 If unsure, say N.
203 222
223config IXGBE_PTP
224 bool "PTP Clock Support"
225 default n
226 depends on IXGBE && PTP_1588_CLOCK
227 ---help---
228 Say Y here if you want support for 1588 Timestamping with a
229 PHC device, using the PTP 1588 Clock support. This is
230 required to enable timestamping support for the device.
231
232 If unsure, say N.
233
204config IXGBEVF 234config IXGBEVF
205 tristate "Intel(R) 82599 Virtual Function Ethernet support" 235 tristate "Intel(R) 82599 Virtual Function Ethernet support"
206 depends on PCI_MSI 236 depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index e498effb85d9..ada720b42ff6 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1759,6 +1759,7 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1759 skb->data, skb->len, PCI_DMA_TODEVICE)); 1759 skb->data, skb->len, PCI_DMA_TODEVICE));
1760 /* check for mapping failure? */ 1760 /* check for mapping failure? */
1761 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1761 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1762 skb_tx_timestamp(skb);
1762} 1763}
1763 1764
1764static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, 1765static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
@@ -2733,6 +2734,7 @@ static const struct ethtool_ops e100_ethtool_ops = {
2733 .set_phys_id = e100_set_phys_id, 2734 .set_phys_id = e100_set_phys_id,
2734 .get_ethtool_stats = e100_get_ethtool_stats, 2735 .get_ethtool_stats = e100_get_ethtool_stats,
2735 .get_sset_count = e100_get_sset_count, 2736 .get_sset_count = e100_get_sset_count,
2737 .get_ts_info = ethtool_op_get_ts_info,
2736}; 2738};
2737 2739
2738static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2740static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 4348b6fd44fa..95731c841044 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -493,7 +493,11 @@ out:
493static void e1000_down_and_stop(struct e1000_adapter *adapter) 493static void e1000_down_and_stop(struct e1000_adapter *adapter)
494{ 494{
495 set_bit(__E1000_DOWN, &adapter->flags); 495 set_bit(__E1000_DOWN, &adapter->flags);
496 cancel_work_sync(&adapter->reset_task); 496
497 /* Only kill reset task if adapter is not resetting */
498 if (!test_bit(__E1000_RESETTING, &adapter->flags))
499 cancel_work_sync(&adapter->reset_task);
500
497 cancel_delayed_work_sync(&adapter->watchdog_task); 501 cancel_delayed_work_sync(&adapter->watchdog_task);
498 cancel_delayed_work_sync(&adapter->phy_info_task); 502 cancel_delayed_work_sync(&adapter->phy_info_task);
499 cancel_delayed_work_sync(&adapter->fifo_stall_task); 503 cancel_delayed_work_sync(&adapter->fifo_stall_task);
@@ -827,9 +831,10 @@ static int e1000_set_features(struct net_device *netdev,
827 if (changed & NETIF_F_HW_VLAN_RX) 831 if (changed & NETIF_F_HW_VLAN_RX)
828 e1000_vlan_mode(netdev, features); 832 e1000_vlan_mode(netdev, features);
829 833
830 if (!(changed & NETIF_F_RXCSUM)) 834 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
831 return 0; 835 return 0;
832 836
837 netdev->features = features;
833 adapter->rx_csum = !!(features & NETIF_F_RXCSUM); 838 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
834 839
835 if (netif_running(netdev)) 840 if (netif_running(netdev))
@@ -1074,6 +1079,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1074 1079
1075 netdev->features |= netdev->hw_features; 1080 netdev->features |= netdev->hw_features;
1076 netdev->hw_features |= NETIF_F_RXCSUM; 1081 netdev->hw_features |= NETIF_F_RXCSUM;
1082 netdev->hw_features |= NETIF_F_RXALL;
1077 netdev->hw_features |= NETIF_F_RXFCS; 1083 netdev->hw_features |= NETIF_F_RXFCS;
1078 1084
1079 if (pci_using_dac) { 1085 if (pci_using_dac) {
@@ -1841,6 +1847,22 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1841 break; 1847 break;
1842 } 1848 }
1843 1849
1850 /* This is useful for sniffing bad packets. */
1851 if (adapter->netdev->features & NETIF_F_RXALL) {
1852 /* UPE and MPE will be handled by normal PROMISC logic
1853 * in e1000e_set_rx_mode */
1854 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1855 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1856 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1857
1858 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1859 E1000_RCTL_DPF | /* Allow filtered pause */
1860 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1861 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1862 * and that breaks VLANs.
1863 */
1864 }
1865
1844 ew32(RCTL, rctl); 1866 ew32(RCTL, rctl);
1845} 1867}
1846 1868
@@ -3243,6 +3265,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3243 nr_frags, mss); 3265 nr_frags, mss);
3244 3266
3245 if (count) { 3267 if (count) {
3268 skb_tx_timestamp(skb);
3269
3246 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3270 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3247 /* Make sure there is space in the ring for the next send. */ 3271 /* Make sure there is space in the ring for the next send. */
3248 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3272 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
@@ -3380,7 +3404,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
3380 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 3404 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3381 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 3405 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3382 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i]; 3406 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3383 struct my_u { u64 a; u64 b; }; 3407 struct my_u { __le64 a; __le64 b; };
3384 struct my_u *u = (struct my_u *)tx_desc; 3408 struct my_u *u = (struct my_u *)tx_desc;
3385 const char *type; 3409 const char *type;
3386 3410
@@ -3424,7 +3448,7 @@ rx_ring_summary:
3424 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 3448 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3425 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 3449 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3426 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i]; 3450 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3427 struct my_u { u64 a; u64 b; }; 3451 struct my_u { __le64 a; __le64 b; };
3428 struct my_u *u = (struct my_u *)rx_desc; 3452 struct my_u *u = (struct my_u *)rx_desc;
3429 const char *type; 3453 const char *type;
3430 3454
@@ -4046,7 +4070,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4046 /* errors is only valid for DD + EOP descriptors */ 4070 /* errors is only valid for DD + EOP descriptors */
4047 if (unlikely((status & E1000_RXD_STAT_EOP) && 4071 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4048 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 4072 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4049 u8 last_byte = *(skb->data + length - 1); 4073 u8 *mapped;
4074 u8 last_byte;
4075
4076 mapped = page_address(buffer_info->page);
4077 last_byte = *(mapped + length - 1);
4050 if (TBI_ACCEPT(hw, status, rx_desc->errors, length, 4078 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4051 last_byte)) { 4079 last_byte)) {
4052 spin_lock_irqsave(&adapter->stats_lock, 4080 spin_lock_irqsave(&adapter->stats_lock,
@@ -4057,6 +4085,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4057 irq_flags); 4085 irq_flags);
4058 length--; 4086 length--;
4059 } else { 4087 } else {
4088 if (netdev->features & NETIF_F_RXALL)
4089 goto process_skb;
4060 /* recycle both page and skb */ 4090 /* recycle both page and skb */
4061 buffer_info->skb = skb; 4091 buffer_info->skb = skb;
4062 /* an error means any chain goes out the window 4092 /* an error means any chain goes out the window
@@ -4069,6 +4099,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4069 } 4099 }
4070 4100
4071#define rxtop rx_ring->rx_skb_top 4101#define rxtop rx_ring->rx_skb_top
4102process_skb:
4072 if (!(status & E1000_RXD_STAT_EOP)) { 4103 if (!(status & E1000_RXD_STAT_EOP)) {
4073 /* this descriptor is only the beginning (or middle) */ 4104 /* this descriptor is only the beginning (or middle) */
4074 if (!rxtop) { 4105 if (!rxtop) {
@@ -4276,12 +4307,15 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4276 flags); 4307 flags);
4277 length--; 4308 length--;
4278 } else { 4309 } else {
4310 if (netdev->features & NETIF_F_RXALL)
4311 goto process_skb;
4279 /* recycle */ 4312 /* recycle */
4280 buffer_info->skb = skb; 4313 buffer_info->skb = skb;
4281 goto next_desc; 4314 goto next_desc;
4282 } 4315 }
4283 } 4316 }
4284 4317
4318process_skb:
4285 total_rx_bytes += (length - 4); /* don't count FCS */ 4319 total_rx_bytes += (length - 4); /* don't count FCS */
4286 total_rx_packets++; 4320 total_rx_packets++;
4287 4321
@@ -4365,30 +4399,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4365 break; 4399 break;
4366 } 4400 }
4367 4401
4368 /* Fix for errata 23, can't cross 64kB boundary */
4369 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4370 struct sk_buff *oldskb = skb;
4371 e_err(rx_err, "skb align check failed: %u bytes at "
4372 "%p\n", bufsz, skb->data);
4373 /* Try again, without freeing the previous */
4374 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4375 /* Failed allocation, critical failure */
4376 if (!skb) {
4377 dev_kfree_skb(oldskb);
4378 adapter->alloc_rx_buff_failed++;
4379 break;
4380 }
4381
4382 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4383 /* give up */
4384 dev_kfree_skb(skb);
4385 dev_kfree_skb(oldskb);
4386 break; /* while (cleaned_count--) */
4387 }
4388
4389 /* Use new allocation */
4390 dev_kfree_skb(oldskb);
4391 }
4392 buffer_info->skb = skb; 4402 buffer_info->skb = skb;
4393 buffer_info->length = adapter->rx_buffer_len; 4403 buffer_info->length = adapter->rx_buffer_len;
4394check_page: 4404check_page:
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index bac9dda31b6c..4dd18a1f45d2 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -228,9 +228,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
228 /* FWSM register */ 228 /* FWSM register */
229 mac->has_fwsm = true; 229 mac->has_fwsm = true;
230 /* ARC supported; valid only if manageability features are enabled. */ 230 /* ARC supported; valid only if manageability features are enabled. */
231 mac->arc_subsystem_valid = 231 mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK);
232 (er32(FWSM) & E1000_FWSM_MODE_MASK)
233 ? true : false;
234 /* Adaptive IFS not supported */ 232 /* Adaptive IFS not supported */
235 mac->adaptive_ifs = false; 233 mac->adaptive_ifs = false;
236 234
@@ -766,6 +764,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
766{ 764{
767 u32 ctrl; 765 u32 ctrl;
768 s32 ret_val; 766 s32 ret_val;
767 u16 kum_reg_data;
769 768
770 /* 769 /*
771 * Prevent the PCI-E bus from sticking if there is no TLP connection 770 * Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -791,6 +790,13 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
791 ew32(CTRL, ctrl | E1000_CTRL_RST); 790 ew32(CTRL, ctrl | E1000_CTRL_RST);
792 e1000_release_phy_80003es2lan(hw); 791 e1000_release_phy_80003es2lan(hw);
793 792
793 /* Disable IBIST slave mode (far-end loopback) */
794 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
795 &kum_reg_data);
796 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
797 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
798 kum_reg_data);
799
794 ret_val = e1000e_get_auto_rd_done(hw); 800 ret_val = e1000e_get_auto_rd_done(hw);
795 if (ret_val) 801 if (ret_val)
796 /* We don't want to continue accessing MAC registers. */ 802 /* We don't want to continue accessing MAC registers. */
@@ -938,6 +944,14 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
938 else 944 else
939 reg |= (1 << 28); 945 reg |= (1 << 28);
940 ew32(TARC(1), reg); 946 ew32(TARC(1), reg);
947
948 /*
949 * Disable IPv6 extension header parsing because some malformed
950 * IPv6 headers can hang the Rx.
951 */
952 reg = er32(RFCTL);
953 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
954 ew32(RFCTL, reg);
941} 955}
942 956
943/** 957/**
@@ -1433,6 +1447,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
1433 /* setup_physical_interface dependent on media type */ 1447 /* setup_physical_interface dependent on media type */
1434 .setup_led = e1000e_setup_led_generic, 1448 .setup_led = e1000e_setup_led_generic,
1435 .config_collision_dist = e1000e_config_collision_dist_generic, 1449 .config_collision_dist = e1000e_config_collision_dist_generic,
1450 .rar_set = e1000e_rar_set_generic,
1436}; 1451};
1437 1452
1438static const struct e1000_phy_operations es2_phy_ops = { 1453static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index b3fdc6977f2e..36db4df09aed 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -295,9 +295,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
295 * ARC supported; valid only if manageability features are 295 * ARC supported; valid only if manageability features are
296 * enabled. 296 * enabled.
297 */ 297 */
298 mac->arc_subsystem_valid = 298 mac->arc_subsystem_valid = !!(er32(FWSM) &
299 (er32(FWSM) & E1000_FWSM_MODE_MASK) 299 E1000_FWSM_MODE_MASK);
300 ? true : false;
301 break; 300 break;
302 case e1000_82574: 301 case e1000_82574:
303 case e1000_82583: 302 case e1000_82583:
@@ -798,7 +797,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
798 /* Check for pending operations. */ 797 /* Check for pending operations. */
799 for (i = 0; i < E1000_FLASH_UPDATES; i++) { 798 for (i = 0; i < E1000_FLASH_UPDATES; i++) {
800 usleep_range(1000, 2000); 799 usleep_range(1000, 2000);
801 if ((er32(EECD) & E1000_EECD_FLUPD) == 0) 800 if (!(er32(EECD) & E1000_EECD_FLUPD))
802 break; 801 break;
803 } 802 }
804 803
@@ -822,7 +821,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
822 821
823 for (i = 0; i < E1000_FLASH_UPDATES; i++) { 822 for (i = 0; i < E1000_FLASH_UPDATES; i++) {
824 usleep_range(1000, 2000); 823 usleep_range(1000, 2000);
825 if ((er32(EECD) & E1000_EECD_FLUPD) == 0) 824 if (!(er32(EECD) & E1000_EECD_FLUPD))
826 break; 825 break;
827 } 826 }
828 827
@@ -1000,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
1000 **/ 999 **/
1001static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 1000static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1002{ 1001{
1003 u32 ctrl, ctrl_ext; 1002 u32 ctrl, ctrl_ext, eecd;
1004 s32 ret_val; 1003 s32 ret_val;
1005 1004
1006 /* 1005 /*
@@ -1073,6 +1072,16 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1073 */ 1072 */
1074 1073
1075 switch (hw->mac.type) { 1074 switch (hw->mac.type) {
1075 case e1000_82571:
1076 case e1000_82572:
1077 /*
1078 * REQ and GNT bits need to be cleared when using AUTO_RD
1079 * to access the EEPROM.
1080 */
1081 eecd = er32(EECD);
1082 eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT);
1083 ew32(EECD, eecd);
1084 break;
1076 case e1000_82573: 1085 case e1000_82573:
1077 case e1000_82574: 1086 case e1000_82574:
1078 case e1000_82583: 1087 case e1000_82583:
@@ -1280,6 +1289,16 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1280 ew32(CTRL_EXT, reg); 1289 ew32(CTRL_EXT, reg);
1281 } 1290 }
1282 1291
1292 /*
1293 * Disable IPv6 extension header parsing because some malformed
1294 * IPv6 headers can hang the Rx.
1295 */
1296 if (hw->mac.type <= e1000_82573) {
1297 reg = er32(RFCTL);
1298 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
1299 ew32(RFCTL, reg);
1300 }
1301
1283 /* PCI-Ex Control Registers */ 1302 /* PCI-Ex Control Registers */
1284 switch (hw->mac.type) { 1303 switch (hw->mac.type) {
1285 case e1000_82574: 1304 case e1000_82574:
@@ -1763,7 +1782,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
1763 * incoming packets directed to this port are dropped. 1782 * incoming packets directed to this port are dropped.
1764 * Eventually the LAA will be in RAR[0] and RAR[14]. 1783 * Eventually the LAA will be in RAR[0] and RAR[14].
1765 */ 1784 */
1766 e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); 1785 hw->mac.ops.rar_set(hw, hw->mac.addr,
1786 hw->mac.rar_entry_count - 1);
1767} 1787}
1768 1788
1769/** 1789/**
@@ -1927,6 +1947,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
1927 .setup_led = e1000e_setup_led_generic, 1947 .setup_led = e1000e_setup_led_generic,
1928 .config_collision_dist = e1000e_config_collision_dist_generic, 1948 .config_collision_dist = e1000e_config_collision_dist_generic,
1929 .read_mac_addr = e1000_read_mac_addr_82571, 1949 .read_mac_addr = e1000_read_mac_addr_82571,
1950 .rar_set = e1000e_rar_set_generic,
1930}; 1951};
1931 1952
1932static const struct e1000_phy_operations e82_phy_ops_igp = { 1953static const struct e1000_phy_operations e82_phy_ops_igp = {
@@ -2061,9 +2082,11 @@ const struct e1000_info e1000_82574_info = {
2061 | FLAG_HAS_SMART_POWER_DOWN 2082 | FLAG_HAS_SMART_POWER_DOWN
2062 | FLAG_HAS_AMT 2083 | FLAG_HAS_AMT
2063 | FLAG_HAS_CTRLEXT_ON_LOAD, 2084 | FLAG_HAS_CTRLEXT_ON_LOAD,
2064 .flags2 = FLAG2_CHECK_PHY_HANG 2085 .flags2 = FLAG2_CHECK_PHY_HANG
2065 | FLAG2_DISABLE_ASPM_L0S 2086 | FLAG2_DISABLE_ASPM_L0S
2066 | FLAG2_NO_DISABLE_RX, 2087 | FLAG2_DISABLE_ASPM_L1
2088 | FLAG2_NO_DISABLE_RX
2089 | FLAG2_DMA_BURST,
2067 .pba = 32, 2090 .pba = 32,
2068 .max_hw_frame_size = DEFAULT_JUMBO, 2091 .max_hw_frame_size = DEFAULT_JUMBO,
2069 .get_variants = e1000_get_variants_82571, 2092 .get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 3a5025917163..351a4097b2ba 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -74,7 +74,9 @@
74#define E1000_WUS_BC E1000_WUFC_BC 74#define E1000_WUS_BC E1000_WUFC_BC
75 75
76/* Extended Device Control */ 76/* Extended Device Control */
77#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
77#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ 78#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
79#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
78#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 80#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
79#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ 81#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
80#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ 82#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
@@ -573,6 +575,7 @@
573#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ 575#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
574 576
575/* Link Partner Ability Register (Base Page) */ 577/* Link Partner Ability Register (Base Page) */
578#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
576#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ 579#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
577#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ 580#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
578 581
@@ -739,6 +742,7 @@
739#define I82577_E_PHY_ID 0x01540050 742#define I82577_E_PHY_ID 0x01540050
740#define I82578_E_PHY_ID 0x004DD040 743#define I82578_E_PHY_ID 0x004DD040
741#define I82579_E_PHY_ID 0x01540090 744#define I82579_E_PHY_ID 0x01540090
745#define I217_E_PHY_ID 0x015400A0
742 746
743/* M88E1000 Specific Registers */ 747/* M88E1000 Specific Registers */
744#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 748#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -850,4 +854,8 @@
850/* SerDes Control */ 854/* SerDes Control */
851#define E1000_GEN_POLL_TIMEOUT 640 855#define E1000_GEN_POLL_TIMEOUT 640
852 856
857/* FW Semaphore */
858#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
859#define E1000_FWSM_WLOCK_MAC_SHIFT 7
860
853#endif /* _E1000_DEFINES_H_ */ 861#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index b83897f76ee3..6e6fffb34581 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -206,6 +206,7 @@ enum e1000_boards {
206 board_ich10lan, 206 board_ich10lan,
207 board_pchlan, 207 board_pchlan,
208 board_pch2lan, 208 board_pch2lan,
209 board_pch_lpt,
209}; 210};
210 211
211struct e1000_ps_page { 212struct e1000_ps_page {
@@ -528,6 +529,7 @@ extern const struct e1000_info e1000_ich9_info;
528extern const struct e1000_info e1000_ich10_info; 529extern const struct e1000_info e1000_ich10_info;
529extern const struct e1000_info e1000_pch_info; 530extern const struct e1000_info e1000_pch_info;
530extern const struct e1000_info e1000_pch2_info; 531extern const struct e1000_info e1000_pch2_info;
532extern const struct e1000_info e1000_pch_lpt_info;
531extern const struct e1000_info e1000_es2_info; 533extern const struct e1000_info e1000_es2_info;
532 534
533extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, 535extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
@@ -576,7 +578,7 @@ extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
576extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, 578extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
577 u8 *mc_addr_list, 579 u8 *mc_addr_list,
578 u32 mc_addr_count); 580 u32 mc_addr_count);
579extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 581extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
580extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); 582extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
581extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); 583extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
582extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); 584extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
@@ -673,11 +675,21 @@ static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
673 return hw->phy.ops.read_reg(hw, offset, data); 675 return hw->phy.ops.read_reg(hw, offset, data);
674} 676}
675 677
678static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
679{
680 return hw->phy.ops.read_reg_locked(hw, offset, data);
681}
682
676static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) 683static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
677{ 684{
678 return hw->phy.ops.write_reg(hw, offset, data); 685 return hw->phy.ops.write_reg(hw, offset, data);
679} 686}
680 687
688static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
689{
690 return hw->phy.ops.write_reg_locked(hw, offset, data);
691}
692
681static inline s32 e1000_get_cable_length(struct e1000_hw *hw) 693static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
682{ 694{
683 return hw->phy.ops.get_cable_length(hw); 695 return hw->phy.ops.get_cable_length(hw);
@@ -735,9 +747,46 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
735 return readl(hw->hw_addr + reg); 747 return readl(hw->hw_addr + reg);
736} 748}
737 749
750#define er32(reg) __er32(hw, E1000_##reg)
751
752/**
753 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
754 * @hw: pointer to the HW structure
755 *
756 * When updating the MAC CSR registers, the Manageability Engine (ME) could
757 * be accessing the registers at the same time. Normally, this is handled in
758 * h/w by an arbiter but on some parts there is a bug that acknowledges Host
759 * accesses later than it should which could result in the register to have
760 * an incorrect value. Workaround this by checking the FWSM register which
761 * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
762 * and try again a number of times.
763 **/
764static inline s32 __ew32_prepare(struct e1000_hw *hw)
765{
766 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
767
768 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
769 udelay(50);
770
771 return i;
772}
773
738static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) 774static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
739{ 775{
776 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
777 __ew32_prepare(hw);
778
740 writel(val, hw->hw_addr + reg); 779 writel(val, hw->hw_addr + reg);
741} 780}
742 781
782#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
783
784#define e1e_flush() er32(STATUS)
785
786#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
787 (__ew32((a), (reg + ((offset) << 2)), (value)))
788
789#define E1000_READ_REG_ARRAY(a, reg, offset) \
790 (readl((a)->hw_addr + reg + ((offset) << 2)))
791
743#endif /* _E1000_H_ */ 792#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index db35dd5d96de..d863075df7a4 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -259,8 +259,7 @@ static int e1000_set_settings(struct net_device *netdev,
259 * cannot be changed 259 * cannot be changed
260 */ 260 */
261 if (hw->phy.ops.check_reset_block(hw)) { 261 if (hw->phy.ops.check_reset_block(hw)) {
262 e_err("Cannot change link characteristics when SoL/IDER is " 262 e_err("Cannot change link characteristics when SoL/IDER is active.\n");
263 "active.\n");
264 return -EINVAL; 263 return -EINVAL;
265 } 264 }
266 265
@@ -403,15 +402,15 @@ static void e1000_get_regs(struct net_device *netdev,
403 regs_buff[1] = er32(STATUS); 402 regs_buff[1] = er32(STATUS);
404 403
405 regs_buff[2] = er32(RCTL); 404 regs_buff[2] = er32(RCTL);
406 regs_buff[3] = er32(RDLEN); 405 regs_buff[3] = er32(RDLEN(0));
407 regs_buff[4] = er32(RDH); 406 regs_buff[4] = er32(RDH(0));
408 regs_buff[5] = er32(RDT); 407 regs_buff[5] = er32(RDT(0));
409 regs_buff[6] = er32(RDTR); 408 regs_buff[6] = er32(RDTR);
410 409
411 regs_buff[7] = er32(TCTL); 410 regs_buff[7] = er32(TCTL);
412 regs_buff[8] = er32(TDLEN); 411 regs_buff[8] = er32(TDLEN(0));
413 regs_buff[9] = er32(TDH); 412 regs_buff[9] = er32(TDH(0));
414 regs_buff[10] = er32(TDT); 413 regs_buff[10] = er32(TDT(0));
415 regs_buff[11] = er32(TIDV); 414 regs_buff[11] = er32(TIDV);
416 415
417 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ 416 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
@@ -727,9 +726,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
727 (test[pat] & write)); 726 (test[pat] & write));
728 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 727 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
729 if (val != (test[pat] & write & mask)) { 728 if (val != (test[pat] & write & mask)) {
730 e_err("pattern test reg %04X failed: got 0x%08X " 729 e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
731 "expected 0x%08X\n", reg + offset, val, 730 reg + offset, val, (test[pat] & write & mask));
732 (test[pat] & write & mask));
733 *data = reg; 731 *data = reg;
734 return 1; 732 return 1;
735 } 733 }
@@ -744,8 +742,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
744 __ew32(&adapter->hw, reg, write & mask); 742 __ew32(&adapter->hw, reg, write & mask);
745 val = __er32(&adapter->hw, reg); 743 val = __er32(&adapter->hw, reg);
746 if ((write & mask) != (val & mask)) { 744 if ((write & mask) != (val & mask)) {
747 e_err("set/check reg %04X test failed: got 0x%08X " 745 e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
748 "expected 0x%08X\n", reg, (val & mask), (write & mask)); 746 reg, (val & mask), (write & mask));
749 *data = reg; 747 *data = reg;
750 return 1; 748 return 1;
751 } 749 }
@@ -775,6 +773,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
775 u32 i; 773 u32 i;
776 u32 toggle; 774 u32 toggle;
777 u32 mask; 775 u32 mask;
776 u32 wlock_mac = 0;
778 777
779 /* 778 /*
780 * The status register is Read Only, so a write should fail. 779 * The status register is Read Only, so a write should fail.
@@ -797,8 +796,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
797 ew32(STATUS, toggle); 796 ew32(STATUS, toggle);
798 after = er32(STATUS) & toggle; 797 after = er32(STATUS) & toggle;
799 if (value != after) { 798 if (value != after) {
800 e_err("failed STATUS register test got: 0x%08X expected: " 799 e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n",
801 "0x%08X\n", after, value); 800 after, value);
802 *data = 1; 801 *data = 1;
803 return 1; 802 return 1;
804 } 803 }
@@ -813,15 +812,15 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
813 } 812 }
814 813
815 REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); 814 REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
816 REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); 815 REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
817 REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); 816 REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
818 REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); 817 REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
819 REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); 818 REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
820 REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); 819 REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
821 REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); 820 REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
822 REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); 821 REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
823 REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); 822 REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
824 REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); 823 REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
825 824
826 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); 825 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
827 826
@@ -830,29 +829,41 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
830 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); 829 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
831 830
832 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); 831 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
833 REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 832 REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
834 if (!(adapter->flags & FLAG_IS_ICH)) 833 if (!(adapter->flags & FLAG_IS_ICH))
835 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); 834 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
836 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 835 REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
837 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); 836 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
838 mask = 0x8003FFFF; 837 mask = 0x8003FFFF;
839 switch (mac->type) { 838 switch (mac->type) {
840 case e1000_ich10lan: 839 case e1000_ich10lan:
841 case e1000_pchlan: 840 case e1000_pchlan:
842 case e1000_pch2lan: 841 case e1000_pch2lan:
842 case e1000_pch_lpt:
843 mask |= (1 << 18); 843 mask |= (1 << 18);
844 break; 844 break;
845 default: 845 default:
846 break; 846 break;
847 } 847 }
848 for (i = 0; i < mac->rar_entry_count; i++) 848
849 if (mac->type == e1000_pch_lpt)
850 wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
851 E1000_FWSM_WLOCK_MAC_SHIFT;
852
853 for (i = 0; i < mac->rar_entry_count; i++) {
854 /* Cannot test write-protected SHRAL[n] registers */
855 if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
856 continue;
857
849 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), 858 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
850 mask, 0xFFFFFFFF); 859 mask, 0xFFFFFFFF);
860 }
851 861
852 for (i = 0; i < mac->mta_reg_count; i++) 862 for (i = 0; i < mac->mta_reg_count; i++)
853 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); 863 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
854 864
855 *data = 0; 865 *data = 0;
866
856 return 0; 867 return 0;
857} 868}
858 869
@@ -1104,11 +1115,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1104 tx_ring->next_to_use = 0; 1115 tx_ring->next_to_use = 0;
1105 tx_ring->next_to_clean = 0; 1116 tx_ring->next_to_clean = 0;
1106 1117
1107 ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1118 ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1108 ew32(TDBAH, ((u64) tx_ring->dma >> 32)); 1119 ew32(TDBAH(0), ((u64) tx_ring->dma >> 32));
1109 ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); 1120 ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
1110 ew32(TDH, 0); 1121 ew32(TDH(0), 0);
1111 ew32(TDT, 0); 1122 ew32(TDT(0), 0);
1112 ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | 1123 ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
1113 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1124 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1114 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1125 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
@@ -1168,11 +1179,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1168 rctl = er32(RCTL); 1179 rctl = er32(RCTL);
1169 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 1180 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
1170 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1181 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1171 ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); 1182 ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
1172 ew32(RDBAH, ((u64) rx_ring->dma >> 32)); 1183 ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
1173 ew32(RDLEN, rx_ring->size); 1184 ew32(RDLEN(0), rx_ring->size);
1174 ew32(RDH, 0); 1185 ew32(RDH(0), 0);
1175 ew32(RDT, 0); 1186 ew32(RDT(0), 0);
1176 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | 1187 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1177 E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | 1188 E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
1178 E1000_RCTL_SBP | E1000_RCTL_SECRC | 1189 E1000_RCTL_SBP | E1000_RCTL_SECRC |
@@ -1534,7 +1545,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1534 int ret_val = 0; 1545 int ret_val = 0;
1535 unsigned long time; 1546 unsigned long time;
1536 1547
1537 ew32(RDT, rx_ring->count - 1); 1548 ew32(RDT(0), rx_ring->count - 1);
1538 1549
1539 /* 1550 /*
1540 * Calculate the loop count based on the largest descriptor ring 1551 * Calculate the loop count based on the largest descriptor ring
@@ -1561,7 +1572,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1561 if (k == tx_ring->count) 1572 if (k == tx_ring->count)
1562 k = 0; 1573 k = 0;
1563 } 1574 }
1564 ew32(TDT, k); 1575 ew32(TDT(0), k);
1565 e1e_flush(); 1576 e1e_flush();
1566 msleep(200); 1577 msleep(200);
1567 time = jiffies; /* set the start time for the receive */ 1578 time = jiffies; /* set the start time for the receive */
@@ -1791,8 +1802,7 @@ static void e1000_get_wol(struct net_device *netdev,
1791 wol->supported &= ~WAKE_UCAST; 1802 wol->supported &= ~WAKE_UCAST;
1792 1803
1793 if (adapter->wol & E1000_WUFC_EX) 1804 if (adapter->wol & E1000_WUFC_EX)
1794 e_err("Interface does not support directed (unicast) " 1805 e_err("Interface does not support directed (unicast) frame wake-up packets\n");
1795 "frame wake-up packets\n");
1796 } 1806 }
1797 1807
1798 if (adapter->wol & E1000_WUFC_EX) 1808 if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index f82ecf536c8b..ed5b40985edb 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -36,16 +36,6 @@ struct e1000_adapter;
36 36
37#include "defines.h" 37#include "defines.h"
38 38
39#define er32(reg) __er32(hw, E1000_##reg)
40#define ew32(reg,val) __ew32(hw, E1000_##reg, (val))
41#define e1e_flush() er32(STATUS)
42
43#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
44 (writel((value), ((a)->hw_addr + reg + ((offset) << 2))))
45
46#define E1000_READ_REG_ARRAY(a, reg, offset) \
47 (readl((a)->hw_addr + reg + ((offset) << 2)))
48
49enum e1e_registers { 39enum e1e_registers {
50 E1000_CTRL = 0x00000, /* Device Control - RW */ 40 E1000_CTRL = 0x00000, /* Device Control - RW */
51 E1000_STATUS = 0x00008, /* Device Status - RO */ 41 E1000_STATUS = 0x00008, /* Device Status - RO */
@@ -61,6 +51,7 @@ enum e1e_registers {
61 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ 51 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
62 E1000_FCT = 0x00030, /* Flow Control Type - RW */ 52 E1000_FCT = 0x00030, /* Flow Control Type - RW */
63 E1000_VET = 0x00038, /* VLAN Ether Type - RW */ 53 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
54 E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */
64 E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ 55 E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */
65 E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ 56 E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */
66 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ 57 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
@@ -94,31 +85,40 @@ enum e1e_registers {
94 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ 85 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
95 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ 86 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
96 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ 87 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
97 E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ 88/*
98 E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ 89 * Convenience macros
99 E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */
100 E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */
101 E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */
102 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
103 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
104#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
105 E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
106
107/* Convenience macros
108 * 90 *
109 * Note: "_n" is the queue number of the register to be written to. 91 * Note: "_n" is the queue number of the register to be written to.
110 * 92 *
111 * Example usage: 93 * Example usage:
112 * E1000_RDBAL_REG(current_rx_queue) 94 * E1000_RDBAL(current_rx_queue)
113 *
114 */ 95 */
115#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) 96 E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
97#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + (_n << 8))
98 E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
99#define E1000_RDBAH(_n) (E1000_RDBAH_BASE + (_n << 8))
100 E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
101#define E1000_RDLEN(_n) (E1000_RDLEN_BASE + (_n << 8))
102 E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
103#define E1000_RDH(_n) (E1000_RDH_BASE + (_n << 8))
104 E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
105#define E1000_RDT(_n) (E1000_RDT_BASE + (_n << 8))
106 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
107 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
108#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
109 E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
110
116 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ 111 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
117 E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ 112 E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
118 E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ 113#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + (_n << 8))
119 E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ 114 E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
120 E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ 115#define E1000_TDBAH(_n) (E1000_TDBAH_BASE + (_n << 8))
121 E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ 116 E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
117#define E1000_TDLEN(_n) (E1000_TDLEN_BASE + (_n << 8))
118 E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
119#define E1000_TDH(_n) (E1000_TDH_BASE + (_n << 8))
120 E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
121#define E1000_TDT(_n) (E1000_TDT_BASE + (_n << 8))
122 E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ 122 E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
123 E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ 123 E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
124#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) 124#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
@@ -200,6 +200,14 @@ enum e1e_registers {
200#define E1000_RA (E1000_RAL(0)) 200#define E1000_RA (E1000_RAL(0))
201 E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ 201 E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
202#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) 202#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
203 E1000_SHRAL_PCH_LPT_BASE = 0x05408,
204#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
205 E1000_SHRAH_PCH_LTP_BASE = 0x0540C,
206#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8))
207 E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */
208#define E1000_SHRAL(_n) (E1000_SHRAL_BASE + ((_n) * 8))
209 E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */
210#define E1000_SHRAH(_n) (E1000_SHRAH_BASE + ((_n) * 8))
203 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ 211 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
204 E1000_WUC = 0x05800, /* Wakeup Control - RW */ 212 E1000_WUC = 0x05800, /* Wakeup Control - RW */
205 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ 213 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
@@ -402,6 +410,8 @@ enum e1e_registers {
402#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 410#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
403#define E1000_DEV_ID_PCH2_LV_LM 0x1502 411#define E1000_DEV_ID_PCH2_LV_LM 0x1502
404#define E1000_DEV_ID_PCH2_LV_V 0x1503 412#define E1000_DEV_ID_PCH2_LV_V 0x1503
413#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
414#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
405 415
406#define E1000_REVISION_4 4 416#define E1000_REVISION_4 4
407 417
@@ -422,6 +432,7 @@ enum e1000_mac_type {
422 e1000_ich10lan, 432 e1000_ich10lan,
423 e1000_pchlan, 433 e1000_pchlan,
424 e1000_pch2lan, 434 e1000_pch2lan,
435 e1000_pch_lpt,
425}; 436};
426 437
427enum e1000_media_type { 438enum e1000_media_type {
@@ -459,6 +470,7 @@ enum e1000_phy_type {
459 e1000_phy_82578, 470 e1000_phy_82578,
460 e1000_phy_82577, 471 e1000_phy_82577,
461 e1000_phy_82579, 472 e1000_phy_82579,
473 e1000_phy_i217,
462}; 474};
463 475
464enum e1000_bus_width { 476enum e1000_bus_width {
@@ -782,6 +794,7 @@ struct e1000_mac_operations {
782 s32 (*setup_led)(struct e1000_hw *); 794 s32 (*setup_led)(struct e1000_hw *);
783 void (*write_vfta)(struct e1000_hw *, u32, u32); 795 void (*write_vfta)(struct e1000_hw *, u32, u32);
784 void (*config_collision_dist)(struct e1000_hw *); 796 void (*config_collision_dist)(struct e1000_hw *);
797 void (*rar_set)(struct e1000_hw *, u8 *, u32);
785 s32 (*read_mac_addr)(struct e1000_hw *); 798 s32 (*read_mac_addr)(struct e1000_hw *);
786}; 799};
787 800
@@ -966,6 +979,7 @@ struct e1000_dev_spec_ich8lan {
966 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; 979 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
967 bool nvm_k1_enabled; 980 bool nvm_k1_enabled;
968 bool eee_disable; 981 bool eee_disable;
982 u16 eee_lp_ability;
969}; 983};
970 984
971struct e1000_hw { 985struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 64c76443a7aa..bbf70ba367da 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -105,6 +105,9 @@
105#define E1000_FEXTNVM_SW_CONFIG 1 105#define E1000_FEXTNVM_SW_CONFIG 1
106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ 106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107 107
108#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
109#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
110
108#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 111#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
109#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 112#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
110#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 113#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
@@ -112,6 +115,8 @@
112#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 115#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
113 116
114#define E1000_ICH_RAR_ENTRIES 7 117#define E1000_ICH_RAR_ENTRIES 7
118#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
119#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
115 120
116#define PHY_PAGE_SHIFT 5 121#define PHY_PAGE_SHIFT 5
117#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ 122#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
@@ -127,14 +132,22 @@
127 132
128#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ 133#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
129 134
135/* SMBus Control Phy Register */
136#define CV_SMB_CTRL PHY_REG(769, 23)
137#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
138
130/* SMBus Address Phy Register */ 139/* SMBus Address Phy Register */
131#define HV_SMB_ADDR PHY_REG(768, 26) 140#define HV_SMB_ADDR PHY_REG(768, 26)
132#define HV_SMB_ADDR_MASK 0x007F 141#define HV_SMB_ADDR_MASK 0x007F
133#define HV_SMB_ADDR_PEC_EN 0x0200 142#define HV_SMB_ADDR_PEC_EN 0x0200
134#define HV_SMB_ADDR_VALID 0x0080 143#define HV_SMB_ADDR_VALID 0x0080
144#define HV_SMB_ADDR_FREQ_MASK 0x1100
145#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
146#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
135 147
136/* PHY Power Management Control */ 148/* PHY Power Management Control */
137#define HV_PM_CTRL PHY_REG(770, 17) 149#define HV_PM_CTRL PHY_REG(770, 17)
150#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
138 151
139/* PHY Low Power Idle Control */ 152/* PHY Low Power Idle Control */
140#define I82579_LPI_CTRL PHY_REG(772, 20) 153#define I82579_LPI_CTRL PHY_REG(772, 20)
@@ -147,11 +160,26 @@
147#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ 160#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
148#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */ 161#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
149#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ 162#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
163#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
164#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
165#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
166
167/* Intel Rapid Start Technology Support */
168#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70)
169#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
170#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
171#define I217_SxCTRL_MASK 0x1000
172#define I217_CGFREG PHY_REG(772, 29)
173#define I217_CGFREG_MASK 0x0002
174#define I217_MEMPWR PHY_REG(772, 26)
175#define I217_MEMPWR_MASK 0x0010
150 176
151/* Strapping Option Register - RO */ 177/* Strapping Option Register - RO */
152#define E1000_STRAP 0x0000C 178#define E1000_STRAP 0x0000C
153#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 179#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
154#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 180#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
181#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
182#define E1000_STRAP_SMT_FREQ_SHIFT 12
155 183
156/* OEM Bits Phy Register */ 184/* OEM Bits Phy Register */
157#define HV_OEM_BITS PHY_REG(768, 25) 185#define HV_OEM_BITS PHY_REG(768, 25)
@@ -255,6 +283,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
255static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 283static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
256static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 284static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
257static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 285static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
286static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
287static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
258static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 288static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
259static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 289static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
260 290
@@ -283,18 +313,161 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
283#define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) 313#define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
284#define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) 314#define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
285 315
286static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) 316/**
317 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
318 * @hw: pointer to the HW structure
319 *
320 * Test access to the PHY registers by reading the PHY ID registers. If
321 * the PHY ID is already known (e.g. resume path) compare it with known ID,
322 * otherwise assume the read PHY ID is correct if it is valid.
323 *
324 * Assumes the sw/fw/hw semaphore is already acquired.
325 **/
326static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
287{ 327{
288 u32 ctrl; 328 u16 phy_reg;
329 u32 phy_id;
289 330
290 ctrl = er32(CTRL); 331 e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
291 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 332 phy_id = (u32)(phy_reg << 16);
292 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 333 e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
293 ew32(CTRL, ctrl); 334 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
294 e1e_flush(); 335
295 udelay(10); 336 if (hw->phy.id) {
296 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 337 if (hw->phy.id == phy_id)
297 ew32(CTRL, ctrl); 338 return true;
339 } else {
340 if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
341 hw->phy.id = phy_id;
342 return true;
343 }
344
345 return false;
346}
347
348/**
349 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
350 * @hw: pointer to the HW structure
351 *
352 * Workarounds/flow necessary for PHY initialization during driver load
353 * and resume paths.
354 **/
355static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
356{
357 u32 mac_reg, fwsm = er32(FWSM);
358 s32 ret_val;
359 u16 phy_reg;
360
361 ret_val = hw->phy.ops.acquire(hw);
362 if (ret_val) {
363 e_dbg("Failed to initialize PHY flow\n");
364 return ret_val;
365 }
366
367 /*
368 * The MAC-PHY interconnect may be in SMBus mode. If the PHY is
369 * inaccessible and resetting the PHY is not blocked, toggle the
370 * LANPHYPC Value bit to force the interconnect to PCIe mode.
371 */
372 switch (hw->mac.type) {
373 case e1000_pch_lpt:
374 if (e1000_phy_is_accessible_pchlan(hw))
375 break;
376
377 /*
378 * Before toggling LANPHYPC, see if PHY is accessible by
379 * forcing MAC to SMBus mode first.
380 */
381 mac_reg = er32(CTRL_EXT);
382 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
383 ew32(CTRL_EXT, mac_reg);
384
385 /* fall-through */
386 case e1000_pch2lan:
387 /*
388 * Gate automatic PHY configuration by hardware on
389 * non-managed 82579
390 */
391 if ((hw->mac.type == e1000_pch2lan) &&
392 !(fwsm & E1000_ICH_FWSM_FW_VALID))
393 e1000_gate_hw_phy_config_ich8lan(hw, true);
394
395 if (e1000_phy_is_accessible_pchlan(hw)) {
396 if (hw->mac.type == e1000_pch_lpt) {
397 /* Unforce SMBus mode in PHY */
398 e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
399 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
400 e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
401
402 /* Unforce SMBus mode in MAC */
403 mac_reg = er32(CTRL_EXT);
404 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
405 ew32(CTRL_EXT, mac_reg);
406 }
407 break;
408 }
409
410 /* fall-through */
411 case e1000_pchlan:
412 if ((hw->mac.type == e1000_pchlan) &&
413 (fwsm & E1000_ICH_FWSM_FW_VALID))
414 break;
415
416 if (hw->phy.ops.check_reset_block(hw)) {
417 e_dbg("Required LANPHYPC toggle blocked by ME\n");
418 break;
419 }
420
421 e_dbg("Toggling LANPHYPC\n");
422
423 /* Set Phy Config Counter to 50msec */
424 mac_reg = er32(FEXTNVM3);
425 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
426 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
427 ew32(FEXTNVM3, mac_reg);
428
429 /* Toggle LANPHYPC Value bit */
430 mac_reg = er32(CTRL);
431 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
432 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
433 ew32(CTRL, mac_reg);
434 e1e_flush();
435 udelay(10);
436 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
437 ew32(CTRL, mac_reg);
438 e1e_flush();
439 if (hw->mac.type < e1000_pch_lpt) {
440 msleep(50);
441 } else {
442 u16 count = 20;
443 do {
444 usleep_range(5000, 10000);
445 } while (!(er32(CTRL_EXT) &
446 E1000_CTRL_EXT_LPCD) && count--);
447 }
448 break;
449 default:
450 break;
451 }
452
453 hw->phy.ops.release(hw);
454
455 /*
456 * Reset the PHY before any access to it. Doing so, ensures
457 * that the PHY is in a known good state before we read/write
458 * PHY registers. The generic reset is sufficient here,
459 * because we haven't determined the PHY type yet.
460 */
461 ret_val = e1000e_phy_hw_reset_generic(hw);
462
463 /* Ungate automatic PHY configuration on non-managed 82579 */
464 if ((hw->mac.type == e1000_pch2lan) &&
465 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
466 usleep_range(10000, 20000);
467 e1000_gate_hw_phy_config_ich8lan(hw, false);
468 }
469
470 return ret_val;
298} 471}
299 472
300/** 473/**
@@ -324,70 +497,41 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
324 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 497 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
325 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 498 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
326 499
327 if (!hw->phy.ops.check_reset_block(hw)) { 500 phy->id = e1000_phy_unknown;
328 u32 fwsm = er32(FWSM);
329
330 /*
331 * The MAC-PHY interconnect may still be in SMBus mode after
332 * Sx->S0. If resetting the PHY is not blocked, toggle the
333 * LANPHYPC Value bit to force the interconnect to PCIe mode.
334 */
335 e1000_toggle_lanphypc_value_ich8lan(hw);
336 msleep(50);
337
338 /*
339 * Gate automatic PHY configuration by hardware on
340 * non-managed 82579
341 */
342 if ((hw->mac.type == e1000_pch2lan) &&
343 !(fwsm & E1000_ICH_FWSM_FW_VALID))
344 e1000_gate_hw_phy_config_ich8lan(hw, true);
345
346 /*
347 * Reset the PHY before any access to it. Doing so, ensures
348 * that the PHY is in a known good state before we read/write
349 * PHY registers. The generic reset is sufficient here,
350 * because we haven't determined the PHY type yet.
351 */
352 ret_val = e1000e_phy_hw_reset_generic(hw);
353 if (ret_val)
354 return ret_val;
355 501
356 /* Ungate automatic PHY configuration on non-managed 82579 */ 502 ret_val = e1000_init_phy_workarounds_pchlan(hw);
357 if ((hw->mac.type == e1000_pch2lan) && 503 if (ret_val)
358 !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 504 return ret_val;
359 usleep_range(10000, 20000);
360 e1000_gate_hw_phy_config_ich8lan(hw, false);
361 }
362 }
363 505
364 phy->id = e1000_phy_unknown; 506 if (phy->id == e1000_phy_unknown)
365 switch (hw->mac.type) { 507 switch (hw->mac.type) {
366 default: 508 default:
367 ret_val = e1000e_get_phy_id(hw); 509 ret_val = e1000e_get_phy_id(hw);
368 if (ret_val) 510 if (ret_val)
369 return ret_val; 511 return ret_val;
370 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) 512 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
513 break;
514 /* fall-through */
515 case e1000_pch2lan:
516 case e1000_pch_lpt:
517 /*
518 * In case the PHY needs to be in mdio slow mode,
519 * set slow mode and try to get the PHY id again.
520 */
521 ret_val = e1000_set_mdio_slow_mode_hv(hw);
522 if (ret_val)
523 return ret_val;
524 ret_val = e1000e_get_phy_id(hw);
525 if (ret_val)
526 return ret_val;
371 break; 527 break;
372 /* fall-through */ 528 }
373 case e1000_pch2lan:
374 /*
375 * In case the PHY needs to be in mdio slow mode,
376 * set slow mode and try to get the PHY id again.
377 */
378 ret_val = e1000_set_mdio_slow_mode_hv(hw);
379 if (ret_val)
380 return ret_val;
381 ret_val = e1000e_get_phy_id(hw);
382 if (ret_val)
383 return ret_val;
384 break;
385 }
386 phy->type = e1000e_get_phy_type_from_id(phy->id); 529 phy->type = e1000e_get_phy_type_from_id(phy->id);
387 530
388 switch (phy->type) { 531 switch (phy->type) {
389 case e1000_phy_82577: 532 case e1000_phy_82577:
390 case e1000_phy_82579: 533 case e1000_phy_82579:
534 case e1000_phy_i217:
391 phy->ops.check_polarity = e1000_check_polarity_82577; 535 phy->ops.check_polarity = e1000_check_polarity_82577;
392 phy->ops.force_speed_duplex = 536 phy->ops.force_speed_duplex =
393 e1000_phy_force_speed_duplex_82577; 537 e1000_phy_force_speed_duplex_82577;
@@ -572,7 +716,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
572 /* Adaptive IFS supported */ 716 /* Adaptive IFS supported */
573 mac->adaptive_ifs = true; 717 mac->adaptive_ifs = true;
574 718
575 /* LED operations */ 719 /* LED and other operations */
576 switch (mac->type) { 720 switch (mac->type) {
577 case e1000_ich8lan: 721 case e1000_ich8lan:
578 case e1000_ich9lan: 722 case e1000_ich9lan:
@@ -591,8 +735,12 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
591 mac->ops.led_on = e1000_led_on_ich8lan; 735 mac->ops.led_on = e1000_led_on_ich8lan;
592 mac->ops.led_off = e1000_led_off_ich8lan; 736 mac->ops.led_off = e1000_led_off_ich8lan;
593 break; 737 break;
594 case e1000_pchlan:
595 case e1000_pch2lan: 738 case e1000_pch2lan:
739 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
740 mac->ops.rar_set = e1000_rar_set_pch2lan;
741 /* fall-through */
742 case e1000_pch_lpt:
743 case e1000_pchlan:
596 /* check management mode */ 744 /* check management mode */
597 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; 745 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
598 /* ID LED init */ 746 /* ID LED init */
@@ -609,12 +757,20 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
609 break; 757 break;
610 } 758 }
611 759
760 if (mac->type == e1000_pch_lpt) {
761 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
762 mac->ops.rar_set = e1000_rar_set_pch_lpt;
763 }
764
612 /* Enable PCS Lock-loss workaround for ICH8 */ 765 /* Enable PCS Lock-loss workaround for ICH8 */
613 if (mac->type == e1000_ich8lan) 766 if (mac->type == e1000_ich8lan)
614 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 767 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
615 768
616 /* Gate automatic PHY configuration by hardware on managed 82579 */ 769 /*
617 if ((mac->type == e1000_pch2lan) && 770 * Gate automatic PHY configuration by hardware on managed
771 * 82579 and i217
772 */
773 if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
618 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 774 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
619 e1000_gate_hw_phy_config_ich8lan(hw, true); 775 e1000_gate_hw_phy_config_ich8lan(hw, true);
620 776
@@ -630,22 +786,50 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
630 **/ 786 **/
631static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) 787static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
632{ 788{
789 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
633 s32 ret_val = 0; 790 s32 ret_val = 0;
634 u16 phy_reg; 791 u16 phy_reg;
635 792
636 if (hw->phy.type != e1000_phy_82579) 793 if ((hw->phy.type != e1000_phy_82579) &&
794 (hw->phy.type != e1000_phy_i217))
637 return 0; 795 return 0;
638 796
639 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); 797 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
640 if (ret_val) 798 if (ret_val)
641 return ret_val; 799 return ret_val;
642 800
643 if (hw->dev_spec.ich8lan.eee_disable) 801 if (dev_spec->eee_disable)
644 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; 802 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
645 else 803 else
646 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; 804 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
647 805
648 return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); 806 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
807 if (ret_val)
808 return ret_val;
809
810 if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
811 /* Save off link partner's EEE ability */
812 ret_val = hw->phy.ops.acquire(hw);
813 if (ret_val)
814 return ret_val;
815 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
816 I217_EEE_LP_ABILITY);
817 if (ret_val)
818 goto release;
819 e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
820
821 /*
822 * EEE is not supported in 100Half, so ignore partner's EEE
823 * in 100 ability if full-duplex is not advertised.
824 */
825 e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
826 if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
827 dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
828release:
829 hw->phy.ops.release(hw);
830 }
831
832 return 0;
649} 833}
650 834
651/** 835/**
@@ -687,6 +871,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
687 return ret_val; 871 return ret_val;
688 } 872 }
689 873
874 /* Clear link partner's EEE ability */
875 hw->dev_spec.ich8lan.eee_lp_ability = 0;
876
690 if (!link) 877 if (!link)
691 return 0; /* No link detected */ 878 return 0; /* No link detected */
692 879
@@ -782,6 +969,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
782 break; 969 break;
783 case e1000_pchlan: 970 case e1000_pchlan:
784 case e1000_pch2lan: 971 case e1000_pch2lan:
972 case e1000_pch_lpt:
785 rc = e1000_init_phy_params_pchlan(hw); 973 rc = e1000_init_phy_params_pchlan(hw);
786 break; 974 break;
787 default: 975 default:
@@ -967,6 +1155,145 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
967} 1155}
968 1156
969/** 1157/**
1158 * e1000_rar_set_pch2lan - Set receive address register
1159 * @hw: pointer to the HW structure
1160 * @addr: pointer to the receive address
1161 * @index: receive address array register
1162 *
1163 * Sets the receive address array register at index to the address passed
1164 * in by addr. For 82579, RAR[0] is the base address register that is to
1165 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1166 * Use SHRA[0-3] in place of those reserved for ME.
1167 **/
1168static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1169{
1170 u32 rar_low, rar_high;
1171
1172 /*
1173 * HW expects these in little endian so we reverse the byte order
1174 * from network order (big endian) to little endian
1175 */
1176 rar_low = ((u32)addr[0] |
1177 ((u32)addr[1] << 8) |
1178 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1179
1180 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1181
1182 /* If MAC address zero, no need to set the AV bit */
1183 if (rar_low || rar_high)
1184 rar_high |= E1000_RAH_AV;
1185
1186 if (index == 0) {
1187 ew32(RAL(index), rar_low);
1188 e1e_flush();
1189 ew32(RAH(index), rar_high);
1190 e1e_flush();
1191 return;
1192 }
1193
1194 if (index < hw->mac.rar_entry_count) {
1195 s32 ret_val;
1196
1197 ret_val = e1000_acquire_swflag_ich8lan(hw);
1198 if (ret_val)
1199 goto out;
1200
1201 ew32(SHRAL(index - 1), rar_low);
1202 e1e_flush();
1203 ew32(SHRAH(index - 1), rar_high);
1204 e1e_flush();
1205
1206 e1000_release_swflag_ich8lan(hw);
1207
1208 /* verify the register updates */
1209 if ((er32(SHRAL(index - 1)) == rar_low) &&
1210 (er32(SHRAH(index - 1)) == rar_high))
1211 return;
1212
1213 e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1214 (index - 1), er32(FWSM));
1215 }
1216
1217out:
1218 e_dbg("Failed to write receive address at index %d\n", index);
1219}
1220
1221/**
1222 * e1000_rar_set_pch_lpt - Set receive address registers
1223 * @hw: pointer to the HW structure
1224 * @addr: pointer to the receive address
1225 * @index: receive address array register
1226 *
1227 * Sets the receive address register array at index to the address passed
1228 * in by addr. For LPT, RAR[0] is the base address register that is to
1229 * contain the MAC address. SHRA[0-10] are the shared receive address
1230 * registers that are shared between the Host and manageability engine (ME).
1231 **/
1232static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1233{
1234 u32 rar_low, rar_high;
1235 u32 wlock_mac;
1236
1237 /*
1238 * HW expects these in little endian so we reverse the byte order
1239 * from network order (big endian) to little endian
1240 */
1241 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1242 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1243
1244 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1245
1246 /* If MAC address zero, no need to set the AV bit */
1247 if (rar_low || rar_high)
1248 rar_high |= E1000_RAH_AV;
1249
1250 if (index == 0) {
1251 ew32(RAL(index), rar_low);
1252 e1e_flush();
1253 ew32(RAH(index), rar_high);
1254 e1e_flush();
1255 return;
1256 }
1257
1258 /*
1259 * The manageability engine (ME) can lock certain SHRAR registers that
1260 * it is using - those registers are unavailable for use.
1261 */
1262 if (index < hw->mac.rar_entry_count) {
1263 wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1264 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1265
1266 /* Check if all SHRAR registers are locked */
1267 if (wlock_mac == 1)
1268 goto out;
1269
1270 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1271 s32 ret_val;
1272
1273 ret_val = e1000_acquire_swflag_ich8lan(hw);
1274
1275 if (ret_val)
1276 goto out;
1277
1278 ew32(SHRAL_PCH_LPT(index - 1), rar_low);
1279 e1e_flush();
1280 ew32(SHRAH_PCH_LPT(index - 1), rar_high);
1281 e1e_flush();
1282
1283 e1000_release_swflag_ich8lan(hw);
1284
1285 /* verify the register updates */
1286 if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1287 (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
1288 return;
1289 }
1290 }
1291
1292out:
1293 e_dbg("Failed to write receive address at index %d\n", index);
1294}
1295
1296/**
970 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked 1297 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
971 * @hw: pointer to the HW structure 1298 * @hw: pointer to the HW structure
972 * 1299 *
@@ -994,6 +1321,8 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
994{ 1321{
995 u16 phy_data; 1322 u16 phy_data;
996 u32 strap = er32(STRAP); 1323 u32 strap = er32(STRAP);
1324 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1325 E1000_STRAP_SMT_FREQ_SHIFT;
997 s32 ret_val = 0; 1326 s32 ret_val = 0;
998 1327
999 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; 1328 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
@@ -1006,6 +1335,19 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1006 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); 1335 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1007 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 1336 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1008 1337
1338 if (hw->phy.type == e1000_phy_i217) {
1339 /* Restore SMBus frequency */
1340 if (freq--) {
1341 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1342 phy_data |= (freq & (1 << 0)) <<
1343 HV_SMB_ADDR_FREQ_LOW_SHIFT;
1344 phy_data |= (freq & (1 << 1)) <<
1345 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1346 } else {
1347 e_dbg("Unsupported SMB frequency in PHY\n");
1348 }
1349 }
1350
1009 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); 1351 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1010} 1352}
1011 1353
@@ -1043,6 +1385,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1043 /* Fall-thru */ 1385 /* Fall-thru */
1044 case e1000_pchlan: 1386 case e1000_pchlan:
1045 case e1000_pch2lan: 1387 case e1000_pch2lan:
1388 case e1000_pch_lpt:
1046 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; 1389 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1047 break; 1390 break;
1048 default: 1391 default:
@@ -1062,10 +1405,9 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1062 * extended configuration before SW configuration 1405 * extended configuration before SW configuration
1063 */ 1406 */
1064 data = er32(EXTCNF_CTRL); 1407 data = er32(EXTCNF_CTRL);
1065 if (!(hw->mac.type == e1000_pch2lan)) { 1408 if ((hw->mac.type < e1000_pch2lan) &&
1066 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) 1409 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1067 goto release; 1410 goto release;
1068 }
1069 1411
1070 cnf_size = er32(EXTCNF_SIZE); 1412 cnf_size = er32(EXTCNF_SIZE);
1071 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; 1413 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
@@ -1076,9 +1418,9 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1076 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 1418 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1077 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 1419 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1078 1420
1079 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 1421 if (((hw->mac.type == e1000_pchlan) &&
1080 (hw->mac.type == e1000_pchlan)) || 1422 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1081 (hw->mac.type == e1000_pch2lan)) { 1423 (hw->mac.type > e1000_pchlan)) {
1082 /* 1424 /*
1083 * HW configures the SMBus address and LEDs when the 1425 * HW configures the SMBus address and LEDs when the
1084 * OEM and LCD Write Enable bits are set in the NVM. 1426 * OEM and LCD Write Enable bits are set in the NVM.
@@ -1121,8 +1463,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1121 reg_addr &= PHY_REG_MASK; 1463 reg_addr &= PHY_REG_MASK;
1122 reg_addr |= phy_page; 1464 reg_addr |= phy_page;
1123 1465
1124 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, 1466 ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
1125 reg_data);
1126 if (ret_val) 1467 if (ret_val)
1127 goto release; 1468 goto release;
1128 } 1469 }
@@ -1159,8 +1500,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1159 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ 1500 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1160 if (link) { 1501 if (link) {
1161 if (hw->phy.type == e1000_phy_82578) { 1502 if (hw->phy.type == e1000_phy_82578) {
1162 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, 1503 ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
1163 &status_reg); 1504 &status_reg);
1164 if (ret_val) 1505 if (ret_val)
1165 goto release; 1506 goto release;
1166 1507
@@ -1175,8 +1516,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1175 } 1516 }
1176 1517
1177 if (hw->phy.type == e1000_phy_82577) { 1518 if (hw->phy.type == e1000_phy_82577) {
1178 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, 1519 ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
1179 &status_reg);
1180 if (ret_val) 1520 if (ret_val)
1181 goto release; 1521 goto release;
1182 1522
@@ -1191,15 +1531,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1191 } 1531 }
1192 1532
1193 /* Link stall fix for link up */ 1533 /* Link stall fix for link up */
1194 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 1534 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
1195 0x0100);
1196 if (ret_val) 1535 if (ret_val)
1197 goto release; 1536 goto release;
1198 1537
1199 } else { 1538 } else {
1200 /* Link stall fix for link down */ 1539 /* Link stall fix for link down */
1201 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 1540 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
1202 0x4100);
1203 if (ret_val) 1541 if (ret_val)
1204 goto release; 1542 goto release;
1205 } 1543 }
@@ -1279,14 +1617,14 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1279 u32 mac_reg; 1617 u32 mac_reg;
1280 u16 oem_reg; 1618 u16 oem_reg;
1281 1619
1282 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) 1620 if (hw->mac.type < e1000_pchlan)
1283 return ret_val; 1621 return ret_val;
1284 1622
1285 ret_val = hw->phy.ops.acquire(hw); 1623 ret_val = hw->phy.ops.acquire(hw);
1286 if (ret_val) 1624 if (ret_val)
1287 return ret_val; 1625 return ret_val;
1288 1626
1289 if (!(hw->mac.type == e1000_pch2lan)) { 1627 if (hw->mac.type == e1000_pchlan) {
1290 mac_reg = er32(EXTCNF_CTRL); 1628 mac_reg = er32(EXTCNF_CTRL);
1291 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) 1629 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1292 goto release; 1630 goto release;
@@ -1298,7 +1636,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1298 1636
1299 mac_reg = er32(PHY_CTRL); 1637 mac_reg = er32(PHY_CTRL);
1300 1638
1301 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); 1639 ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
1302 if (ret_val) 1640 if (ret_val)
1303 goto release; 1641 goto release;
1304 1642
@@ -1310,10 +1648,6 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1310 1648
1311 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) 1649 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1312 oem_reg |= HV_OEM_BITS_LPLU; 1650 oem_reg |= HV_OEM_BITS_LPLU;
1313
1314 /* Set Restart auto-neg to activate the bits */
1315 if (!hw->phy.ops.check_reset_block(hw))
1316 oem_reg |= HV_OEM_BITS_RESTART_AN;
1317 } else { 1651 } else {
1318 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | 1652 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1319 E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) 1653 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
@@ -1324,7 +1658,12 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1324 oem_reg |= HV_OEM_BITS_LPLU; 1658 oem_reg |= HV_OEM_BITS_LPLU;
1325 } 1659 }
1326 1660
1327 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); 1661 /* Set Restart auto-neg to activate the bits */
1662 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1663 !hw->phy.ops.check_reset_block(hw))
1664 oem_reg |= HV_OEM_BITS_RESTART_AN;
1665
1666 ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
1328 1667
1329release: 1668release:
1330 hw->phy.ops.release(hw); 1669 hw->phy.ops.release(hw);
@@ -1420,11 +1759,10 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1420 ret_val = hw->phy.ops.acquire(hw); 1759 ret_val = hw->phy.ops.acquire(hw);
1421 if (ret_val) 1760 if (ret_val)
1422 return ret_val; 1761 return ret_val;
1423 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); 1762 ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1424 if (ret_val) 1763 if (ret_val)
1425 goto release; 1764 goto release;
1426 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, 1765 ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
1427 phy_data & 0x00FF);
1428release: 1766release:
1429 hw->phy.ops.release(hw); 1767 hw->phy.ops.release(hw);
1430 1768
@@ -1483,7 +1821,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1483 u32 mac_reg; 1821 u32 mac_reg;
1484 u16 i; 1822 u16 i;
1485 1823
1486 if (hw->mac.type != e1000_pch2lan) 1824 if (hw->mac.type < e1000_pch2lan)
1487 return 0; 1825 return 0;
1488 1826
1489 /* disable Rx path while enabling/disabling workaround */ 1827 /* disable Rx path while enabling/disabling workaround */
@@ -1656,20 +1994,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1656 ret_val = hw->phy.ops.acquire(hw); 1994 ret_val = hw->phy.ops.acquire(hw);
1657 if (ret_val) 1995 if (ret_val)
1658 return ret_val; 1996 return ret_val;
1659 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, 1997 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
1660 I82579_MSE_THRESHOLD);
1661 if (ret_val) 1998 if (ret_val)
1662 goto release; 1999 goto release;
1663 /* set MSE higher to enable link to stay up when noise is high */ 2000 /* set MSE higher to enable link to stay up when noise is high */
1664 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034); 2001 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
1665 if (ret_val) 2002 if (ret_val)
1666 goto release; 2003 goto release;
1667 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, 2004 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
1668 I82579_MSE_LINK_DOWN);
1669 if (ret_val) 2005 if (ret_val)
1670 goto release; 2006 goto release;
1671 /* drop link after 5 times MSE threshold was reached */ 2007 /* drop link after 5 times MSE threshold was reached */
1672 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005); 2008 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
1673release: 2009release:
1674 hw->phy.ops.release(hw); 2010 hw->phy.ops.release(hw);
1675 2011
@@ -1707,8 +2043,18 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1707 return ret_val; 2043 return ret_val;
1708 2044
1709 if (status_reg & HV_M_STATUS_SPEED_1000) { 2045 if (status_reg & HV_M_STATUS_SPEED_1000) {
2046 u16 pm_phy_reg;
2047
1710 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 2048 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1711 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; 2049 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2050 /* LV 1G Packet drop issue wa */
2051 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2052 if (ret_val)
2053 return ret_val;
2054 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2055 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2056 if (ret_val)
2057 return ret_val;
1712 } else { 2058 } else {
1713 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 2059 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1714 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; 2060 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
@@ -1732,7 +2078,7 @@ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1732{ 2078{
1733 u32 extcnf_ctrl; 2079 u32 extcnf_ctrl;
1734 2080
1735 if (hw->mac.type != e1000_pch2lan) 2081 if (hw->mac.type < e1000_pch2lan)
1736 return; 2082 return;
1737 2083
1738 extcnf_ctrl = er32(EXTCNF_CTRL); 2084 extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -1834,12 +2180,10 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1834 ret_val = hw->phy.ops.acquire(hw); 2180 ret_val = hw->phy.ops.acquire(hw);
1835 if (ret_val) 2181 if (ret_val)
1836 return ret_val; 2182 return ret_val;
1837 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, 2183 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
1838 I82579_LPI_UPDATE_TIMER); 2184 I82579_LPI_UPDATE_TIMER);
1839 if (!ret_val) 2185 if (!ret_val)
1840 ret_val = hw->phy.ops.write_reg_locked(hw, 2186 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
1841 I82579_EMI_DATA,
1842 0x1387);
1843 hw->phy.ops.release(hw); 2187 hw->phy.ops.release(hw);
1844 } 2188 }
1845 2189
@@ -2212,7 +2556,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2212 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2556 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2213 2557
2214 /* Check if the flash descriptor is valid */ 2558 /* Check if the flash descriptor is valid */
2215 if (hsfsts.hsf_status.fldesvalid == 0) { 2559 if (!hsfsts.hsf_status.fldesvalid) {
2216 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n"); 2560 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
2217 return -E1000_ERR_NVM; 2561 return -E1000_ERR_NVM;
2218 } 2562 }
@@ -2232,7 +2576,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2232 * completed. 2576 * completed.
2233 */ 2577 */
2234 2578
2235 if (hsfsts.hsf_status.flcinprog == 0) { 2579 if (!hsfsts.hsf_status.flcinprog) {
2236 /* 2580 /*
2237 * There is no cycle running at present, 2581 * There is no cycle running at present,
2238 * so we can start a cycle. 2582 * so we can start a cycle.
@@ -2250,7 +2594,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2250 */ 2594 */
2251 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 2595 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2252 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2596 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2253 if (hsfsts.hsf_status.flcinprog == 0) { 2597 if (!hsfsts.hsf_status.flcinprog) {
2254 ret_val = 0; 2598 ret_val = 0;
2255 break; 2599 break;
2256 } 2600 }
@@ -2292,12 +2636,12 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2292 /* wait till FDONE bit is set to 1 */ 2636 /* wait till FDONE bit is set to 1 */
2293 do { 2637 do {
2294 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2638 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2295 if (hsfsts.hsf_status.flcdone == 1) 2639 if (hsfsts.hsf_status.flcdone)
2296 break; 2640 break;
2297 udelay(1); 2641 udelay(1);
2298 } while (i++ < timeout); 2642 } while (i++ < timeout);
2299 2643
2300 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) 2644 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
2301 return 0; 2645 return 0;
2302 2646
2303 return -E1000_ERR_NVM; 2647 return -E1000_ERR_NVM;
@@ -2408,10 +2752,10 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2408 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 2752 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2409 */ 2753 */
2410 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2754 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2411 if (hsfsts.hsf_status.flcerr == 1) { 2755 if (hsfsts.hsf_status.flcerr) {
2412 /* Repeat for some time before giving up. */ 2756 /* Repeat for some time before giving up. */
2413 continue; 2757 continue;
2414 } else if (hsfsts.hsf_status.flcdone == 0) { 2758 } else if (!hsfsts.hsf_status.flcdone) {
2415 e_dbg("Timeout error - flash cycle did not complete.\n"); 2759 e_dbg("Timeout error - flash cycle did not complete.\n");
2416 break; 2760 break;
2417 } 2761 }
@@ -2641,7 +2985,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2641 if (ret_val) 2985 if (ret_val)
2642 return ret_val; 2986 return ret_val;
2643 2987
2644 if ((data & 0x40) == 0) { 2988 if (!(data & 0x40)) {
2645 data |= 0x40; 2989 data |= 0x40;
2646 ret_val = e1000_write_nvm(hw, 0x19, 1, &data); 2990 ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
2647 if (ret_val) 2991 if (ret_val)
@@ -2759,10 +3103,10 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2759 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. 3103 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2760 */ 3104 */
2761 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3105 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2762 if (hsfsts.hsf_status.flcerr == 1) 3106 if (hsfsts.hsf_status.flcerr)
2763 /* Repeat for some time before giving up. */ 3107 /* Repeat for some time before giving up. */
2764 continue; 3108 continue;
2765 if (hsfsts.hsf_status.flcdone == 0) { 3109 if (!hsfsts.hsf_status.flcdone) {
2766 e_dbg("Timeout error - flash cycle did not complete.\n"); 3110 e_dbg("Timeout error - flash cycle did not complete.\n");
2767 break; 3111 break;
2768 } 3112 }
@@ -2914,10 +3258,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2914 * a few more times else Done 3258 * a few more times else Done
2915 */ 3259 */
2916 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3260 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2917 if (hsfsts.hsf_status.flcerr == 1) 3261 if (hsfsts.hsf_status.flcerr)
2918 /* repeat for some time before giving up */ 3262 /* repeat for some time before giving up */
2919 continue; 3263 continue;
2920 else if (hsfsts.hsf_status.flcdone == 0) 3264 else if (!hsfsts.hsf_status.flcdone)
2921 return ret_val; 3265 return ret_val;
2922 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); 3266 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
2923 } 3267 }
@@ -3059,8 +3403,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3059static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) 3403static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3060{ 3404{
3061 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3405 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3062 u16 reg; 3406 u16 kum_cfg;
3063 u32 ctrl, kab; 3407 u32 ctrl, reg;
3064 s32 ret_val; 3408 s32 ret_val;
3065 3409
3066 /* 3410 /*
@@ -3094,12 +3438,12 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3094 } 3438 }
3095 3439
3096 if (hw->mac.type == e1000_pchlan) { 3440 if (hw->mac.type == e1000_pchlan) {
3097 /* Save the NVM K1 bit setting*/ 3441 /* Save the NVM K1 bit setting */
3098 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg); 3442 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3099 if (ret_val) 3443 if (ret_val)
3100 return ret_val; 3444 return ret_val;
3101 3445
3102 if (reg & E1000_NVM_K1_ENABLE) 3446 if (kum_cfg & E1000_NVM_K1_ENABLE)
3103 dev_spec->nvm_k1_enabled = true; 3447 dev_spec->nvm_k1_enabled = true;
3104 else 3448 else
3105 dev_spec->nvm_k1_enabled = false; 3449 dev_spec->nvm_k1_enabled = false;
@@ -3129,6 +3473,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3129 /* cannot issue a flush here because it hangs the hardware */ 3473 /* cannot issue a flush here because it hangs the hardware */
3130 msleep(20); 3474 msleep(20);
3131 3475
3476 /* Set Phy Config Counter to 50msec */
3477 if (hw->mac.type == e1000_pch2lan) {
3478 reg = er32(FEXTNVM3);
3479 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3480 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3481 ew32(FEXTNVM3, reg);
3482 }
3483
3132 if (!ret_val) 3484 if (!ret_val)
3133 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); 3485 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
3134 3486
@@ -3153,9 +3505,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3153 ew32(IMC, 0xffffffff); 3505 ew32(IMC, 0xffffffff);
3154 er32(ICR); 3506 er32(ICR);
3155 3507
3156 kab = er32(KABGTXD); 3508 reg = er32(KABGTXD);
3157 kab |= E1000_KABGTXD_BGSQLBIAS; 3509 reg |= E1000_KABGTXD_BGSQLBIAS;
3158 ew32(KABGTXD, kab); 3510 ew32(KABGTXD, reg);
3159 3511
3160 return 0; 3512 return 0;
3161} 3513}
@@ -3308,6 +3660,13 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3308 */ 3660 */
3309 reg = er32(RFCTL); 3661 reg = er32(RFCTL);
3310 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); 3662 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3663
3664 /*
3665 * Disable IPv6 extension header parsing because some malformed
3666 * IPv6 headers can hang the Rx.
3667 */
3668 if (hw->mac.type == e1000_ich8lan)
3669 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3311 ew32(RFCTL, reg); 3670 ew32(RFCTL, reg);
3312} 3671}
3313 3672
@@ -3358,6 +3717,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3358 ew32(FCTTV, hw->fc.pause_time); 3717 ew32(FCTTV, hw->fc.pause_time);
3359 if ((hw->phy.type == e1000_phy_82578) || 3718 if ((hw->phy.type == e1000_phy_82578) ||
3360 (hw->phy.type == e1000_phy_82579) || 3719 (hw->phy.type == e1000_phy_82579) ||
3720 (hw->phy.type == e1000_phy_i217) ||
3361 (hw->phy.type == e1000_phy_82577)) { 3721 (hw->phy.type == e1000_phy_82577)) {
3362 ew32(FCRTV_PCH, hw->fc.refresh_time); 3722 ew32(FCRTV_PCH, hw->fc.refresh_time);
3363 3723
@@ -3421,6 +3781,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3421 break; 3781 break;
3422 case e1000_phy_82577: 3782 case e1000_phy_82577:
3423 case e1000_phy_82579: 3783 case e1000_phy_82579:
3784 case e1000_phy_i217:
3424 ret_val = e1000_copper_link_setup_82577(hw); 3785 ret_val = e1000_copper_link_setup_82577(hw);
3425 if (ret_val) 3786 if (ret_val)
3426 return ret_val; 3787 return ret_val;
@@ -3667,14 +4028,88 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3667 * the LPLU setting in the NVM or custom setting. For PCH and newer parts, 4028 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
3668 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also 4029 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
3669 * needs to be written. 4030 * needs to be written.
4031 * Parts that support (and are linked to a partner which support) EEE in
4032 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4033 * than 10Mbps w/o EEE.
3670 **/ 4034 **/
3671void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) 4035void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3672{ 4036{
4037 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3673 u32 phy_ctrl; 4038 u32 phy_ctrl;
3674 s32 ret_val; 4039 s32 ret_val;
3675 4040
3676 phy_ctrl = er32(PHY_CTRL); 4041 phy_ctrl = er32(PHY_CTRL);
3677 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; 4042 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4043 if (hw->phy.type == e1000_phy_i217) {
4044 u16 phy_reg;
4045
4046 ret_val = hw->phy.ops.acquire(hw);
4047 if (ret_val)
4048 goto out;
4049
4050 if (!dev_spec->eee_disable) {
4051 u16 eee_advert;
4052
4053 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
4054 I217_EEE_ADVERTISEMENT);
4055 if (ret_val)
4056 goto release;
4057 e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
4058
4059 /*
4060 * Disable LPLU if both link partners support 100BaseT
4061 * EEE and 100Full is advertised on both ends of the
4062 * link.
4063 */
4064 if ((eee_advert & I217_EEE_100_SUPPORTED) &&
4065 (dev_spec->eee_lp_ability &
4066 I217_EEE_100_SUPPORTED) &&
4067 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4068 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4069 E1000_PHY_CTRL_NOND0A_LPLU);
4070 }
4071
4072 /*
4073 * For i217 Intel Rapid Start Technology support,
4074 * when the system is going into Sx and no manageability engine
4075 * is present, the driver must configure proxy to reset only on
4076 * power good. LPI (Low Power Idle) state must also reset only
4077 * on power good, as well as the MTA (Multicast table array).
4078 * The SMBus release must also be disabled on LCD reset.
4079 */
4080 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
4081
4082 /* Enable proxy to reset only on power good. */
4083 e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
4084 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4085 e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
4086
4087 /*
4088 * Set bit enable LPI (EEE) to reset only on
4089 * power good.
4090 */
4091 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
4092 phy_reg |= I217_SxCTRL_MASK;
4093 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
4094
4095 /* Disable the SMB release on LCD reset. */
4096 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4097 phy_reg &= ~I217_MEMPWR;
4098 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4099 }
4100
4101 /*
4102 * Enable MTA to reset for Intel Rapid Start Technology
4103 * Support
4104 */
4105 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4106 phy_reg |= I217_CGFREG_MASK;
4107 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4108
4109release:
4110 hw->phy.ops.release(hw);
4111 }
4112out:
3678 ew32(PHY_CTRL, phy_ctrl); 4113 ew32(PHY_CTRL, phy_ctrl);
3679 4114
3680 if (hw->mac.type == e1000_ich8lan) 4115 if (hw->mac.type == e1000_ich8lan)
@@ -3682,7 +4117,11 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3682 4117
3683 if (hw->mac.type >= e1000_pchlan) { 4118 if (hw->mac.type >= e1000_pchlan) {
3684 e1000_oem_bits_config_ich8lan(hw, false); 4119 e1000_oem_bits_config_ich8lan(hw, false);
3685 e1000_phy_hw_reset_ich8lan(hw); 4120
4121 /* Reset PHY to activate OEM bits on 82577/8 */
4122 if (hw->mac.type == e1000_pchlan)
4123 e1000e_phy_hw_reset_generic(hw);
4124
3686 ret_val = hw->phy.ops.acquire(hw); 4125 ret_val = hw->phy.ops.acquire(hw);
3687 if (ret_val) 4126 if (ret_val)
3688 return; 4127 return;
@@ -3699,44 +4138,61 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3699 * on which PHY resets are not blocked, if the PHY registers cannot be 4138 * on which PHY resets are not blocked, if the PHY registers cannot be
3700 * accessed properly by the s/w toggle the LANPHYPC value to power cycle 4139 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
3701 * the PHY. 4140 * the PHY.
4141 * On i217, setup Intel Rapid Start Technology.
3702 **/ 4142 **/
3703void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) 4143void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3704{ 4144{
3705 u16 phy_id1, phy_id2;
3706 s32 ret_val; 4145 s32 ret_val;
3707 4146
3708 if ((hw->mac.type != e1000_pch2lan) || 4147 if (hw->mac.type < e1000_pch2lan)
3709 hw->phy.ops.check_reset_block(hw))
3710 return; 4148 return;
3711 4149
3712 ret_val = hw->phy.ops.acquire(hw); 4150 ret_val = e1000_init_phy_workarounds_pchlan(hw);
3713 if (ret_val) { 4151 if (ret_val) {
3714 e_dbg("Failed to acquire PHY semaphore in resume\n"); 4152 e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
3715 return; 4153 return;
3716 } 4154 }
3717 4155
3718 /* Test access to the PHY registers by reading the ID regs */ 4156 /*
3719 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); 4157 * For i217 Intel Rapid Start Technology support when the system
3720 if (ret_val) 4158 * is transitioning from Sx and no manageability engine is present
3721 goto release; 4159 * configure SMBus to restore on reset, disable proxy, and enable
3722 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); 4160 * the reset on MTA (Multicast table array).
3723 if (ret_val) 4161 */
3724 goto release; 4162 if (hw->phy.type == e1000_phy_i217) {
3725 4163 u16 phy_reg;
3726 if (hw->phy.id == ((u32)(phy_id1 << 16) |
3727 (u32)(phy_id2 & PHY_REVISION_MASK)))
3728 goto release;
3729 4164
3730 e1000_toggle_lanphypc_value_ich8lan(hw); 4165 ret_val = hw->phy.ops.acquire(hw);
4166 if (ret_val) {
4167 e_dbg("Failed to setup iRST\n");
4168 return;
4169 }
3731 4170
3732 hw->phy.ops.release(hw); 4171 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
3733 msleep(50); 4172 /*
3734 e1000_phy_hw_reset(hw); 4173 * Restore clear on SMB if no manageability engine
3735 msleep(50); 4174 * is present
3736 return; 4175 */
4176 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4177 if (ret_val)
4178 goto release;
4179 phy_reg |= I217_MEMPWR_MASK;
4180 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
3737 4181
4182 /* Disable Proxy */
4183 e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
4184 }
4185 /* Enable reset on MTA */
4186 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4187 if (ret_val)
4188 goto release;
4189 phy_reg &= ~I217_CGFREG_MASK;
4190 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
3738release: 4191release:
3739 hw->phy.ops.release(hw); 4192 if (ret_val)
4193 e_dbg("Error %d in resume workarounds\n", ret_val);
4194 hw->phy.ops.release(hw);
4195 }
3740} 4196}
3741 4197
3742/** 4198/**
@@ -3916,7 +4372,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3916 4372
3917 /* If EEPROM is not marked present, init the IGP 3 PHY manually */ 4373 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
3918 if (hw->mac.type <= e1000_ich9lan) { 4374 if (hw->mac.type <= e1000_ich9lan) {
3919 if (((er32(EECD) & E1000_EECD_PRES) == 0) && 4375 if (!(er32(EECD) & E1000_EECD_PRES) &&
3920 (hw->phy.type == e1000_phy_igp_3)) { 4376 (hw->phy.type == e1000_phy_igp_3)) {
3921 e1000e_phy_init_script_igp3(hw); 4377 e1000e_phy_init_script_igp3(hw);
3922 } 4378 }
@@ -3977,6 +4433,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3977 /* Clear PHY statistics registers */ 4433 /* Clear PHY statistics registers */
3978 if ((hw->phy.type == e1000_phy_82578) || 4434 if ((hw->phy.type == e1000_phy_82578) ||
3979 (hw->phy.type == e1000_phy_82579) || 4435 (hw->phy.type == e1000_phy_82579) ||
4436 (hw->phy.type == e1000_phy_i217) ||
3980 (hw->phy.type == e1000_phy_82577)) { 4437 (hw->phy.type == e1000_phy_82577)) {
3981 ret_val = hw->phy.ops.acquire(hw); 4438 ret_val = hw->phy.ops.acquire(hw);
3982 if (ret_val) 4439 if (ret_val)
@@ -4021,6 +4478,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
4021 .setup_physical_interface= e1000_setup_copper_link_ich8lan, 4478 .setup_physical_interface= e1000_setup_copper_link_ich8lan,
4022 /* id_led_init dependent on mac type */ 4479 /* id_led_init dependent on mac type */
4023 .config_collision_dist = e1000e_config_collision_dist_generic, 4480 .config_collision_dist = e1000e_config_collision_dist_generic,
4481 .rar_set = e1000e_rar_set_generic,
4024}; 4482};
4025 4483
4026static const struct e1000_phy_operations ich8_phy_ops = { 4484static const struct e1000_phy_operations ich8_phy_ops = {
@@ -4135,3 +4593,22 @@ const struct e1000_info e1000_pch2_info = {
4135 .phy_ops = &ich8_phy_ops, 4593 .phy_ops = &ich8_phy_ops,
4136 .nvm_ops = &ich8_nvm_ops, 4594 .nvm_ops = &ich8_nvm_ops,
4137}; 4595};
4596
4597const struct e1000_info e1000_pch_lpt_info = {
4598 .mac = e1000_pch_lpt,
4599 .flags = FLAG_IS_ICH
4600 | FLAG_HAS_WOL
4601 | FLAG_HAS_CTRLEXT_ON_LOAD
4602 | FLAG_HAS_AMT
4603 | FLAG_HAS_FLASH
4604 | FLAG_HAS_JUMBO_FRAMES
4605 | FLAG_APME_IN_WUC,
4606 .flags2 = FLAG2_HAS_PHY_STATS
4607 | FLAG2_HAS_EEE,
4608 .pba = 26,
4609 .max_hw_frame_size = DEFAULT_JUMBO,
4610 .get_variants = e1000_get_variants_ich8lan,
4611 .mac_ops = &ich8_mac_ops,
4612 .phy_ops = &ich8_phy_ops,
4613 .nvm_ops = &ich8_nvm_ops,
4614};
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index decad98c1059..026e8b3ab52e 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -143,12 +143,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
143 /* Setup the receive address */ 143 /* Setup the receive address */
144 e_dbg("Programming MAC Address into RAR[0]\n"); 144 e_dbg("Programming MAC Address into RAR[0]\n");
145 145
146 e1000e_rar_set(hw, hw->mac.addr, 0); 146 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
147 147
148 /* Zero out the other (rar_entry_count - 1) receive addresses */ 148 /* Zero out the other (rar_entry_count - 1) receive addresses */
149 e_dbg("Clearing RAR[1-%u]\n", rar_count - 1); 149 e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
150 for (i = 1; i < rar_count; i++) 150 for (i = 1; i < rar_count; i++)
151 e1000e_rar_set(hw, mac_addr, i); 151 hw->mac.ops.rar_set(hw, mac_addr, i);
152} 152}
153 153
154/** 154/**
@@ -215,13 +215,13 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
215 * same as the normal permanent MAC address stored by the HW into the 215 * same as the normal permanent MAC address stored by the HW into the
216 * RAR. Do this by mapping this address into RAR0. 216 * RAR. Do this by mapping this address into RAR0.
217 */ 217 */
218 e1000e_rar_set(hw, alt_mac_addr, 0); 218 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
219 219
220 return 0; 220 return 0;
221} 221}
222 222
223/** 223/**
224 * e1000e_rar_set - Set receive address register 224 * e1000e_rar_set_generic - Set receive address register
225 * @hw: pointer to the HW structure 225 * @hw: pointer to the HW structure
226 * @addr: pointer to the receive address 226 * @addr: pointer to the receive address
227 * @index: receive address array register 227 * @index: receive address array register
@@ -229,7 +229,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
229 * Sets the receive address array register at index to the address passed 229 * Sets the receive address array register at index to the address passed
230 * in by addr. 230 * in by addr.
231 **/ 231 **/
232void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) 232void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
233{ 233{
234 u32 rar_low, rar_high; 234 u32 rar_low, rar_high;
235 235
@@ -681,7 +681,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
681 return ret_val; 681 return ret_val;
682 } 682 }
683 683
684 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) 684 if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
685 hw->fc.requested_mode = e1000_fc_none; 685 hw->fc.requested_mode = e1000_fc_none;
686 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) 686 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
687 hw->fc.requested_mode = e1000_fc_tx_pause; 687 hw->fc.requested_mode = e1000_fc_tx_pause;
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 473f8e711510..bacc950fc684 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -85,7 +85,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
85 85
86 /* Check that the host interface is enabled. */ 86 /* Check that the host interface is enabled. */
87 hicr = er32(HICR); 87 hicr = er32(HICR);
88 if ((hicr & E1000_HICR_EN) == 0) { 88 if (!(hicr & E1000_HICR_EN)) {
89 e_dbg("E1000_HOST_EN bit disabled.\n"); 89 e_dbg("E1000_HOST_EN bit disabled.\n");
90 return -E1000_ERR_HOST_INTERFACE_COMMAND; 90 return -E1000_ERR_HOST_INTERFACE_COMMAND;
91 } 91 }
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 19ab2154802c..a4b0435b00dc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
56 56
57#define DRV_EXTRAVERSION "-k" 57#define DRV_EXTRAVERSION "-k"
58 58
59#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION 59#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
@@ -79,6 +79,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
79 [board_ich10lan] = &e1000_ich10_info, 79 [board_ich10lan] = &e1000_ich10_info,
80 [board_pchlan] = &e1000_pch_info, 80 [board_pchlan] = &e1000_pch_info,
81 [board_pch2lan] = &e1000_pch2_info, 81 [board_pch2lan] = &e1000_pch2_info,
82 [board_pch_lpt] = &e1000_pch_lpt_info,
82}; 83};
83 84
84struct e1000_reg_info { 85struct e1000_reg_info {
@@ -110,14 +111,14 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
110 111
111 /* Rx Registers */ 112 /* Rx Registers */
112 {E1000_RCTL, "RCTL"}, 113 {E1000_RCTL, "RCTL"},
113 {E1000_RDLEN, "RDLEN"}, 114 {E1000_RDLEN(0), "RDLEN"},
114 {E1000_RDH, "RDH"}, 115 {E1000_RDH(0), "RDH"},
115 {E1000_RDT, "RDT"}, 116 {E1000_RDT(0), "RDT"},
116 {E1000_RDTR, "RDTR"}, 117 {E1000_RDTR, "RDTR"},
117 {E1000_RXDCTL(0), "RXDCTL"}, 118 {E1000_RXDCTL(0), "RXDCTL"},
118 {E1000_ERT, "ERT"}, 119 {E1000_ERT, "ERT"},
119 {E1000_RDBAL, "RDBAL"}, 120 {E1000_RDBAL(0), "RDBAL"},
120 {E1000_RDBAH, "RDBAH"}, 121 {E1000_RDBAH(0), "RDBAH"},
121 {E1000_RDFH, "RDFH"}, 122 {E1000_RDFH, "RDFH"},
122 {E1000_RDFT, "RDFT"}, 123 {E1000_RDFT, "RDFT"},
123 {E1000_RDFHS, "RDFHS"}, 124 {E1000_RDFHS, "RDFHS"},
@@ -126,11 +127,11 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
126 127
127 /* Tx Registers */ 128 /* Tx Registers */
128 {E1000_TCTL, "TCTL"}, 129 {E1000_TCTL, "TCTL"},
129 {E1000_TDBAL, "TDBAL"}, 130 {E1000_TDBAL(0), "TDBAL"},
130 {E1000_TDBAH, "TDBAH"}, 131 {E1000_TDBAH(0), "TDBAH"},
131 {E1000_TDLEN, "TDLEN"}, 132 {E1000_TDLEN(0), "TDLEN"},
132 {E1000_TDH, "TDH"}, 133 {E1000_TDH(0), "TDH"},
133 {E1000_TDT, "TDT"}, 134 {E1000_TDT(0), "TDT"},
134 {E1000_TIDV, "TIDV"}, 135 {E1000_TIDV, "TIDV"},
135 {E1000_TXDCTL(0), "TXDCTL"}, 136 {E1000_TXDCTL(0), "TXDCTL"},
136 {E1000_TADV, "TADV"}, 137 {E1000_TADV, "TADV"},
@@ -538,43 +539,15 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
538 adapter->hw_csum_good++; 539 adapter->hw_csum_good++;
539} 540}
540 541
541/**
542 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
543 * @hw: pointer to the HW structure
544 * @tail: address of tail descriptor register
545 * @i: value to write to tail descriptor register
546 *
547 * When updating the tail register, the ME could be accessing Host CSR
548 * registers at the same time. Normally, this is handled in h/w by an
549 * arbiter but on some parts there is a bug that acknowledges Host accesses
550 * later than it should which could result in the descriptor register to
551 * have an incorrect value. Workaround this by checking the FWSM register
552 * which has bit 24 set while ME is accessing Host CSR registers, wait
553 * if it is set and try again a number of times.
554 **/
555static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail,
556 unsigned int i)
557{
558 unsigned int j = 0;
559
560 while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
561 (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
562 udelay(50);
563
564 writel(i, tail);
565
566 if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
567 return E1000_ERR_SWFW_SYNC;
568
569 return 0;
570}
571
572static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) 542static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
573{ 543{
574 struct e1000_adapter *adapter = rx_ring->adapter; 544 struct e1000_adapter *adapter = rx_ring->adapter;
575 struct e1000_hw *hw = &adapter->hw; 545 struct e1000_hw *hw = &adapter->hw;
546 s32 ret_val = __ew32_prepare(hw);
576 547
577 if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) { 548 writel(i, rx_ring->tail);
549
550 if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
578 u32 rctl = er32(RCTL); 551 u32 rctl = er32(RCTL);
579 ew32(RCTL, rctl & ~E1000_RCTL_EN); 552 ew32(RCTL, rctl & ~E1000_RCTL_EN);
580 e_err("ME firmware caused invalid RDT - resetting\n"); 553 e_err("ME firmware caused invalid RDT - resetting\n");
@@ -586,8 +559,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
586{ 559{
587 struct e1000_adapter *adapter = tx_ring->adapter; 560 struct e1000_adapter *adapter = tx_ring->adapter;
588 struct e1000_hw *hw = &adapter->hw; 561 struct e1000_hw *hw = &adapter->hw;
562 s32 ret_val = __ew32_prepare(hw);
563
564 writel(i, tx_ring->tail);
589 565
590 if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) { 566 if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
591 u32 tctl = er32(TCTL); 567 u32 tctl = er32(TCTL);
592 ew32(TCTL, tctl & ~E1000_TCTL_EN); 568 ew32(TCTL, tctl & ~E1000_TCTL_EN);
593 e_err("ME firmware caused invalid TDT - resetting\n"); 569 e_err("ME firmware caused invalid TDT - resetting\n");
@@ -1053,7 +1029,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
1053 1029
1054 if (!adapter->tx_hang_recheck && 1030 if (!adapter->tx_hang_recheck &&
1055 (adapter->flags2 & FLAG2_DMA_BURST)) { 1031 (adapter->flags2 & FLAG2_DMA_BURST)) {
1056 /* May be block on write-back, flush and detect again 1032 /*
1033 * May be block on write-back, flush and detect again
1057 * flush pending descriptor writebacks to memory 1034 * flush pending descriptor writebacks to memory
1058 */ 1035 */
1059 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 1036 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -1108,6 +1085,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
1108 phy_1000t_status, 1085 phy_1000t_status,
1109 phy_ext_status, 1086 phy_ext_status,
1110 pci_status); 1087 pci_status);
1088
1089 /* Suggest workaround for known h/w issue */
1090 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1091 e_err("Try turning off Tx pause (flow control) via ethtool\n");
1111} 1092}
1112 1093
1113/** 1094/**
@@ -1645,7 +1626,10 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1645 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 1626 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1646 1627
1647 writel(0, rx_ring->head); 1628 writel(0, rx_ring->head);
1648 writel(0, rx_ring->tail); 1629 if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
1630 e1000e_update_rdt_wa(rx_ring, 0);
1631 else
1632 writel(0, rx_ring->tail);
1649} 1633}
1650 1634
1651static void e1000e_downshift_workaround(struct work_struct *work) 1635static void e1000e_downshift_workaround(struct work_struct *work)
@@ -2318,7 +2302,10 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2318 tx_ring->next_to_clean = 0; 2302 tx_ring->next_to_clean = 0;
2319 2303
2320 writel(0, tx_ring->head); 2304 writel(0, tx_ring->head);
2321 writel(0, tx_ring->tail); 2305 if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2306 e1000e_update_tdt_wa(tx_ring, 0);
2307 else
2308 writel(0, tx_ring->tail);
2322} 2309}
2323 2310
2324/** 2311/**
@@ -2530,33 +2517,31 @@ err:
2530} 2517}
2531 2518
2532/** 2519/**
2533 * e1000_clean - NAPI Rx polling callback 2520 * e1000e_poll - NAPI Rx polling callback
2534 * @napi: struct associated with this polling callback 2521 * @napi: struct associated with this polling callback
2535 * @budget: amount of packets driver is allowed to process this poll 2522 * @weight: number of packets driver is allowed to process this poll
2536 **/ 2523 **/
2537static int e1000_clean(struct napi_struct *napi, int budget) 2524static int e1000e_poll(struct napi_struct *napi, int weight)
2538{ 2525{
2539 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 2526 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2527 napi);
2540 struct e1000_hw *hw = &adapter->hw; 2528 struct e1000_hw *hw = &adapter->hw;
2541 struct net_device *poll_dev = adapter->netdev; 2529 struct net_device *poll_dev = adapter->netdev;
2542 int tx_cleaned = 1, work_done = 0; 2530 int tx_cleaned = 1, work_done = 0;
2543 2531
2544 adapter = netdev_priv(poll_dev); 2532 adapter = netdev_priv(poll_dev);
2545 2533
2546 if (adapter->msix_entries && 2534 if (!adapter->msix_entries ||
2547 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2535 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2548 goto clean_rx; 2536 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2549
2550 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2551 2537
2552clean_rx: 2538 adapter->clean_rx(adapter->rx_ring, &work_done, weight);
2553 adapter->clean_rx(adapter->rx_ring, &work_done, budget);
2554 2539
2555 if (!tx_cleaned) 2540 if (!tx_cleaned)
2556 work_done = budget; 2541 work_done = weight;
2557 2542
2558 /* If budget not fully consumed, exit the polling mode */ 2543 /* If weight not fully consumed, exit the polling mode */
2559 if (work_done < budget) { 2544 if (work_done < weight) {
2560 if (adapter->itr_setting & 3) 2545 if (adapter->itr_setting & 3)
2561 e1000_set_itr(adapter); 2546 e1000_set_itr(adapter);
2562 napi_complete(napi); 2547 napi_complete(napi);
@@ -2800,13 +2785,13 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2800 /* Setup the HW Tx Head and Tail descriptor pointers */ 2785 /* Setup the HW Tx Head and Tail descriptor pointers */
2801 tdba = tx_ring->dma; 2786 tdba = tx_ring->dma;
2802 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); 2787 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2803 ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); 2788 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2804 ew32(TDBAH, (tdba >> 32)); 2789 ew32(TDBAH(0), (tdba >> 32));
2805 ew32(TDLEN, tdlen); 2790 ew32(TDLEN(0), tdlen);
2806 ew32(TDH, 0); 2791 ew32(TDH(0), 0);
2807 ew32(TDT, 0); 2792 ew32(TDT(0), 0);
2808 tx_ring->head = adapter->hw.hw_addr + E1000_TDH; 2793 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2809 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT; 2794 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2810 2795
2811 /* Set the Tx Interrupt Delay register */ 2796 /* Set the Tx Interrupt Delay register */
2812 ew32(TIDV, adapter->tx_int_delay); 2797 ew32(TIDV, adapter->tx_int_delay);
@@ -2879,8 +2864,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2879 u32 rctl, rfctl; 2864 u32 rctl, rfctl;
2880 u32 pages = 0; 2865 u32 pages = 0;
2881 2866
2882 /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2867 /* Workaround Si errata on PCHx - configure jumbo frame flow */
2883 if (hw->mac.type == e1000_pch2lan) { 2868 if (hw->mac.type >= e1000_pch2lan) {
2884 s32 ret_val; 2869 s32 ret_val;
2885 2870
2886 if (adapter->netdev->mtu > ETH_DATA_LEN) 2871 if (adapter->netdev->mtu > ETH_DATA_LEN)
@@ -2955,6 +2940,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2955 /* Enable Extended Status in all Receive Descriptors */ 2940 /* Enable Extended Status in all Receive Descriptors */
2956 rfctl = er32(RFCTL); 2941 rfctl = er32(RFCTL);
2957 rfctl |= E1000_RFCTL_EXTEN; 2942 rfctl |= E1000_RFCTL_EXTEN;
2943 ew32(RFCTL, rfctl);
2958 2944
2959 /* 2945 /*
2960 * 82571 and greater support packet-split where the protocol 2946 * 82571 and greater support packet-split where the protocol
@@ -2980,13 +2966,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2980 if (adapter->rx_ps_pages) { 2966 if (adapter->rx_ps_pages) {
2981 u32 psrctl = 0; 2967 u32 psrctl = 0;
2982 2968
2983 /*
2984 * disable packet split support for IPv6 extension headers,
2985 * because some malformed IPv6 headers can hang the Rx
2986 */
2987 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2988 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2989
2990 /* Enable Packet split descriptors */ 2969 /* Enable Packet split descriptors */
2991 rctl |= E1000_RCTL_DTYP_PS; 2970 rctl |= E1000_RCTL_DTYP_PS;
2992 2971
@@ -3025,7 +3004,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
3025 */ 3004 */
3026 } 3005 }
3027 3006
3028 ew32(RFCTL, rfctl);
3029 ew32(RCTL, rctl); 3007 ew32(RCTL, rctl);
3030 /* just started the receive unit, no need to restart */ 3008 /* just started the receive unit, no need to restart */
3031 adapter->flags &= ~FLAG_RX_RESTART_NOW; 3009 adapter->flags &= ~FLAG_RX_RESTART_NOW;
@@ -3110,13 +3088,13 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3110 * the Base and Length of the Rx Descriptor Ring 3088 * the Base and Length of the Rx Descriptor Ring
3111 */ 3089 */
3112 rdba = rx_ring->dma; 3090 rdba = rx_ring->dma;
3113 ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); 3091 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3114 ew32(RDBAH, (rdba >> 32)); 3092 ew32(RDBAH(0), (rdba >> 32));
3115 ew32(RDLEN, rdlen); 3093 ew32(RDLEN(0), rdlen);
3116 ew32(RDH, 0); 3094 ew32(RDH(0), 0);
3117 ew32(RDT, 0); 3095 ew32(RDT(0), 0);
3118 rx_ring->head = adapter->hw.hw_addr + E1000_RDH; 3096 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3119 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT; 3097 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3120 3098
3121 /* Enable Receive Checksum Offload for TCP and UDP */ 3099 /* Enable Receive Checksum Offload for TCP and UDP */
3122 rxcsum = er32(RXCSUM); 3100 rxcsum = er32(RXCSUM);
@@ -3229,7 +3207,7 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
3229 netdev_for_each_uc_addr(ha, netdev) { 3207 netdev_for_each_uc_addr(ha, netdev) {
3230 if (!rar_entries) 3208 if (!rar_entries)
3231 break; 3209 break;
3232 e1000e_rar_set(hw, ha->addr, rar_entries--); 3210 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3233 count++; 3211 count++;
3234 } 3212 }
3235 } 3213 }
@@ -3510,6 +3488,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3510 fc->refresh_time = 0x1000; 3488 fc->refresh_time = 0x1000;
3511 break; 3489 break;
3512 case e1000_pch2lan: 3490 case e1000_pch2lan:
3491 case e1000_pch_lpt:
3513 fc->high_water = 0x05C20; 3492 fc->high_water = 0x05C20;
3514 fc->low_water = 0x05048; 3493 fc->low_water = 0x05048;
3515 fc->pause_time = 0x0650; 3494 fc->pause_time = 0x0650;
@@ -3799,7 +3778,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3799 /* fire an unusual interrupt on the test handler */ 3778 /* fire an unusual interrupt on the test handler */
3800 ew32(ICS, E1000_ICS_RXSEQ); 3779 ew32(ICS, E1000_ICS_RXSEQ);
3801 e1e_flush(); 3780 e1e_flush();
3802 msleep(50); 3781 msleep(100);
3803 3782
3804 e1000_irq_disable(adapter); 3783 e1000_irq_disable(adapter);
3805 3784
@@ -4038,6 +4017,7 @@ static int e1000_close(struct net_device *netdev)
4038static int e1000_set_mac(struct net_device *netdev, void *p) 4017static int e1000_set_mac(struct net_device *netdev, void *p)
4039{ 4018{
4040 struct e1000_adapter *adapter = netdev_priv(netdev); 4019 struct e1000_adapter *adapter = netdev_priv(netdev);
4020 struct e1000_hw *hw = &adapter->hw;
4041 struct sockaddr *addr = p; 4021 struct sockaddr *addr = p;
4042 4022
4043 if (!is_valid_ether_addr(addr->sa_data)) 4023 if (!is_valid_ether_addr(addr->sa_data))
@@ -4046,7 +4026,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
4046 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 4026 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4047 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 4027 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4048 4028
4049 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 4029 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4050 4030
4051 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { 4031 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4052 /* activate the work around */ 4032 /* activate the work around */
@@ -4060,9 +4040,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
4060 * are dropped. Eventually the LAA will be in RAR[0] and 4040 * are dropped. Eventually the LAA will be in RAR[0] and
4061 * RAR[14] 4041 * RAR[14]
4062 */ 4042 */
4063 e1000e_rar_set(&adapter->hw, 4043 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4064 adapter->hw.mac.addr, 4044 adapter->hw.mac.rar_entry_count - 1);
4065 adapter->hw.mac.rar_entry_count - 1);
4066 } 4045 }
4067 4046
4068 return 0; 4047 return 0;
@@ -4641,7 +4620,7 @@ link_up:
4641 * reset from the other port. Set the appropriate LAA in RAR[0] 4620 * reset from the other port. Set the appropriate LAA in RAR[0]
4642 */ 4621 */
4643 if (e1000e_get_laa_state_82571(hw)) 4622 if (e1000e_get_laa_state_82571(hw))
4644 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 4623 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
4645 4624
4646 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) 4625 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
4647 e1000e_check_82574_phy_workaround(adapter); 4626 e1000e_check_82574_phy_workaround(adapter);
@@ -5151,6 +5130,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5151 /* if count is 0 then mapping error has occurred */ 5130 /* if count is 0 then mapping error has occurred */
5152 count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); 5131 count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
5153 if (count) { 5132 if (count) {
5133 skb_tx_timestamp(skb);
5134
5154 netdev_sent_queue(netdev, skb->len); 5135 netdev_sent_queue(netdev, skb->len);
5155 e1000_tx_queue(tx_ring, tx_flags, count); 5136 e1000_tx_queue(tx_ring, tx_flags, count);
5156 /* Make sure there is space in the ring for the next send. */ 5137 /* Make sure there is space in the ring for the next send. */
@@ -5285,22 +5266,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5285 return -EINVAL; 5266 return -EINVAL;
5286 } 5267 }
5287 5268
5288 /* Jumbo frame workaround on 82579 requires CRC be stripped */ 5269 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
5289 if ((adapter->hw.mac.type == e1000_pch2lan) && 5270 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
5290 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 5271 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5291 (new_mtu > ETH_DATA_LEN)) { 5272 (new_mtu > ETH_DATA_LEN)) {
5292 e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); 5273 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
5293 return -EINVAL; 5274 return -EINVAL;
5294 } 5275 }
5295 5276
5296 /* 82573 Errata 17 */
5297 if (((adapter->hw.mac.type == e1000_82573) ||
5298 (adapter->hw.mac.type == e1000_82574)) &&
5299 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
5300 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
5301 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
5302 }
5303
5304 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 5277 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5305 usleep_range(1000, 2000); 5278 usleep_range(1000, 2000);
5306 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 5279 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -5694,7 +5667,7 @@ static int __e1000_resume(struct pci_dev *pdev)
5694 return err; 5667 return err;
5695 } 5668 }
5696 5669
5697 if (hw->mac.type == e1000_pch2lan) 5670 if (hw->mac.type >= e1000_pch2lan)
5698 e1000_resume_workarounds_pchlan(&adapter->hw); 5671 e1000_resume_workarounds_pchlan(&adapter->hw);
5699 5672
5700 e1000e_power_up_phy(adapter); 5673 e1000e_power_up_phy(adapter);
@@ -6226,7 +6199,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6226 netdev->netdev_ops = &e1000e_netdev_ops; 6199 netdev->netdev_ops = &e1000e_netdev_ops;
6227 e1000e_set_ethtool_ops(netdev); 6200 e1000e_set_ethtool_ops(netdev);
6228 netdev->watchdog_timeo = 5 * HZ; 6201 netdev->watchdog_timeo = 5 * HZ;
6229 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); 6202 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6230 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 6203 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6231 6204
6232 netdev->mem_start = mmio_start; 6205 netdev->mem_start = mmio_start;
@@ -6593,6 +6566,9 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6593 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, 6566 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6594 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, 6567 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6595 6568
6569 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
6570 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
6571
6596 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ 6572 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
6597}; 6573};
6598MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 6574MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ff796e42c3eb..55cc1565bc2f 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -106,7 +106,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
106/* 106/*
107 * Interrupt Throttle Rate (interrupts/sec) 107 * Interrupt Throttle Rate (interrupts/sec)
108 * 108 *
109 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) 109 * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative
110 */ 110 */
111E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); 111E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
112#define DEFAULT_ITR 3 112#define DEFAULT_ITR 3
@@ -166,8 +166,8 @@ E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lea
166 * 166 *
167 * Default Value: 1 (enabled) 167 * Default Value: 1 (enabled)
168 */ 168 */
169E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \ 169E1000_PARAM(CrcStripping,
170 "the CRC"); 170 "Enable CRC Stripping, disable if your BMC needs the CRC");
171 171
172struct e1000_option { 172struct e1000_option {
173 enum { enable_option, range_option, list_option } type; 173 enum { enable_option, range_option, list_option } type;
@@ -344,53 +344,60 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
344 344
345 if (num_InterruptThrottleRate > bd) { 345 if (num_InterruptThrottleRate > bd) {
346 adapter->itr = InterruptThrottleRate[bd]; 346 adapter->itr = InterruptThrottleRate[bd];
347 switch (adapter->itr) { 347
348 case 0: 348 /*
349 e_info("%s turned off\n", opt.name); 349 * Make sure a message is printed for non-special
350 break; 350 * values. And in case of an invalid option, display
351 case 1: 351 * warning, use default and go through itr/itr_setting
352 e_info("%s set to dynamic mode\n", opt.name); 352 * adjustment logic below
353 adapter->itr_setting = adapter->itr; 353 */
354 adapter->itr = 20000; 354 if ((adapter->itr > 4) &&
355 break; 355 e1000_validate_option(&adapter->itr, &opt, adapter))
356 case 3: 356 adapter->itr = opt.def;
357 e_info("%s set to dynamic conservative mode\n",
358 opt.name);
359 adapter->itr_setting = adapter->itr;
360 adapter->itr = 20000;
361 break;
362 case 4:
363 e_info("%s set to simplified (2000-8000 ints) "
364 "mode\n", opt.name);
365 adapter->itr_setting = 4;
366 break;
367 default:
368 /*
369 * Save the setting, because the dynamic bits
370 * change itr.
371 */
372 if (e1000_validate_option(&adapter->itr, &opt,
373 adapter) &&
374 (adapter->itr == 3)) {
375 /*
376 * In case of invalid user value,
377 * default to conservative mode.
378 */
379 adapter->itr_setting = adapter->itr;
380 adapter->itr = 20000;
381 } else {
382 /*
383 * Clear the lower two bits because
384 * they are used as control.
385 */
386 adapter->itr_setting =
387 adapter->itr & ~3;
388 }
389 break;
390 }
391 } else { 357 } else {
392 adapter->itr_setting = opt.def; 358 /*
359 * If no option specified, use default value and go
360 * through the logic below to adjust itr/itr_setting
361 */
362 adapter->itr = opt.def;
363
364 /*
365 * Make sure a message is printed for non-special
366 * default values
367 */
368 if (adapter->itr > 4)
369 e_info("%s set to default %d\n", opt.name,
370 adapter->itr);
371 }
372
373 adapter->itr_setting = adapter->itr;
374 switch (adapter->itr) {
375 case 0:
376 e_info("%s turned off\n", opt.name);
377 break;
378 case 1:
379 e_info("%s set to dynamic mode\n", opt.name);
380 adapter->itr = 20000;
381 break;
382 case 3:
383 e_info("%s set to dynamic conservative mode\n",
384 opt.name);
393 adapter->itr = 20000; 385 adapter->itr = 20000;
386 break;
387 case 4:
388 e_info("%s set to simplified (2000-8000 ints) mode\n",
389 opt.name);
390 break;
391 default:
392 /*
393 * Save the setting, because the dynamic bits
394 * change itr.
395 *
396 * Clear the lower two bits because
397 * they are used as control.
398 */
399 adapter->itr_setting &= ~3;
400 break;
394 } 401 }
395 } 402 }
396 { /* Interrupt Mode */ 403 { /* Interrupt Mode */
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 35b45578c604..0334d013bc3c 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -639,6 +639,45 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
639} 639}
640 640
641/** 641/**
642 * e1000_set_master_slave_mode - Setup PHY for Master/slave mode
643 * @hw: pointer to the HW structure
644 *
645 * Sets up Master/slave mode
646 **/
647static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
648{
649 s32 ret_val;
650 u16 phy_data;
651
652 /* Resolve Master/Slave mode */
653 ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &phy_data);
654 if (ret_val)
655 return ret_val;
656
657 /* load defaults for future use */
658 hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
659 ((phy_data & CR_1000T_MS_VALUE) ?
660 e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto;
661
662 switch (hw->phy.ms_type) {
663 case e1000_ms_force_master:
664 phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
665 break;
666 case e1000_ms_force_slave:
667 phy_data |= CR_1000T_MS_ENABLE;
668 phy_data &= ~(CR_1000T_MS_VALUE);
669 break;
670 case e1000_ms_auto:
671 phy_data &= ~CR_1000T_MS_ENABLE;
672 /* fall-through */
673 default:
674 break;
675 }
676
677 return e1e_wphy(hw, PHY_1000T_CTRL, phy_data);
678}
679
680/**
642 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link 681 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
643 * @hw: pointer to the HW structure 682 * @hw: pointer to the HW structure
644 * 683 *
@@ -659,7 +698,11 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
659 /* Enable downshift */ 698 /* Enable downshift */
660 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; 699 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
661 700
662 return e1e_wphy(hw, I82577_CFG_REG, phy_data); 701 ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
702 if (ret_val)
703 return ret_val;
704
705 return e1000_set_master_slave_mode(hw);
663} 706}
664 707
665/** 708/**
@@ -718,12 +761,28 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
718 * 1 - Enabled 761 * 1 - Enabled
719 */ 762 */
720 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 763 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
721 if (phy->disable_polarity_correction == 1) 764 if (phy->disable_polarity_correction)
722 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 765 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
723 766
724 /* Enable downshift on BM (disabled by default) */ 767 /* Enable downshift on BM (disabled by default) */
725 if (phy->type == e1000_phy_bm) 768 if (phy->type == e1000_phy_bm) {
769 /* For 82574/82583, first disable then enable downshift */
770 if (phy->id == BME1000_E_PHY_ID_R2) {
771 phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT;
772 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL,
773 phy_data);
774 if (ret_val)
775 return ret_val;
776 /* Commit the changes. */
777 ret_val = e1000e_commit_phy(hw);
778 if (ret_val) {
779 e_dbg("Error committing the PHY changes\n");
780 return ret_val;
781 }
782 }
783
726 phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; 784 phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
785 }
727 786
728 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 787 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
729 if (ret_val) 788 if (ret_val)
@@ -879,31 +938,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
879 return ret_val; 938 return ret_val;
880 } 939 }
881 940
882 ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); 941 ret_val = e1000_set_master_slave_mode(hw);
883 if (ret_val)
884 return ret_val;
885
886 /* load defaults for future use */
887 phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
888 ((data & CR_1000T_MS_VALUE) ?
889 e1000_ms_force_master :
890 e1000_ms_force_slave) :
891 e1000_ms_auto;
892
893 switch (phy->ms_type) {
894 case e1000_ms_force_master:
895 data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
896 break;
897 case e1000_ms_force_slave:
898 data |= CR_1000T_MS_ENABLE;
899 data &= ~(CR_1000T_MS_VALUE);
900 break;
901 case e1000_ms_auto:
902 data &= ~CR_1000T_MS_ENABLE;
903 default:
904 break;
905 }
906 ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
907 } 942 }
908 943
909 return ret_val; 944 return ret_val;
@@ -1090,7 +1125,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1090 * If autoneg_advertised is zero, we assume it was not defaulted 1125 * If autoneg_advertised is zero, we assume it was not defaulted
1091 * by the calling code so we set to advertise full capability. 1126 * by the calling code so we set to advertise full capability.
1092 */ 1127 */
1093 if (phy->autoneg_advertised == 0) 1128 if (!phy->autoneg_advertised)
1094 phy->autoneg_advertised = phy->autoneg_mask; 1129 phy->autoneg_advertised = phy->autoneg_mask;
1095 1130
1096 e_dbg("Reconfiguring auto-neg advertisement params\n"); 1131 e_dbg("Reconfiguring auto-neg advertisement params\n");
@@ -1596,7 +1631,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
1596 ret_val = e1e_rphy(hw, offset, &phy_data); 1631 ret_val = e1e_rphy(hw, offset, &phy_data);
1597 1632
1598 if (!ret_val) 1633 if (!ret_val)
1599 phy->speed_downgraded = (phy_data & mask); 1634 phy->speed_downgraded = !!(phy_data & mask);
1600 1635
1601 return ret_val; 1636 return ret_val;
1602} 1637}
@@ -1925,8 +1960,8 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1925 if (ret_val) 1960 if (ret_val)
1926 return ret_val; 1961 return ret_val;
1927 1962
1928 phy->polarity_correction = (phy_data & 1963 phy->polarity_correction = !!(phy_data &
1929 M88E1000_PSCR_POLARITY_REVERSAL); 1964 M88E1000_PSCR_POLARITY_REVERSAL);
1930 1965
1931 ret_val = e1000_check_polarity_m88(hw); 1966 ret_val = e1000_check_polarity_m88(hw);
1932 if (ret_val) 1967 if (ret_val)
@@ -1936,7 +1971,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1936 if (ret_val) 1971 if (ret_val)
1937 return ret_val; 1972 return ret_val;
1938 1973
1939 phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); 1974 phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
1940 1975
1941 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { 1976 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
1942 ret_val = e1000_get_cable_length(hw); 1977 ret_val = e1000_get_cable_length(hw);
@@ -1999,7 +2034,7 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
1999 if (ret_val) 2034 if (ret_val)
2000 return ret_val; 2035 return ret_val;
2001 2036
2002 phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); 2037 phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
2003 2038
2004 if ((data & IGP01E1000_PSSR_SPEED_MASK) == 2039 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
2005 IGP01E1000_PSSR_SPEED_1000MBPS) { 2040 IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -2052,8 +2087,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
2052 ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); 2087 ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
2053 if (ret_val) 2088 if (ret_val)
2054 return ret_val; 2089 return ret_val;
2055 phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) 2090 phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
2056 ? false : true;
2057 2091
2058 if (phy->polarity_correction) { 2092 if (phy->polarity_correction) {
2059 ret_val = e1000_check_polarity_ife(hw); 2093 ret_val = e1000_check_polarity_ife(hw);
@@ -2070,7 +2104,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
2070 if (ret_val) 2104 if (ret_val)
2071 return ret_val; 2105 return ret_val;
2072 2106
2073 phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; 2107 phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
2074 2108
2075 /* The following parameters are undefined for 10/100 operation. */ 2109 /* The following parameters are undefined for 10/100 operation. */
2076 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; 2110 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
@@ -2320,6 +2354,9 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
2320 case I82579_E_PHY_ID: 2354 case I82579_E_PHY_ID:
2321 phy_type = e1000_phy_82579; 2355 phy_type = e1000_phy_82579;
2322 break; 2356 break;
2357 case I217_E_PHY_ID:
2358 phy_type = e1000_phy_i217;
2359 break;
2323 default: 2360 default:
2324 phy_type = e1000_phy_unknown; 2361 phy_type = e1000_phy_unknown;
2325 break; 2362 break;
@@ -2979,7 +3016,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2979 if ((hw->phy.type == e1000_phy_82578) && 3016 if ((hw->phy.type == e1000_phy_82578) &&
2980 (hw->phy.revision >= 1) && 3017 (hw->phy.revision >= 1) &&
2981 (hw->phy.addr == 2) && 3018 (hw->phy.addr == 2) &&
2982 ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { 3019 !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
2983 u16 data2 = 0x7EFF; 3020 u16 data2 = 0x7EFF;
2984 ret_val = e1000_access_phy_debug_regs_hv(hw, 3021 ret_val = e1000_access_phy_debug_regs_hv(hw,
2985 (1 << 6) | 0x3, 3022 (1 << 6) | 0x3,
@@ -3265,7 +3302,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3265 if (ret_val) 3302 if (ret_val)
3266 return ret_val; 3303 return ret_val;
3267 3304
3268 phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; 3305 phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
3269 3306
3270 if ((data & I82577_PHY_STATUS2_SPEED_MASK) == 3307 if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
3271 I82577_PHY_STATUS2_SPEED_1000MBPS) { 3308 I82577_PHY_STATUS2_SPEED_1000MBPS) {
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 6565c463185c..97c197fd4a8e 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -33,5 +33,7 @@
33obj-$(CONFIG_IGB) += igb.o 33obj-$(CONFIG_IGB) += igb.o
34 34
35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ 35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o 36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
37 e1000_i210.o
37 38
39igb-$(CONFIG_IGB_PTP) += igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 08bdc33715ee..e65083958421 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -36,6 +36,7 @@
36 36
37#include "e1000_mac.h" 37#include "e1000_mac.h"
38#include "e1000_82575.h" 38#include "e1000_82575.h"
39#include "e1000_i210.h"
39 40
40static s32 igb_get_invariants_82575(struct e1000_hw *); 41static s32 igb_get_invariants_82575(struct e1000_hw *);
41static s32 igb_acquire_phy_82575(struct e1000_hw *); 42static s32 igb_acquire_phy_82575(struct e1000_hw *);
@@ -52,6 +53,8 @@ static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
52static s32 igb_reset_hw_82575(struct e1000_hw *); 53static s32 igb_reset_hw_82575(struct e1000_hw *);
53static s32 igb_reset_hw_82580(struct e1000_hw *); 54static s32 igb_reset_hw_82580(struct e1000_hw *);
54static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); 55static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
56static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
57static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
55static s32 igb_setup_copper_link_82575(struct e1000_hw *); 58static s32 igb_setup_copper_link_82575(struct e1000_hw *);
56static s32 igb_setup_serdes_link_82575(struct e1000_hw *); 59static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
57static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); 60static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
@@ -96,6 +99,8 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
96 break; 99 break;
97 case e1000_82580: 100 case e1000_82580:
98 case e1000_i350: 101 case e1000_i350:
102 case e1000_i210:
103 case e1000_i211:
99 reg = rd32(E1000_MDICNFG); 104 reg = rd32(E1000_MDICNFG);
100 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); 105 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
101 break; 106 break;
@@ -150,6 +155,17 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
150 case E1000_DEV_ID_I350_SGMII: 155 case E1000_DEV_ID_I350_SGMII:
151 mac->type = e1000_i350; 156 mac->type = e1000_i350;
152 break; 157 break;
158 case E1000_DEV_ID_I210_COPPER:
159 case E1000_DEV_ID_I210_COPPER_OEM1:
160 case E1000_DEV_ID_I210_COPPER_IT:
161 case E1000_DEV_ID_I210_FIBER:
162 case E1000_DEV_ID_I210_SERDES:
163 case E1000_DEV_ID_I210_SGMII:
164 mac->type = e1000_i210;
165 break;
166 case E1000_DEV_ID_I211_COPPER:
167 mac->type = e1000_i211;
168 break;
153 default: 169 default:
154 return -E1000_ERR_MAC_INIT; 170 return -E1000_ERR_MAC_INIT;
155 break; 171 break;
@@ -182,26 +198,44 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
182 /* Set mta register count */ 198 /* Set mta register count */
183 mac->mta_reg_count = 128; 199 mac->mta_reg_count = 128;
184 /* Set rar entry count */ 200 /* Set rar entry count */
185 mac->rar_entry_count = E1000_RAR_ENTRIES_82575; 201 switch (mac->type) {
186 if (mac->type == e1000_82576) 202 case e1000_82576:
187 mac->rar_entry_count = E1000_RAR_ENTRIES_82576; 203 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
188 if (mac->type == e1000_82580) 204 break;
205 case e1000_82580:
189 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 206 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
190 if (mac->type == e1000_i350) 207 break;
208 case e1000_i350:
209 case e1000_i210:
210 case e1000_i211:
191 mac->rar_entry_count = E1000_RAR_ENTRIES_I350; 211 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
212 break;
213 default:
214 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
215 break;
216 }
192 /* reset */ 217 /* reset */
193 if (mac->type >= e1000_82580) 218 if (mac->type >= e1000_82580)
194 mac->ops.reset_hw = igb_reset_hw_82580; 219 mac->ops.reset_hw = igb_reset_hw_82580;
195 else 220 else
196 mac->ops.reset_hw = igb_reset_hw_82575; 221 mac->ops.reset_hw = igb_reset_hw_82575;
222
223 if (mac->type >= e1000_i210) {
224 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
225 mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
226 } else {
227 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
228 mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
229 }
230
197 /* Set if part includes ASF firmware */ 231 /* Set if part includes ASF firmware */
198 mac->asf_firmware_present = true; 232 mac->asf_firmware_present = true;
199 /* Set if manageability features are enabled. */ 233 /* Set if manageability features are enabled. */
200 mac->arc_subsystem_valid = 234 mac->arc_subsystem_valid =
201 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) 235 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
202 ? true : false; 236 ? true : false;
203 /* enable EEE on i350 parts */ 237 /* enable EEE on i350 parts and later parts */
204 if (mac->type == e1000_i350) 238 if (mac->type >= e1000_i350)
205 dev_spec->eee_disable = false; 239 dev_spec->eee_disable = false;
206 else 240 else
207 dev_spec->eee_disable = true; 241 dev_spec->eee_disable = true;
@@ -213,26 +247,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
213 247
214 /* NVM initialization */ 248 /* NVM initialization */
215 eecd = rd32(E1000_EECD); 249 eecd = rd32(E1000_EECD);
216
217 nvm->opcode_bits = 8;
218 nvm->delay_usec = 1;
219 switch (nvm->override) {
220 case e1000_nvm_override_spi_large:
221 nvm->page_size = 32;
222 nvm->address_bits = 16;
223 break;
224 case e1000_nvm_override_spi_small:
225 nvm->page_size = 8;
226 nvm->address_bits = 8;
227 break;
228 default:
229 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
230 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
231 break;
232 }
233
234 nvm->type = e1000_nvm_eeprom_spi;
235
236 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 250 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
237 E1000_EECD_SIZE_EX_SHIFT); 251 E1000_EECD_SIZE_EX_SHIFT);
238 252
@@ -242,6 +256,33 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
242 */ 256 */
243 size += NVM_WORD_SIZE_BASE_SHIFT; 257 size += NVM_WORD_SIZE_BASE_SHIFT;
244 258
259 nvm->word_size = 1 << size;
260 if (hw->mac.type < e1000_i210) {
261 nvm->opcode_bits = 8;
262 nvm->delay_usec = 1;
263 switch (nvm->override) {
264 case e1000_nvm_override_spi_large:
265 nvm->page_size = 32;
266 nvm->address_bits = 16;
267 break;
268 case e1000_nvm_override_spi_small:
269 nvm->page_size = 8;
270 nvm->address_bits = 8;
271 break;
272 default:
273 nvm->page_size = eecd
274 & E1000_EECD_ADDR_BITS ? 32 : 8;
275 nvm->address_bits = eecd
276 & E1000_EECD_ADDR_BITS ? 16 : 8;
277 break;
278 }
279 if (nvm->word_size == (1 << 15))
280 nvm->page_size = 128;
281
282 nvm->type = e1000_nvm_eeprom_spi;
283 } else
284 nvm->type = e1000_nvm_flash_hw;
285
245 /* 286 /*
246 * Check for invalid size 287 * Check for invalid size
247 */ 288 */
@@ -249,32 +290,60 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
249 pr_notice("The NVM size is not valid, defaulting to 32K\n"); 290 pr_notice("The NVM size is not valid, defaulting to 32K\n");
250 size = 15; 291 size = 15;
251 } 292 }
252 nvm->word_size = 1 << size;
253 if (nvm->word_size == (1 << 15))
254 nvm->page_size = 128;
255 293
256 /* NVM Function Pointers */ 294 /* NVM Function Pointers */
257 nvm->ops.acquire = igb_acquire_nvm_82575;
258 if (nvm->word_size < (1 << 15))
259 nvm->ops.read = igb_read_nvm_eerd;
260 else
261 nvm->ops.read = igb_read_nvm_spi;
262
263 nvm->ops.release = igb_release_nvm_82575;
264 switch (hw->mac.type) { 295 switch (hw->mac.type) {
265 case e1000_82580: 296 case e1000_82580:
266 nvm->ops.validate = igb_validate_nvm_checksum_82580; 297 nvm->ops.validate = igb_validate_nvm_checksum_82580;
267 nvm->ops.update = igb_update_nvm_checksum_82580; 298 nvm->ops.update = igb_update_nvm_checksum_82580;
299 nvm->ops.acquire = igb_acquire_nvm_82575;
300 nvm->ops.release = igb_release_nvm_82575;
301 if (nvm->word_size < (1 << 15))
302 nvm->ops.read = igb_read_nvm_eerd;
303 else
304 nvm->ops.read = igb_read_nvm_spi;
305 nvm->ops.write = igb_write_nvm_spi;
268 break; 306 break;
269 case e1000_i350: 307 case e1000_i350:
270 nvm->ops.validate = igb_validate_nvm_checksum_i350; 308 nvm->ops.validate = igb_validate_nvm_checksum_i350;
271 nvm->ops.update = igb_update_nvm_checksum_i350; 309 nvm->ops.update = igb_update_nvm_checksum_i350;
310 nvm->ops.acquire = igb_acquire_nvm_82575;
311 nvm->ops.release = igb_release_nvm_82575;
312 if (nvm->word_size < (1 << 15))
313 nvm->ops.read = igb_read_nvm_eerd;
314 else
315 nvm->ops.read = igb_read_nvm_spi;
316 nvm->ops.write = igb_write_nvm_spi;
317 break;
318 case e1000_i210:
319 nvm->ops.validate = igb_validate_nvm_checksum_i210;
320 nvm->ops.update = igb_update_nvm_checksum_i210;
321 nvm->ops.acquire = igb_acquire_nvm_i210;
322 nvm->ops.release = igb_release_nvm_i210;
323 nvm->ops.read = igb_read_nvm_srrd_i210;
324 nvm->ops.valid_led_default = igb_valid_led_default_i210;
325 break;
326 case e1000_i211:
327 nvm->ops.acquire = igb_acquire_nvm_i210;
328 nvm->ops.release = igb_release_nvm_i210;
329 nvm->ops.read = igb_read_nvm_i211;
330 nvm->ops.valid_led_default = igb_valid_led_default_i210;
331 nvm->ops.validate = NULL;
332 nvm->ops.update = NULL;
333 nvm->ops.write = NULL;
272 break; 334 break;
273 default: 335 default:
274 nvm->ops.validate = igb_validate_nvm_checksum; 336 nvm->ops.validate = igb_validate_nvm_checksum;
275 nvm->ops.update = igb_update_nvm_checksum; 337 nvm->ops.update = igb_update_nvm_checksum;
338 nvm->ops.acquire = igb_acquire_nvm_82575;
339 nvm->ops.release = igb_release_nvm_82575;
340 if (nvm->word_size < (1 << 15))
341 nvm->ops.read = igb_read_nvm_eerd;
342 else
343 nvm->ops.read = igb_read_nvm_spi;
344 nvm->ops.write = igb_write_nvm_spi;
345 break;
276 } 346 }
277 nvm->ops.write = igb_write_nvm_spi;
278 347
279 /* if part supports SR-IOV then initialize mailbox parameters */ 348 /* if part supports SR-IOV then initialize mailbox parameters */
280 switch (mac->type) { 349 switch (mac->type) {
@@ -312,9 +381,13 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
312 if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { 381 if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
313 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; 382 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
314 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; 383 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
315 } else if (hw->mac.type >= e1000_82580) { 384 } else if ((hw->mac.type == e1000_82580)
385 || (hw->mac.type == e1000_i350)) {
316 phy->ops.read_reg = igb_read_phy_reg_82580; 386 phy->ops.read_reg = igb_read_phy_reg_82580;
317 phy->ops.write_reg = igb_write_phy_reg_82580; 387 phy->ops.write_reg = igb_write_phy_reg_82580;
388 } else if (hw->phy.type >= e1000_phy_i210) {
389 phy->ops.read_reg = igb_read_phy_reg_gs40g;
390 phy->ops.write_reg = igb_write_phy_reg_gs40g;
318 } else { 391 } else {
319 phy->ops.read_reg = igb_read_phy_reg_igp; 392 phy->ops.read_reg = igb_read_phy_reg_igp;
320 phy->ops.write_reg = igb_write_phy_reg_igp; 393 phy->ops.write_reg = igb_write_phy_reg_igp;
@@ -343,6 +416,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
343 else 416 else
344 phy->ops.get_cable_length = igb_get_cable_length_m88; 417 phy->ops.get_cable_length = igb_get_cable_length_m88;
345 418
419 if (phy->id == I210_I_PHY_ID) {
420 phy->ops.get_cable_length =
421 igb_get_cable_length_m88_gen2;
422 phy->ops.set_d0_lplu_state =
423 igb_set_d0_lplu_state_82580;
424 phy->ops.set_d3_lplu_state =
425 igb_set_d3_lplu_state_82580;
426 }
346 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; 427 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
347 break; 428 break;
348 case IGP03E1000_E_PHY_ID: 429 case IGP03E1000_E_PHY_ID:
@@ -359,6 +440,17 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
359 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; 440 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
360 phy->ops.get_cable_length = igb_get_cable_length_82580; 441 phy->ops.get_cable_length = igb_get_cable_length_82580;
361 phy->ops.get_phy_info = igb_get_phy_info_82580; 442 phy->ops.get_phy_info = igb_get_phy_info_82580;
443 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
444 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
445 break;
446 case I210_I_PHY_ID:
447 phy->type = e1000_phy_i210;
448 phy->ops.get_phy_info = igb_get_phy_info_m88;
449 phy->ops.check_polarity = igb_check_polarity_m88;
450 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
451 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
452 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
453 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
362 break; 454 break;
363 default: 455 default:
364 return -E1000_ERR_PHY; 456 return -E1000_ERR_PHY;
@@ -385,7 +477,7 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
385 else if (hw->bus.func == E1000_FUNC_3) 477 else if (hw->bus.func == E1000_FUNC_3)
386 mask = E1000_SWFW_PHY3_SM; 478 mask = E1000_SWFW_PHY3_SM;
387 479
388 return igb_acquire_swfw_sync_82575(hw, mask); 480 return hw->mac.ops.acquire_swfw_sync(hw, mask);
389} 481}
390 482
391/** 483/**
@@ -406,7 +498,7 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
406 else if (hw->bus.func == E1000_FUNC_3) 498 else if (hw->bus.func == E1000_FUNC_3)
407 mask = E1000_SWFW_PHY3_SM; 499 mask = E1000_SWFW_PHY3_SM;
408 500
409 igb_release_swfw_sync_82575(hw, mask); 501 hw->mac.ops.release_swfw_sync(hw, mask);
410} 502}
411 503
412/** 504/**
@@ -510,6 +602,8 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
510 break; 602 break;
511 case e1000_82580: 603 case e1000_82580:
512 case e1000_i350: 604 case e1000_i350:
605 case e1000_i210:
606 case e1000_i211:
513 mdic = rd32(E1000_MDICNFG); 607 mdic = rd32(E1000_MDICNFG);
514 mdic &= E1000_MDICNFG_PHY_MASK; 608 mdic &= E1000_MDICNFG_PHY_MASK;
515 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; 609 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
@@ -674,6 +768,96 @@ out:
674} 768}
675 769
676/** 770/**
771 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
772 * @hw: pointer to the HW structure
773 * @active: true to enable LPLU, false to disable
774 *
775 * Sets the LPLU D0 state according to the active flag. When
776 * activating LPLU this function also disables smart speed
777 * and vice versa. LPLU will not be activated unless the
778 * device autonegotiation advertisement meets standards of
779 * either 10 or 10/100 or 10/100/1000 at all duplexes.
780 * This is a function pointer entry point only called by
781 * PHY setup routines.
782 **/
783static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
784{
785 struct e1000_phy_info *phy = &hw->phy;
786 s32 ret_val = 0;
787 u16 data;
788
789 data = rd32(E1000_82580_PHY_POWER_MGMT);
790
791 if (active) {
792 data |= E1000_82580_PM_D0_LPLU;
793
794 /* When LPLU is enabled, we should disable SmartSpeed */
795 data &= ~E1000_82580_PM_SPD;
796 } else {
797 data &= ~E1000_82580_PM_D0_LPLU;
798
799 /*
800 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
801 * during Dx states where the power conservation is most
802 * important. During driver activity we should enable
803 * SmartSpeed, so performance is maintained.
804 */
805 if (phy->smart_speed == e1000_smart_speed_on)
806 data |= E1000_82580_PM_SPD;
807 else if (phy->smart_speed == e1000_smart_speed_off)
808 data &= ~E1000_82580_PM_SPD; }
809
810 wr32(E1000_82580_PHY_POWER_MGMT, data);
811 return ret_val;
812}
813
814/**
815 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
816 * @hw: pointer to the HW structure
817 * @active: boolean used to enable/disable lplu
818 *
819 * Success returns 0, Failure returns 1
820 *
821 * The low power link up (lplu) state is set to the power management level D3
822 * and SmartSpeed is disabled when active is true, else clear lplu for D3
823 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
824 * is used during Dx states where the power conservation is most important.
825 * During driver activity, SmartSpeed should be enabled so performance is
826 * maintained.
827 **/
828s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
829{
830 struct e1000_phy_info *phy = &hw->phy;
831 s32 ret_val = 0;
832 u16 data;
833
834 data = rd32(E1000_82580_PHY_POWER_MGMT);
835
836 if (!active) {
837 data &= ~E1000_82580_PM_D3_LPLU;
838 /*
839 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
840 * during Dx states where the power conservation is most
841 * important. During driver activity we should enable
842 * SmartSpeed, so performance is maintained.
843 */
844 if (phy->smart_speed == e1000_smart_speed_on)
845 data |= E1000_82580_PM_SPD;
846 else if (phy->smart_speed == e1000_smart_speed_off)
847 data &= ~E1000_82580_PM_SPD;
848 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
849 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
850 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
851 data |= E1000_82580_PM_D3_LPLU;
852 /* When LPLU is enabled, we should disable SmartSpeed */
853 data &= ~E1000_82580_PM_SPD;
854 }
855
856 wr32(E1000_82580_PHY_POWER_MGMT, data);
857 return ret_val;
858}
859
860/**
677 * igb_acquire_nvm_82575 - Request for access to EEPROM 861 * igb_acquire_nvm_82575 - Request for access to EEPROM
678 * @hw: pointer to the HW structure 862 * @hw: pointer to the HW structure
679 * 863 *
@@ -686,14 +870,14 @@ static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
686{ 870{
687 s32 ret_val; 871 s32 ret_val;
688 872
689 ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); 873 ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
690 if (ret_val) 874 if (ret_val)
691 goto out; 875 goto out;
692 876
693 ret_val = igb_acquire_nvm(hw); 877 ret_val = igb_acquire_nvm(hw);
694 878
695 if (ret_val) 879 if (ret_val)
696 igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); 880 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
697 881
698out: 882out:
699 return ret_val; 883 return ret_val;
@@ -709,7 +893,7 @@ out:
709static void igb_release_nvm_82575(struct e1000_hw *hw) 893static void igb_release_nvm_82575(struct e1000_hw *hw)
710{ 894{
711 igb_release_nvm(hw); 895 igb_release_nvm(hw);
712 igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); 896 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
713} 897}
714 898
715/** 899/**
@@ -1080,7 +1264,6 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
1080 * is no link. 1264 * is no link.
1081 */ 1265 */
1082 igb_clear_hw_cntrs_82575(hw); 1266 igb_clear_hw_cntrs_82575(hw);
1083
1084 return ret_val; 1267 return ret_val;
1085} 1268}
1086 1269
@@ -1117,6 +1300,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1117 } 1300 }
1118 } 1301 }
1119 switch (hw->phy.type) { 1302 switch (hw->phy.type) {
1303 case e1000_phy_i210:
1120 case e1000_phy_m88: 1304 case e1000_phy_m88:
1121 if (hw->phy.id == I347AT4_E_PHY_ID || 1305 if (hw->phy.id == I347AT4_E_PHY_ID ||
1122 hw->phy.id == M88E1112_E_PHY_ID) 1306 hw->phy.id == M88E1112_E_PHY_ID)
@@ -1757,7 +1941,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
1757 1941
1758 /* Determine whether or not a global dev reset is requested */ 1942 /* Determine whether or not a global dev reset is requested */
1759 if (global_device_reset && 1943 if (global_device_reset &&
1760 igb_acquire_swfw_sync_82575(hw, swmbsw_mask)) 1944 hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
1761 global_device_reset = false; 1945 global_device_reset = false;
1762 1946
1763 if (global_device_reset && 1947 if (global_device_reset &&
@@ -1803,7 +1987,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
1803 1987
1804 /* Release semaphore */ 1988 /* Release semaphore */
1805 if (global_device_reset) 1989 if (global_device_reset)
1806 igb_release_swfw_sync_82575(hw, swmbsw_mask); 1990 hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
1807 1991
1808 return ret_val; 1992 return ret_val;
1809} 1993}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index b927d79ab536..e85c453f5428 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -55,10 +55,11 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
55#define E1000_SRRCTL_DROP_EN 0x80000000 55#define E1000_SRRCTL_DROP_EN 0x80000000
56#define E1000_SRRCTL_TIMESTAMP 0x40000000 56#define E1000_SRRCTL_TIMESTAMP 0x40000000
57 57
58
58#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 59#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
59#define E1000_MRQC_ENABLE_VMDQ 0x00000003 60#define E1000_MRQC_ENABLE_VMDQ 0x00000003
60#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
61#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 61#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
62#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
62#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 63#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
63#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 64#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
64 65
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 89eb1f85b9fa..ec7e4fe3e3ee 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -301,6 +301,8 @@
301 * transactions */ 301 * transactions */
302#define E1000_DMACR_DMAC_LX_SHIFT 28 302#define E1000_DMACR_DMAC_LX_SHIFT 28
303#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ 303#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
304/* DMA Coalescing BMC-to-OS Watchdog Enable */
305#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
304 306
305#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit 307#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
306 * Threshold */ 308 * Threshold */
@@ -458,6 +460,7 @@
458#define E1000_ERR_INVALID_ARGUMENT 16 460#define E1000_ERR_INVALID_ARGUMENT 16
459#define E1000_ERR_NO_SPACE 17 461#define E1000_ERR_NO_SPACE 17
460#define E1000_ERR_NVM_PBA_SECTION 18 462#define E1000_ERR_NVM_PBA_SECTION 18
463#define E1000_ERR_INVM_VALUE_NOT_FOUND 19
461 464
462/* Loop limit on how long we wait for auto-negotiation to complete */ 465/* Loop limit on how long we wait for auto-negotiation to complete */
463#define COPPER_LINK_UP_LIMIT 10 466#define COPPER_LINK_UP_LIMIT 10
@@ -595,6 +598,25 @@
595#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ 598#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
596#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ 599#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
597#define E1000_EECD_SIZE_EX_SHIFT 11 600#define E1000_EECD_SIZE_EX_SHIFT 11
/* I210 NVM/flash update and manageability FIFO selection constants.
 * This group appeared twice verbatim in the patch; identical macro
 * redefinition is legal C but redundant, so keep a single copy. */
#define E1000_EECD_FLUPD_I210		0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210		0x04000000 /* Update FLASH done*/
#define E1000_FLUDONE_ATTEMPTS		20000
#define E1000_EERD_EEWR_MAX_COUNT	512 /* buffered EEPROM words rw */
#define E1000_I210_FIFO_SEL_RX		0x00
#define E1000_I210_FIFO_SEL_TX_QAV(_i)	(0x02 + (_i))
#define E1000_I210_FIFO_SEL_TX_LEGACY	E1000_I210_FIFO_SEL_TX_QAV(0)
#define E1000_I210_FIFO_SEL_BMC2OS_TX	0x06
#define E1000_I210_FIFO_SEL_BMC2OS_RX	0x01
619
598 620
599/* Offset to data in NVM read/write registers */ 621/* Offset to data in NVM read/write registers */
600#define E1000_NVM_RW_REG_DATA 16 622#define E1000_NVM_RW_REG_DATA 16
@@ -613,6 +635,16 @@
613#define NVM_CHECKSUM_REG 0x003F 635#define NVM_CHECKSUM_REG 0x003F
614#define NVM_COMPATIBILITY_REG_3 0x0003 636#define NVM_COMPATIBILITY_REG_3 0x0003
615#define NVM_COMPATIBILITY_BIT_MASK 0x8000 637#define NVM_COMPATIBILITY_BIT_MASK 0x8000
638#define NVM_MAC_ADDR 0x0000
639#define NVM_SUB_DEV_ID 0x000B
640#define NVM_SUB_VEN_ID 0x000C
641#define NVM_DEV_ID 0x000D
642#define NVM_VEN_ID 0x000E
643#define NVM_INIT_CTRL_2 0x000F
644#define NVM_INIT_CTRL_4 0x0013
645#define NVM_LED_1_CFG 0x001C
646#define NVM_LED_0_2_CFG 0x001F
647
616 648
617#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ 649#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
618#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ 650#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
@@ -639,6 +671,7 @@
639 671
640#define NVM_PBA_OFFSET_0 8 672#define NVM_PBA_OFFSET_0 8
641#define NVM_PBA_OFFSET_1 9 673#define NVM_PBA_OFFSET_1 9
674#define NVM_RESERVED_WORD 0xFFFF
642#define NVM_PBA_PTR_GUARD 0xFAFA 675#define NVM_PBA_PTR_GUARD 0xFAFA
643#define NVM_WORD_SIZE_BASE_SHIFT 6 676#define NVM_WORD_SIZE_BASE_SHIFT 6
644 677
@@ -696,6 +729,7 @@
696#define I82580_I_PHY_ID 0x015403A0 729#define I82580_I_PHY_ID 0x015403A0
697#define I350_I_PHY_ID 0x015403B0 730#define I350_I_PHY_ID 0x015403B0
698#define M88_VENDOR 0x0141 731#define M88_VENDOR 0x0141
732#define I210_I_PHY_ID 0x01410C00
699 733
700/* M88E1000 Specific Registers */ 734/* M88E1000 Specific Registers */
701#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 735#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -815,6 +849,7 @@
815#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ 849#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
816#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ 850#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
817#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ 851#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
852#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
818#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ 853#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
819 854
820/* SerDes Control */ 855/* SerDes Control */
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index f67cbd3fa307..c2a51dcda550 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -63,6 +63,13 @@ struct e1000_hw;
63#define E1000_DEV_ID_I350_FIBER 0x1522 63#define E1000_DEV_ID_I350_FIBER 0x1522
64#define E1000_DEV_ID_I350_SERDES 0x1523 64#define E1000_DEV_ID_I350_SERDES 0x1523
65#define E1000_DEV_ID_I350_SGMII 0x1524 65#define E1000_DEV_ID_I350_SGMII 0x1524
66#define E1000_DEV_ID_I210_COPPER 0x1533
67#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
68#define E1000_DEV_ID_I210_COPPER_IT 0x1535
69#define E1000_DEV_ID_I210_FIBER 0x1536
70#define E1000_DEV_ID_I210_SERDES 0x1537
71#define E1000_DEV_ID_I210_SGMII 0x1538
72#define E1000_DEV_ID_I211_COPPER 0x1539
66 73
67#define E1000_REVISION_2 2 74#define E1000_REVISION_2 2
68#define E1000_REVISION_4 4 75#define E1000_REVISION_4 4
@@ -83,6 +90,8 @@ enum e1000_mac_type {
83 e1000_82576, 90 e1000_82576,
84 e1000_82580, 91 e1000_82580,
85 e1000_i350, 92 e1000_i350,
93 e1000_i210,
94 e1000_i211,
86 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ 95 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
87}; 96};
88 97
@@ -117,6 +126,7 @@ enum e1000_phy_type {
117 e1000_phy_igp_3, 126 e1000_phy_igp_3,
118 e1000_phy_ife, 127 e1000_phy_ife,
119 e1000_phy_82580, 128 e1000_phy_82580,
129 e1000_phy_i210,
120}; 130};
121 131
122enum e1000_bus_type { 132enum e1000_bus_type {
@@ -313,6 +323,9 @@ struct e1000_mac_operations {
313 void (*rar_set)(struct e1000_hw *, u8 *, u32); 323 void (*rar_set)(struct e1000_hw *, u8 *, u32);
314 s32 (*read_mac_addr)(struct e1000_hw *); 324 s32 (*read_mac_addr)(struct e1000_hw *);
315 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); 325 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
326 s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
327 void (*release_swfw_sync)(struct e1000_hw *, u16);
328
316}; 329};
317 330
318struct e1000_phy_operations { 331struct e1000_phy_operations {
@@ -338,6 +351,7 @@ struct e1000_nvm_operations {
338 s32 (*write)(struct e1000_hw *, u16, u16, u16 *); 351 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
339 s32 (*update)(struct e1000_hw *); 352 s32 (*update)(struct e1000_hw *);
340 s32 (*validate)(struct e1000_hw *); 353 s32 (*validate)(struct e1000_hw *);
354 s32 (*valid_led_default)(struct e1000_hw *, u16 *);
341}; 355};
342 356
343struct e1000_info { 357struct e1000_info {
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
new file mode 100644
index 000000000000..77a5f939bc74
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -0,0 +1,603 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26******************************************************************************/
27
28/* e1000_i210
29 * e1000_i211
30 */
31
32#include <linux/types.h>
33#include <linux/if_ether.h>
34
35#include "e1000_hw.h"
36#include "e1000_i210.h"
37
38static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw);
39static void igb_put_hw_semaphore_i210(struct e1000_hw *hw);
40static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
41 u16 *data);
42static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw);
43
44/**
45 * igb_acquire_nvm_i210 - Request for access to EEPROM
46 * @hw: pointer to the HW structure
47 *
48 * Acquire the necessary semaphores for exclusive access to the EEPROM.
49 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
50 * Return successful if access grant bit set, else clear the request for
51 * EEPROM access and return -E1000_ERR_NVM (-1).
52 **/
53s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
54{
55 return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
56}
57
58/**
59 * igb_release_nvm_i210 - Release exclusive access to EEPROM
60 * @hw: pointer to the HW structure
61 *
62 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
63 * then release the semaphores acquired.
64 **/
65void igb_release_nvm_i210(struct e1000_hw *hw)
66{
67 igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
68}
69
70/**
71 * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
72 * @hw: pointer to the HW structure
73 * @mask: specifies which semaphore to acquire
74 *
75 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
76 * will also specify which port we're acquiring the lock for.
77 **/
78s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
79{
80 u32 swfw_sync;
81 u32 swmask = mask;
82 u32 fwmask = mask << 16;
83 s32 ret_val = E1000_SUCCESS;
84 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
85
86 while (i < timeout) {
87 if (igb_get_hw_semaphore_i210(hw)) {
88 ret_val = -E1000_ERR_SWFW_SYNC;
89 goto out;
90 }
91
92 swfw_sync = rd32(E1000_SW_FW_SYNC);
93 if (!(swfw_sync & fwmask))
94 break;
95
96 /*
97 * Firmware currently using resource (fwmask)
98 */
99 igb_put_hw_semaphore_i210(hw);
100 mdelay(5);
101 i++;
102 }
103
104 if (i == timeout) {
105 hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
106 ret_val = -E1000_ERR_SWFW_SYNC;
107 goto out;
108 }
109
110 swfw_sync |= swmask;
111 wr32(E1000_SW_FW_SYNC, swfw_sync);
112
113 igb_put_hw_semaphore_i210(hw);
114out:
115 return ret_val;
116}
117
118/**
119 * igb_release_swfw_sync_i210 - Release SW/FW semaphore
120 * @hw: pointer to the HW structure
121 * @mask: specifies which semaphore to acquire
122 *
123 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
124 * will also specify which port we're releasing the lock for.
125 **/
126void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
127{
128 u32 swfw_sync;
129
130 while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
131 ; /* Empty */
132
133 swfw_sync = rd32(E1000_SW_FW_SYNC);
134 swfw_sync &= ~mask;
135 wr32(E1000_SW_FW_SYNC, swfw_sync);
136
137 igb_put_hw_semaphore_i210(hw);
138}
139
140/**
141 * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
142 * @hw: pointer to the HW structure
143 *
144 * Acquire the HW semaphore to access the PHY or NVM
145 **/
146static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
147{
148 u32 swsm;
149 s32 ret_val = E1000_SUCCESS;
150 s32 timeout = hw->nvm.word_size + 1;
151 s32 i = 0;
152
153 /* Get the FW semaphore. */
154 for (i = 0; i < timeout; i++) {
155 swsm = rd32(E1000_SWSM);
156 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
157
158 /* Semaphore acquired if bit latched */
159 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
160 break;
161
162 udelay(50);
163 }
164
165 if (i == timeout) {
166 /* Release semaphores */
167 igb_put_hw_semaphore(hw);
168 hw_dbg("Driver can't access the NVM\n");
169 ret_val = -E1000_ERR_NVM;
170 goto out;
171 }
172
173out:
174 return ret_val;
175}
176
177/**
178 * igb_put_hw_semaphore_i210 - Release hardware semaphore
179 * @hw: pointer to the HW structure
180 *
181 * Release hardware semaphore used to access the PHY or NVM
182 **/
183static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
184{
185 u32 swsm;
186
187 swsm = rd32(E1000_SWSM);
188
189 swsm &= ~E1000_SWSM_SWESMBI;
190
191 wr32(E1000_SWSM, swsm);
192}
193
194/**
195 * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
196 * @hw: pointer to the HW structure
197 * @offset: offset of word in the Shadow Ram to read
198 * @words: number of words to read
199 * @data: word read from the Shadow Ram
200 *
201 * Reads a 16 bit word from the Shadow Ram using the EERD register.
202 * Uses necessary synchronization semaphores.
203 **/
204s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
205 u16 *data)
206{
207 s32 status = E1000_SUCCESS;
208 u16 i, count;
209
210 /* We cannot hold synchronization semaphores for too long,
211 * because of forceful takeover procedure. However it is more efficient
212 * to read in bursts than synchronizing access for each word. */
213 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
214 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
215 E1000_EERD_EEWR_MAX_COUNT : (words - i);
216 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
217 status = igb_read_nvm_eerd(hw, offset, count,
218 data + i);
219 hw->nvm.ops.release(hw);
220 } else {
221 status = E1000_ERR_SWFW_SYNC;
222 }
223
224 if (status != E1000_SUCCESS)
225 break;
226 }
227
228 return status;
229}
230
231/**
232 * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
233 * @hw: pointer to the HW structure
234 * @offset: offset within the Shadow RAM to be written to
235 * @words: number of words to write
236 * @data: 16 bit word(s) to be written to the Shadow RAM
237 *
238 * Writes data to Shadow RAM at offset using EEWR register.
239 *
240 * If e1000_update_nvm_checksum is not called after this function , the
241 * data will not be committed to FLASH and also Shadow RAM will most likely
242 * contain an invalid checksum.
243 *
244 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
245 * partially written.
246 **/
247s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
248 u16 *data)
249{
250 s32 status = E1000_SUCCESS;
251 u16 i, count;
252
253 /* We cannot hold synchronization semaphores for too long,
254 * because of forceful takeover procedure. However it is more efficient
255 * to write in bursts than synchronizing access for each word. */
256 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
257 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
258 E1000_EERD_EEWR_MAX_COUNT : (words - i);
259 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
260 status = igb_write_nvm_srwr(hw, offset, count,
261 data + i);
262 hw->nvm.ops.release(hw);
263 } else {
264 status = E1000_ERR_SWFW_SYNC;
265 }
266
267 if (status != E1000_SUCCESS)
268 break;
269 }
270
271 return status;
272}
273
274/**
275 * igb_write_nvm_srwr - Write to Shadow Ram using EEWR
276 * @hw: pointer to the HW structure
277 * @offset: offset within the Shadow Ram to be written to
278 * @words: number of words to write
279 * @data: 16 bit word(s) to be written to the Shadow Ram
280 *
281 * Writes data to Shadow Ram at offset using EEWR register.
282 *
283 * If igb_update_nvm_checksum is not called after this function , the
284 * Shadow Ram will most likely contain an invalid checksum.
285 **/
286static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
287 u16 *data)
288{
289 struct e1000_nvm_info *nvm = &hw->nvm;
290 u32 i, k, eewr = 0;
291 u32 attempts = 100000;
292 s32 ret_val = E1000_SUCCESS;
293
294 /*
295 * A check for invalid values: offset too large, too many words,
296 * too many words for the offset, and not enough words.
297 */
298 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
299 (words == 0)) {
300 hw_dbg("nvm parameter(s) out of bounds\n");
301 ret_val = -E1000_ERR_NVM;
302 goto out;
303 }
304
305 for (i = 0; i < words; i++) {
306 eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
307 (data[i] << E1000_NVM_RW_REG_DATA) |
308 E1000_NVM_RW_REG_START;
309
310 wr32(E1000_SRWR, eewr);
311
312 for (k = 0; k < attempts; k++) {
313 if (E1000_NVM_RW_REG_DONE &
314 rd32(E1000_SRWR)) {
315 ret_val = E1000_SUCCESS;
316 break;
317 }
318 udelay(5);
319 }
320
321 if (ret_val != E1000_SUCCESS) {
322 hw_dbg("Shadow RAM write EEWR timed out\n");
323 break;
324 }
325 }
326
327out:
328 return ret_val;
329}
330
331/**
332 * igb_read_nvm_i211 - Read NVM wrapper function for I211
333 * @hw: pointer to the HW structure
334 * @address: the word address (aka eeprom offset) to read
335 * @data: pointer to the data read
336 *
337 * Wrapper function to return data formerly found in the NVM.
338 **/
339s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
340 u16 *data)
341{
342 s32 ret_val = E1000_SUCCESS;
343
344 /* Only the MAC addr is required to be present in the iNVM */
345 switch (offset) {
346 case NVM_MAC_ADDR:
347 ret_val = igb_read_invm_i211(hw, offset, &data[0]);
348 ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]);
349 ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]);
350 if (ret_val != E1000_SUCCESS)
351 hw_dbg("MAC Addr not found in iNVM\n");
352 break;
353 case NVM_ID_LED_SETTINGS:
354 case NVM_INIT_CTRL_2:
355 case NVM_INIT_CTRL_4:
356 case NVM_LED_1_CFG:
357 case NVM_LED_0_2_CFG:
358 igb_read_invm_i211(hw, offset, data);
359 break;
360 case NVM_COMPAT:
361 *data = ID_LED_DEFAULT_I210;
362 break;
363 case NVM_SUB_DEV_ID:
364 *data = hw->subsystem_device_id;
365 break;
366 case NVM_SUB_VEN_ID:
367 *data = hw->subsystem_vendor_id;
368 break;
369 case NVM_DEV_ID:
370 *data = hw->device_id;
371 break;
372 case NVM_VEN_ID:
373 *data = hw->vendor_id;
374 break;
375 default:
376 hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
377 *data = NVM_RESERVED_WORD;
378 break;
379 }
380 return ret_val;
381}
382
383/**
384 * igb_read_invm_i211 - Reads OTP
385 * @hw: pointer to the HW structure
386 * @address: the word address (aka eeprom offset) to read
387 * @data: pointer to the data read
388 *
389 * Reads 16-bit words from the OTP. Return error when the word is not
390 * stored in OTP.
391 **/
392s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
393{
394 s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
395 u32 invm_dword;
396 u16 i;
397 u8 record_type, word_address;
398
399 for (i = 0; i < E1000_INVM_SIZE; i++) {
400 invm_dword = rd32(E1000_INVM_DATA_REG(i));
401 /* Get record type */
402 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
403 if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
404 break;
405 if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
406 i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
407 if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
408 i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
409 if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
410 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
411 if (word_address == (u8)address) {
412 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
413 hw_dbg("Read INVM Word 0x%02x = %x",
414 address, *data);
415 status = E1000_SUCCESS;
416 break;
417 }
418 }
419 }
420 if (status != E1000_SUCCESS)
421 hw_dbg("Requested word 0x%02x not found in OTP\n", address);
422 return status;
423}
424
425/**
426 * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
427 * @hw: pointer to the HW structure
428 *
429 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
430 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
431 **/
432s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
433{
434 s32 status = E1000_SUCCESS;
435 s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
436
437 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
438
439 /*
440 * Replace the read function with semaphore grabbing with
441 * the one that skips this for a while.
442 * We have semaphore taken already here.
443 */
444 read_op_ptr = hw->nvm.ops.read;
445 hw->nvm.ops.read = igb_read_nvm_eerd;
446
447 status = igb_validate_nvm_checksum(hw);
448
449 /* Revert original read operation. */
450 hw->nvm.ops.read = read_op_ptr;
451
452 hw->nvm.ops.release(hw);
453 } else {
454 status = E1000_ERR_SWFW_SYNC;
455 }
456
457 return status;
458}
459
460
461/**
462 * igb_update_nvm_checksum_i210 - Update EEPROM checksum
463 * @hw: pointer to the HW structure
464 *
465 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
466 * up to the checksum. Then calculates the EEPROM checksum and writes the
467 * value to the EEPROM. Next commit EEPROM data onto the Flash.
468 **/
469s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
470{
471 s32 ret_val = E1000_SUCCESS;
472 u16 checksum = 0;
473 u16 i, nvm_data;
474
475 /*
476 * Read the first word from the EEPROM. If this times out or fails, do
477 * not continue or we could be in for a very long wait while every
478 * EEPROM read fails
479 */
480 ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
481 if (ret_val != E1000_SUCCESS) {
482 hw_dbg("EEPROM read failed\n");
483 goto out;
484 }
485
486 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
487 /*
488 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
489 * because we do not want to take the synchronization
490 * semaphores twice here.
491 */
492
493 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
494 ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
495 if (ret_val) {
496 hw->nvm.ops.release(hw);
497 hw_dbg("NVM Read Error while updating checksum.\n");
498 goto out;
499 }
500 checksum += nvm_data;
501 }
502 checksum = (u16) NVM_SUM - checksum;
503 ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
504 &checksum);
505 if (ret_val != E1000_SUCCESS) {
506 hw->nvm.ops.release(hw);
507 hw_dbg("NVM Write Error while updating checksum.\n");
508 goto out;
509 }
510
511 hw->nvm.ops.release(hw);
512
513 ret_val = igb_update_flash_i210(hw);
514 } else {
515 ret_val = -E1000_ERR_SWFW_SYNC;
516 }
517out:
518 return ret_val;
519}
520
521/**
522 * igb_update_flash_i210 - Commit EEPROM to the flash
523 * @hw: pointer to the HW structure
524 *
525 **/
526s32 igb_update_flash_i210(struct e1000_hw *hw)
527{
528 s32 ret_val = E1000_SUCCESS;
529 u32 flup;
530
531 ret_val = igb_pool_flash_update_done_i210(hw);
532 if (ret_val == -E1000_ERR_NVM) {
533 hw_dbg("Flash update time out\n");
534 goto out;
535 }
536
537 flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
538 wr32(E1000_EECD, flup);
539
540 ret_val = igb_pool_flash_update_done_i210(hw);
541 if (ret_val == E1000_SUCCESS)
542 hw_dbg("Flash update complete\n");
543 else
544 hw_dbg("Flash update time out\n");
545
546out:
547 return ret_val;
548}
549
550/**
551 * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
552 * @hw: pointer to the HW structure
553 *
554 **/
555s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
556{
557 s32 ret_val = -E1000_ERR_NVM;
558 u32 i, reg;
559
560 for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
561 reg = rd32(E1000_EECD);
562 if (reg & E1000_EECD_FLUDONE_I210) {
563 ret_val = E1000_SUCCESS;
564 break;
565 }
566 udelay(5);
567 }
568
569 return ret_val;
570}
571
572/**
573 * igb_valid_led_default_i210 - Verify a valid default LED config
574 * @hw: pointer to the HW structure
575 * @data: pointer to the NVM (EEPROM)
576 *
577 * Read the EEPROM for the current default LED configuration. If the
578 * LED configuration is not valid, set to a valid LED configuration.
579 **/
580s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
581{
582 s32 ret_val;
583
584 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
585 if (ret_val) {
586 hw_dbg("NVM Read Error\n");
587 goto out;
588 }
589
590 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
591 switch (hw->phy.media_type) {
592 case e1000_media_type_internal_serdes:
593 *data = ID_LED_DEFAULT_I210_SERDES;
594 break;
595 case e1000_media_type_copper:
596 default:
597 *data = ID_LED_DEFAULT_I210;
598 break;
599 }
600 }
601out:
602 return ret_val;
603}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
new file mode 100644
index 000000000000..5dc2bd3f50bc
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -0,0 +1,76 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_I210_H_
29#define _E1000_I210_H_
30
31extern s32 igb_update_flash_i210(struct e1000_hw *hw);
32extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
33extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
34extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
35 u16 words, u16 *data);
36extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
37 u16 words, u16 *data);
38extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
39extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
40extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
41extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
42extern void igb_release_nvm_i210(struct e1000_hw *hw);
43extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
44extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
45 u16 *data);
46
47#define E1000_STM_OPCODE 0xDB00
48#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
49
50#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
51 (u8)((invm_dword) & 0x7)
52#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
53 (u8)(((invm_dword) & 0x0000FE00) >> 9)
54#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
55 (u16)(((invm_dword) & 0xFFFF0000) >> 16)
56
57enum E1000_INVM_STRUCTURE_TYPE {
58 E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00,
59 E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01,
60 E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02,
61 E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03,
62 E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04,
63 E1000_INVM_INVALIDATED_STRUCTURE = 0x0F,
64};
65
66#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
67#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
68
69#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
70 (ID_LED_OFF1_OFF2 << 4) | \
71 (ID_LED_DEF1_DEF2))
72#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
73 (ID_LED_DEF1_DEF2 << 4) | \
74 (ID_LED_DEF1_DEF2))
75
76#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index f57338afd71f..819c145ac762 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -658,6 +658,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
658 ret_val = igb_set_fc_watermarks(hw); 658 ret_val = igb_set_fc_watermarks(hw);
659 659
660out: 660out:
661
661 return ret_val; 662 return ret_val;
662} 663}
663 664
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index fa2c6ba62139..aa5fcdf3f357 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -710,4 +710,3 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
710out: 710out:
711 return ret_val; 711 return ret_val;
712} 712}
713
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 789de5b83aad..7be98b6f1052 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -35,6 +35,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
35static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, 35static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
36 u16 *phy_ctrl); 36 u16 *phy_ctrl);
37static s32 igb_wait_autoneg(struct e1000_hw *hw); 37static s32 igb_wait_autoneg(struct e1000_hw *hw);
38static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
38 39
39/* Cable length tables */ 40/* Cable length tables */
40static const u16 e1000_m88_cable_length_table[] = 41static const u16 e1000_m88_cable_length_table[] =
@@ -570,6 +571,11 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
570 hw_dbg("Error committing the PHY changes\n"); 571 hw_dbg("Error committing the PHY changes\n");
571 goto out; 572 goto out;
572 } 573 }
574 if (phy->type == e1000_phy_i210) {
575 ret_val = igb_set_master_slave_mode(hw);
576 if (ret_val)
577 return ret_val;
578 }
573 579
574out: 580out:
575 return ret_val; 581 return ret_val;
@@ -1213,12 +1219,22 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1213 goto out; 1219 goto out;
1214 1220
1215 if (!link) { 1221 if (!link) {
1216 if (hw->phy.type != e1000_phy_m88 || 1222 bool reset_dsp = true;
1217 hw->phy.id == I347AT4_E_PHY_ID || 1223
1218 hw->phy.id == M88E1112_E_PHY_ID) { 1224 switch (hw->phy.id) {
1225 case I347AT4_E_PHY_ID:
1226 case M88E1112_E_PHY_ID:
1227 case I210_I_PHY_ID:
1228 reset_dsp = false;
1229 break;
1230 default:
1231 if (hw->phy.type != e1000_phy_m88)
1232 reset_dsp = false;
1233 break;
1234 }
1235 if (!reset_dsp)
1219 hw_dbg("Link taking longer than expected.\n"); 1236 hw_dbg("Link taking longer than expected.\n");
1220 } else { 1237 else {
1221
1222 /* 1238 /*
1223 * We didn't get link. 1239 * We didn't get link.
1224 * Reset the DSP and cross our fingers. 1240 * Reset the DSP and cross our fingers.
@@ -1243,7 +1259,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1243 1259
1244 if (hw->phy.type != e1000_phy_m88 || 1260 if (hw->phy.type != e1000_phy_m88 ||
1245 hw->phy.id == I347AT4_E_PHY_ID || 1261 hw->phy.id == I347AT4_E_PHY_ID ||
1246 hw->phy.id == M88E1112_E_PHY_ID) 1262 hw->phy.id == M88E1112_E_PHY_ID ||
1263 hw->phy.id == I210_I_PHY_ID)
1247 goto out; 1264 goto out;
1248 1265
1249 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 1266 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
@@ -1441,6 +1458,7 @@ s32 igb_check_downshift(struct e1000_hw *hw)
1441 u16 phy_data, offset, mask; 1458 u16 phy_data, offset, mask;
1442 1459
1443 switch (phy->type) { 1460 switch (phy->type) {
1461 case e1000_phy_i210:
1444 case e1000_phy_m88: 1462 case e1000_phy_m88:
1445 case e1000_phy_gg82563: 1463 case e1000_phy_gg82563:
1446 offset = M88E1000_PHY_SPEC_STATUS; 1464 offset = M88E1000_PHY_SPEC_STATUS;
@@ -1476,7 +1494,7 @@ out:
1476 * 1494 *
1477 * Polarity is determined based on the PHY specific status register. 1495 * Polarity is determined based on the PHY specific status register.
1478 **/ 1496 **/
1479static s32 igb_check_polarity_m88(struct e1000_hw *hw) 1497s32 igb_check_polarity_m88(struct e1000_hw *hw)
1480{ 1498{
1481 struct e1000_phy_info *phy = &hw->phy; 1499 struct e1000_phy_info *phy = &hw->phy;
1482 s32 ret_val; 1500 s32 ret_val;
@@ -1665,6 +1683,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1665 u16 phy_data, phy_data2, index, default_page, is_cm; 1683 u16 phy_data, phy_data2, index, default_page, is_cm;
1666 1684
1667 switch (hw->phy.id) { 1685 switch (hw->phy.id) {
1686 case I210_I_PHY_ID:
1668 case I347AT4_E_PHY_ID: 1687 case I347AT4_E_PHY_ID:
1669 /* Remember the original page select and set it to 7 */ 1688 /* Remember the original page select and set it to 7 */
1670 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, 1689 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -2129,10 +2148,16 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
2129void igb_power_up_phy_copper(struct e1000_hw *hw) 2148void igb_power_up_phy_copper(struct e1000_hw *hw)
2130{ 2149{
2131 u16 mii_reg = 0; 2150 u16 mii_reg = 0;
2151 u16 power_reg = 0;
2132 2152
2133 /* The PHY will retain its settings across a power down/up cycle */ 2153 /* The PHY will retain its settings across a power down/up cycle */
2134 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); 2154 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
2135 mii_reg &= ~MII_CR_POWER_DOWN; 2155 mii_reg &= ~MII_CR_POWER_DOWN;
2156 if (hw->phy.type == e1000_phy_i210) {
2157 hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
2158 power_reg &= ~GS40G_CS_POWER_DOWN;
2159 hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
2160 }
2136 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); 2161 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
2137} 2162}
2138 2163
@@ -2146,10 +2171,18 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
2146void igb_power_down_phy_copper(struct e1000_hw *hw) 2171void igb_power_down_phy_copper(struct e1000_hw *hw)
2147{ 2172{
2148 u16 mii_reg = 0; 2173 u16 mii_reg = 0;
2174 u16 power_reg = 0;
2149 2175
2150 /* The PHY will retain its settings across a power down/up cycle */ 2176 /* The PHY will retain its settings across a power down/up cycle */
2151 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); 2177 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
2152 mii_reg |= MII_CR_POWER_DOWN; 2178 mii_reg |= MII_CR_POWER_DOWN;
2179
2180 /* i210 Phy requires an additional bit for power up/down */
2181 if (hw->phy.type == e1000_phy_i210) {
2182 hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
2183 power_reg |= GS40G_CS_POWER_DOWN;
2184 hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
2185 }
2153 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); 2186 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
2154 msleep(1); 2187 msleep(1);
2155} 2188}
@@ -2345,3 +2378,103 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw)
2345out: 2378out:
2346 return ret_val; 2379 return ret_val;
2347} 2380}
2381
2382/**
2383 * igb_write_phy_reg_gs40g - Write GS40G PHY register
2384 * @hw: pointer to the HW structure
2385 * @offset: lower half is register offset to write to
2386 * upper half is page to use.
2387 * @data: data to write at register offset
2388 *
2389 * Acquires semaphore, if necessary, then writes the data to PHY register
2390 * at the offset. Release any acquired semaphores before exiting.
2391 **/
2392s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
2393{
2394 s32 ret_val;
2395 u16 page = offset >> GS40G_PAGE_SHIFT;
2396
2397 offset = offset & GS40G_OFFSET_MASK;
2398 ret_val = hw->phy.ops.acquire(hw);
2399 if (ret_val)
2400 return ret_val;
2401
2402 ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
2403 if (ret_val)
2404 goto release;
2405 ret_val = igb_write_phy_reg_mdic(hw, offset, data);
2406
2407release:
2408 hw->phy.ops.release(hw);
2409 return ret_val;
2410}
2411
2412/**
2413 * igb_read_phy_reg_gs40g - Read GS40G PHY register
2414 * @hw: pointer to the HW structure
2415 * @offset: lower half is register offset to read to
2416 * upper half is page to use.
2417 * @data: data to read at register offset
2418 *
2419 * Acquires semaphore, if necessary, then reads the data in the PHY register
2420 * at the offset. Release any acquired semaphores before exiting.
2421 **/
2422s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
2423{
2424 s32 ret_val;
2425 u16 page = offset >> GS40G_PAGE_SHIFT;
2426
2427 offset = offset & GS40G_OFFSET_MASK;
2428 ret_val = hw->phy.ops.acquire(hw);
2429 if (ret_val)
2430 return ret_val;
2431
2432 ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
2433 if (ret_val)
2434 goto release;
2435 ret_val = igb_read_phy_reg_mdic(hw, offset, data);
2436
2437release:
2438 hw->phy.ops.release(hw);
2439 return ret_val;
2440}
2441
2442/**
2443 * igb_set_master_slave_mode - Setup PHY for Master/slave mode
2444 * @hw: pointer to the HW structure
2445 *
2446 * Sets up Master/slave mode
2447 **/
2448static s32 igb_set_master_slave_mode(struct e1000_hw *hw)
2449{
2450 s32 ret_val;
2451 u16 phy_data;
2452
2453 /* Resolve Master/Slave mode */
2454 ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
2455 if (ret_val)
2456 return ret_val;
2457
2458 /* load defaults for future use */
2459 hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
2460 ((phy_data & CR_1000T_MS_VALUE) ?
2461 e1000_ms_force_master :
2462 e1000_ms_force_slave) : e1000_ms_auto;
2463
2464 switch (hw->phy.ms_type) {
2465 case e1000_ms_force_master:
2466 phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
2467 break;
2468 case e1000_ms_force_slave:
2469 phy_data |= CR_1000T_MS_ENABLE;
2470 phy_data &= ~(CR_1000T_MS_VALUE);
2471 break;
2472 case e1000_ms_auto:
2473 phy_data &= ~CR_1000T_MS_ENABLE;
2474 /* fall-through */
2475 default:
2476 break;
2477 }
2478
2479 return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
2480}
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 4c32ac66ff39..34e40619f16b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -73,6 +73,9 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
73s32 igb_get_phy_info_82580(struct e1000_hw *hw); 73s32 igb_get_phy_info_82580(struct e1000_hw *hw);
74s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); 74s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
75s32 igb_get_cable_length_82580(struct e1000_hw *hw); 75s32 igb_get_cable_length_82580(struct e1000_hw *hw);
76s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
77s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
78s32 igb_check_polarity_m88(struct e1000_hw *hw);
76 79
77/* IGP01E1000 Specific Registers */ 80/* IGP01E1000 Specific Registers */
78#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ 81#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
@@ -114,6 +117,13 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw);
114/* I82580 PHY Diagnostics Status */ 117/* I82580 PHY Diagnostics Status */
115#define I82580_DSTATUS_CABLE_LENGTH 0x03FC 118#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
116#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 119#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
120
121/* 82580 PHY Power Management */
122#define E1000_82580_PHY_POWER_MGMT 0xE14
123#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
124#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
125#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
126
117/* Enable flexible speed on link-up */ 127/* Enable flexible speed on link-up */
118#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ 128#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
119#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ 129#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
@@ -133,4 +143,16 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw);
133 143
134#define E1000_CABLE_LENGTH_UNDEFINED 0xFF 144#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
135 145
146/* GS40G - I210 PHY defines */
147#define GS40G_PAGE_SELECT 0x16
148#define GS40G_PAGE_SHIFT 16
149#define GS40G_OFFSET_MASK 0xFFFF
150#define GS40G_PAGE_2 0x20000
151#define GS40G_MAC_REG2 0x15
152#define GS40G_MAC_LB 0x4140
153#define GS40G_MAC_SPEED_1G 0X0006
154#define GS40G_COPPER_SPEC 0x0010
155#define GS40G_CS_POWER_DOWN 0x0002
156#define GS40G_LINE_LB 0x4000
157
136#endif 158#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index ccdf36d503fd..35d1e4f2c92c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -352,4 +352,18 @@
352#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ 352#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
353#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ 353#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
354 354
355#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
356#define E1000_I210_FLMNGCTL 0x12038
357#define E1000_I210_FLMNGDATA 0x1203C
358#define E1000_I210_FLMNGCNT 0x12040
359
360#define E1000_I210_FLSWCTL 0x12048
361#define E1000_I210_FLSWDATA 0x1204C
362#define E1000_I210_FLSWCNT 0x12050
363
364#define E1000_I210_FLA 0x1201C
365
366#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
367#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
368
355#endif 369#endif
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8e33bdd33eea..ae6d3f393a54 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -35,8 +35,8 @@
35#include "e1000_82575.h" 35#include "e1000_82575.h"
36 36
37#include <linux/clocksource.h> 37#include <linux/clocksource.h>
38#include <linux/timecompare.h>
39#include <linux/net_tstamp.h> 38#include <linux/net_tstamp.h>
39#include <linux/ptp_clock_kernel.h>
40#include <linux/bitops.h> 40#include <linux/bitops.h>
41#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
42 42
@@ -65,10 +65,13 @@ struct igb_adapter;
65#define MAX_Q_VECTORS 8 65#define MAX_Q_VECTORS 8
66 66
67/* Transmit and receive queues */ 67/* Transmit and receive queues */
68#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \ 68#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \
69 (hw->mac.type > e1000_82575 ? 8 : 4)) 69 (hw->mac.type > e1000_82575 ? 8 : 4)))
70#define IGB_MAX_RX_QUEUES_I210 4
71#define IGB_MAX_RX_QUEUES_I211 2
70#define IGB_MAX_TX_QUEUES 16 72#define IGB_MAX_TX_QUEUES 16
71 73#define IGB_MAX_TX_QUEUES_I210 4
74#define IGB_MAX_TX_QUEUES_I211 2
72#define IGB_MAX_VF_MC_ENTRIES 30 75#define IGB_MAX_VF_MC_ENTRIES 30
73#define IGB_MAX_VF_FUNCTIONS 8 76#define IGB_MAX_VF_FUNCTIONS 8
74#define IGB_MAX_VFTA_ENTRIES 128 77#define IGB_MAX_VFTA_ENTRIES 128
@@ -328,9 +331,6 @@ struct igb_adapter {
328 331
329 /* OS defined structs */ 332 /* OS defined structs */
330 struct pci_dev *pdev; 333 struct pci_dev *pdev;
331 struct cyclecounter cycles;
332 struct timecounter clock;
333 struct timecompare compare;
334 struct hwtstamp_config hwtstamp_config; 334 struct hwtstamp_config hwtstamp_config;
335 335
336 spinlock_t stats64_lock; 336 spinlock_t stats64_lock;
@@ -364,6 +364,13 @@ struct igb_adapter {
364 u32 wvbr; 364 u32 wvbr;
365 int node; 365 int node;
366 u32 *shadow_vfta; 366 u32 *shadow_vfta;
367
368 struct ptp_clock *ptp_clock;
369 struct ptp_clock_info caps;
370 struct delayed_work overflow_work;
371 spinlock_t tmreg_lock;
372 struct cyclecounter cc;
373 struct timecounter tc;
367}; 374};
368 375
369#define IGB_FLAG_HAS_MSI (1 << 0) 376#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -378,7 +385,6 @@ struct igb_adapter {
378#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ 385#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
379 386
380#define IGB_82576_TSYNC_SHIFT 19 387#define IGB_82576_TSYNC_SHIFT 19
381#define IGB_82580_TSYNC_SHIFT 24
382#define IGB_TS_HDR_LEN 16 388#define IGB_TS_HDR_LEN 16
383enum e1000_state_t { 389enum e1000_state_t {
384 __IGB_TESTING, 390 __IGB_TESTING,
@@ -414,7 +420,15 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
414extern bool igb_has_link(struct igb_adapter *adapter); 420extern bool igb_has_link(struct igb_adapter *adapter);
415extern void igb_set_ethtool_ops(struct net_device *); 421extern void igb_set_ethtool_ops(struct net_device *);
416extern void igb_power_up_link(struct igb_adapter *); 422extern void igb_power_up_link(struct igb_adapter *);
423#ifdef CONFIG_IGB_PTP
424extern void igb_ptp_init(struct igb_adapter *adapter);
425extern void igb_ptp_remove(struct igb_adapter *adapter);
417 426
427extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
428 struct skb_shared_hwtstamps *hwtstamps,
429 u64 systim);
430
431#endif
418static inline s32 igb_reset_phy(struct e1000_hw *hw) 432static inline s32 igb_reset_phy(struct e1000_hw *hw)
419{ 433{
420 if (hw->phy.ops.reset) 434 if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e10821a0f249..812d4f963bd1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -335,7 +335,7 @@ static void igb_set_msglevel(struct net_device *netdev, u32 data)
335 335
336static int igb_get_regs_len(struct net_device *netdev) 336static int igb_get_regs_len(struct net_device *netdev)
337{ 337{
338#define IGB_REGS_LEN 551 338#define IGB_REGS_LEN 739
339 return IGB_REGS_LEN * sizeof(u32); 339 return IGB_REGS_LEN * sizeof(u32);
340} 340}
341 341
@@ -552,10 +552,49 @@ static void igb_get_regs(struct net_device *netdev,
552 regs_buff[548] = rd32(E1000_TDFT); 552 regs_buff[548] = rd32(E1000_TDFT);
553 regs_buff[549] = rd32(E1000_TDFHS); 553 regs_buff[549] = rd32(E1000_TDFHS);
554 regs_buff[550] = rd32(E1000_TDFPC); 554 regs_buff[550] = rd32(E1000_TDFPC);
555 regs_buff[551] = adapter->stats.o2bgptc; 555
556 regs_buff[552] = adapter->stats.b2ospc; 556 if (hw->mac.type > e1000_82580) {
557 regs_buff[553] = adapter->stats.o2bspc; 557 regs_buff[551] = adapter->stats.o2bgptc;
558 regs_buff[554] = adapter->stats.b2ogprc; 558 regs_buff[552] = adapter->stats.b2ospc;
559 regs_buff[553] = adapter->stats.o2bspc;
560 regs_buff[554] = adapter->stats.b2ogprc;
561 }
562
563 if (hw->mac.type != e1000_82576)
564 return;
565 for (i = 0; i < 12; i++)
566 regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
567 for (i = 0; i < 4; i++)
568 regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
569 for (i = 0; i < 12; i++)
570 regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
571 for (i = 0; i < 12; i++)
572 regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
573 for (i = 0; i < 12; i++)
574 regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
575 for (i = 0; i < 12; i++)
576 regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
577 for (i = 0; i < 12; i++)
578 regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
579 for (i = 0; i < 12; i++)
580 regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));
581
582 for (i = 0; i < 12; i++)
583 regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
584 for (i = 0; i < 12; i++)
585 regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
586 for (i = 0; i < 12; i++)
587 regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
588 for (i = 0; i < 12; i++)
589 regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
590 for (i = 0; i < 12; i++)
591 regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
592 for (i = 0; i < 12; i++)
593 regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
594 for (i = 0; i < 12; i++)
595 regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
596 for (i = 0; i < 12; i++)
597 regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
559} 598}
560 599
561static int igb_get_eeprom_len(struct net_device *netdev) 600static int igb_get_eeprom_len(struct net_device *netdev)
@@ -624,6 +663,9 @@ static int igb_set_eeprom(struct net_device *netdev,
624 if (eeprom->len == 0) 663 if (eeprom->len == 0)
625 return -EOPNOTSUPP; 664 return -EOPNOTSUPP;
626 665
666 if (hw->mac.type == e1000_i211)
667 return -EOPNOTSUPP;
668
627 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 669 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
628 return -EFAULT; 670 return -EFAULT;
629 671
@@ -851,6 +893,36 @@ struct igb_reg_test {
851#define TABLE64_TEST_LO 5 893#define TABLE64_TEST_LO 5
852#define TABLE64_TEST_HI 6 894#define TABLE64_TEST_HI 6
853 895
896/* i210 reg test */
897static struct igb_reg_test reg_test_i210[] = {
898 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
899 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
900 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
901 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
902 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
903 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
904 /* RDH is read-only for i210, only test RDT. */
905 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
906 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
907 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
908 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
909 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
910 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
911 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
912 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
913 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
914 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
915 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
916 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
917 { E1000_RA, 0, 16, TABLE64_TEST_LO,
918 0xFFFFFFFF, 0xFFFFFFFF },
919 { E1000_RA, 0, 16, TABLE64_TEST_HI,
920 0x900FFFFF, 0xFFFFFFFF },
921 { E1000_MTA, 0, 128, TABLE32_TEST,
922 0xFFFFFFFF, 0xFFFFFFFF },
923 { 0, 0, 0, 0, 0 }
924};
925
854/* i350 reg test */ 926/* i350 reg test */
855static struct igb_reg_test reg_test_i350[] = { 927static struct igb_reg_test reg_test_i350[] = {
856 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 928 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1073,6 +1145,11 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1073 test = reg_test_i350; 1145 test = reg_test_i350;
1074 toggle = 0x7FEFF3FF; 1146 toggle = 0x7FEFF3FF;
1075 break; 1147 break;
1148 case e1000_i210:
1149 case e1000_i211:
1150 test = reg_test_i210;
1151 toggle = 0x7FEFF3FF;
1152 break;
1076 case e1000_82580: 1153 case e1000_82580:
1077 test = reg_test_82580; 1154 test = reg_test_82580;
1078 toggle = 0x7FEFF3FF; 1155 toggle = 0x7FEFF3FF;
@@ -1154,23 +1231,13 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1154 1231
1155static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) 1232static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1156{ 1233{
1157 u16 temp;
1158 u16 checksum = 0;
1159 u16 i;
1160
1161 *data = 0; 1234 *data = 0;
1162 /* Read and add up the contents of the EEPROM */
1163 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
1164 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
1165 *data = 1;
1166 break;
1167 }
1168 checksum += temp;
1169 }
1170 1235
1171 /* If Checksum is not Correct return error else test passed */ 1236 /* Validate eeprom on all parts but i211 */
1172 if ((checksum != (u16) NVM_SUM) && !(*data)) 1237 if (adapter->hw.mac.type != e1000_i211) {
1173 *data = 2; 1238 if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
1239 *data = 2;
1240 }
1174 1241
1175 return *data; 1242 return *data;
1176} 1243}
@@ -1236,6 +1303,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1236 ics_mask = 0x77DCFED5; 1303 ics_mask = 0x77DCFED5;
1237 break; 1304 break;
1238 case e1000_i350: 1305 case e1000_i350:
1306 case e1000_i210:
1307 case e1000_i211:
1239 ics_mask = 0x77DCFED5; 1308 ics_mask = 0x77DCFED5;
1240 break; 1309 break;
1241 default: 1310 default:
@@ -1402,23 +1471,35 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1402{ 1471{
1403 struct e1000_hw *hw = &adapter->hw; 1472 struct e1000_hw *hw = &adapter->hw;
1404 u32 ctrl_reg = 0; 1473 u32 ctrl_reg = 0;
1474 u16 phy_reg = 0;
1405 1475
1406 hw->mac.autoneg = false; 1476 hw->mac.autoneg = false;
1407 1477
1408 if (hw->phy.type == e1000_phy_m88) { 1478 switch (hw->phy.type) {
1479 case e1000_phy_m88:
1409 /* Auto-MDI/MDIX Off */ 1480 /* Auto-MDI/MDIX Off */
1410 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 1481 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1411 /* reset to update Auto-MDI/MDIX */ 1482 /* reset to update Auto-MDI/MDIX */
1412 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); 1483 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
1413 /* autoneg off */ 1484 /* autoneg off */
1414 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); 1485 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
1415 } else if (hw->phy.type == e1000_phy_82580) { 1486 break;
1487 case e1000_phy_82580:
1416 /* enable MII loopback */ 1488 /* enable MII loopback */
1417 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); 1489 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
1490 break;
1491 case e1000_phy_i210:
1492 /* set loopback speed in PHY */
1493 igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
1494 &phy_reg);
1495 phy_reg |= GS40G_MAC_SPEED_1G;
1496 igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
1497 phy_reg);
1498 ctrl_reg = rd32(E1000_CTRL_EXT);
1499 default:
1500 break;
1418 } 1501 }
1419 1502
1420 ctrl_reg = rd32(E1000_CTRL);
1421
1422 /* force 1000, set loopback */ 1503 /* force 1000, set loopback */
1423 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); 1504 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1424 1505
@@ -1431,7 +1512,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1431 E1000_CTRL_FD | /* Force Duplex to FULL */ 1512 E1000_CTRL_FD | /* Force Duplex to FULL */
1432 E1000_CTRL_SLU); /* Set link up enable bit */ 1513 E1000_CTRL_SLU); /* Set link up enable bit */
1433 1514
1434 if (hw->phy.type == e1000_phy_m88) 1515 if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
1435 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1516 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1436 1517
1437 wr32(E1000_CTRL, ctrl_reg); 1518 wr32(E1000_CTRL, ctrl_reg);
@@ -1439,7 +1520,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1439 /* Disable the receiver on the PHY so when a cable is plugged in, the 1520 /* Disable the receiver on the PHY so when a cable is plugged in, the
1440 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1521 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1441 */ 1522 */
1442 if (hw->phy.type == e1000_phy_m88) 1523 if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
1443 igb_phy_disable_receiver(adapter); 1524 igb_phy_disable_receiver(adapter);
1444 1525
1445 udelay(500); 1526 udelay(500);
@@ -1704,6 +1785,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1704 *data = 0; 1785 *data = 0;
1705 goto out; 1786 goto out;
1706 } 1787 }
1788 if ((adapter->hw.mac.type == e1000_i210)
 1789 || (adapter->hw.mac.type == e1000_i211)) {
1790 dev_err(&adapter->pdev->dev,
1791 "Loopback test not supported "
1792 "on this part at this time.\n");
1793 *data = 0;
1794 goto out;
1795 }
1707 *data = igb_setup_desc_rings(adapter); 1796 *data = igb_setup_desc_rings(adapter);
1708 if (*data) 1797 if (*data)
1709 goto out; 1798 goto out;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5ec31598ee47..dd3bfe8cd36c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,8 +60,8 @@
60#include "igb.h" 60#include "igb.h"
61 61
62#define MAJ 3 62#define MAJ 3
63#define MIN 2 63#define MIN 4
64#define BUILD 10 64#define BUILD 7
65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
66__stringify(BUILD) "-k" 66__stringify(BUILD) "-k"
67char igb_driver_name[] = "igb"; 67char igb_driver_name[] = "igb";
@@ -75,6 +75,11 @@ static const struct e1000_info *igb_info_tbl[] = {
75}; 75};
76 76
77static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { 77static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, 83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, 84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, 85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
@@ -114,7 +119,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
114static void igb_setup_mrqc(struct igb_adapter *); 119static void igb_setup_mrqc(struct igb_adapter *);
115static int igb_probe(struct pci_dev *, const struct pci_device_id *); 120static int igb_probe(struct pci_dev *, const struct pci_device_id *);
116static void __devexit igb_remove(struct pci_dev *pdev); 121static void __devexit igb_remove(struct pci_dev *pdev);
117static void igb_init_hw_timer(struct igb_adapter *adapter);
118static int igb_sw_init(struct igb_adapter *); 122static int igb_sw_init(struct igb_adapter *);
119static int igb_open(struct net_device *); 123static int igb_open(struct net_device *);
120static int igb_close(struct net_device *); 124static int igb_close(struct net_device *);
@@ -565,33 +569,6 @@ exit:
565 return; 569 return;
566} 570}
567 571
568
569/**
570 * igb_read_clock - read raw cycle counter (to be used by time counter)
571 */
572static cycle_t igb_read_clock(const struct cyclecounter *tc)
573{
574 struct igb_adapter *adapter =
575 container_of(tc, struct igb_adapter, cycles);
576 struct e1000_hw *hw = &adapter->hw;
577 u64 stamp = 0;
578 int shift = 0;
579
580 /*
581 * The timestamp latches on lowest register read. For the 82580
582 * the lowest register is SYSTIMR instead of SYSTIML. However we never
583 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
584 */
585 if (hw->mac.type >= e1000_82580) {
586 stamp = rd32(E1000_SYSTIMR) >> 8;
587 shift = IGB_82580_TSYNC_SHIFT;
588 }
589
590 stamp |= (u64)rd32(E1000_SYSTIML) << shift;
591 stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
592 return stamp;
593}
594
595/** 572/**
596 * igb_get_hw_dev - return device 573 * igb_get_hw_dev - return device
597 * used by hardware layer to print debugging information 574 * used by hardware layer to print debugging information
@@ -669,6 +646,8 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
669 case e1000_82575: 646 case e1000_82575:
670 case e1000_82580: 647 case e1000_82580:
671 case e1000_i350: 648 case e1000_i350:
649 case e1000_i210:
650 case e1000_i211:
672 default: 651 default:
673 for (; i < adapter->num_rx_queues; i++) 652 for (; i < adapter->num_rx_queues; i++)
674 adapter->rx_ring[i]->reg_idx = rbase_offset + i; 653 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -755,8 +734,11 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
755 if (adapter->hw.mac.type >= e1000_82576) 734 if (adapter->hw.mac.type >= e1000_82576)
756 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); 735 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
757 736
758 /* On i350, loopback VLAN packets have the tag byte-swapped. */ 737 /*
759 if (adapter->hw.mac.type == e1000_i350) 738 * On i350, i210, and i211, loopback VLAN packets
739 * have the tag byte-swapped.
 740 */
741 if (adapter->hw.mac.type >= e1000_i350)
760 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); 742 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
761 743
762 adapter->rx_ring[i] = ring; 744 adapter->rx_ring[i] = ring;
@@ -850,6 +832,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
850 break; 832 break;
851 case e1000_82580: 833 case e1000_82580:
852 case e1000_i350: 834 case e1000_i350:
835 case e1000_i210:
836 case e1000_i211:
853 /* 837 /*
854 * On 82580 and newer adapters the scheme is similar to 82576 838 * On 82580 and newer adapters the scheme is similar to 82576
855 * however instead of ordering column-major we have things 839 * however instead of ordering column-major we have things
@@ -916,6 +900,8 @@ static void igb_configure_msix(struct igb_adapter *adapter)
916 case e1000_82576: 900 case e1000_82576:
917 case e1000_82580: 901 case e1000_82580:
918 case e1000_i350: 902 case e1000_i350:
903 case e1000_i210:
904 case e1000_i211:
919 /* Turn on MSI-X capability first, or our settings 905 /* Turn on MSI-X capability first, or our settings
920 * won't stick. And it will take days to debug. */ 906 * won't stick. And it will take days to debug. */
921 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | 907 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@ -1062,6 +1048,11 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
1062 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) 1048 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1063 numvecs += adapter->num_tx_queues; 1049 numvecs += adapter->num_tx_queues;
1064 1050
1051 /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
1052 if ((adapter->hw.mac.type == e1000_i210)
1053 || (adapter->hw.mac.type == e1000_i211))
1054 numvecs = 4;
1055
1065 /* store the number of vectors reserved for queues */ 1056 /* store the number of vectors reserved for queues */
1066 adapter->num_q_vectors = numvecs; 1057 adapter->num_q_vectors = numvecs;
1067 1058
@@ -1069,6 +1060,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
1069 numvecs++; 1060 numvecs++;
1070 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 1061 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
1071 GFP_KERNEL); 1062 GFP_KERNEL);
1063
1072 if (!adapter->msix_entries) 1064 if (!adapter->msix_entries)
1073 goto msi_only; 1065 goto msi_only;
1074 1066
@@ -1111,9 +1103,12 @@ msi_only:
1111 adapter->flags |= IGB_FLAG_HAS_MSI; 1103 adapter->flags |= IGB_FLAG_HAS_MSI;
1112out: 1104out:
1113 /* Notify the stack of the (possibly) reduced queue counts. */ 1105 /* Notify the stack of the (possibly) reduced queue counts. */
1106 rtnl_lock();
1114 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 1107 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
1115 return netif_set_real_num_rx_queues(adapter->netdev, 1108 err = netif_set_real_num_rx_queues(adapter->netdev,
1116 adapter->num_rx_queues); 1109 adapter->num_rx_queues);
1110 rtnl_unlock();
1111 return err;
1117} 1112}
1118 1113
1119/** 1114/**
@@ -1659,6 +1654,8 @@ void igb_reset(struct igb_adapter *adapter)
1659 pba &= E1000_RXPBS_SIZE_MASK_82576; 1654 pba &= E1000_RXPBS_SIZE_MASK_82576;
1660 break; 1655 break;
1661 case e1000_82575: 1656 case e1000_82575:
1657 case e1000_i210:
1658 case e1000_i211:
1662 default: 1659 default:
1663 pba = E1000_PBA_34K; 1660 pba = E1000_PBA_34K;
1664 break; 1661 break;
@@ -1743,6 +1740,13 @@ void igb_reset(struct igb_adapter *adapter)
1743 if (hw->mac.ops.init_hw(hw)) 1740 if (hw->mac.ops.init_hw(hw))
1744 dev_err(&pdev->dev, "Hardware Error\n"); 1741 dev_err(&pdev->dev, "Hardware Error\n");
1745 1742
1743 /*
1744 * Flow control settings reset on hardware reset, so guarantee flow
1745 * control is off when forcing speed.
1746 */
1747 if (!hw->mac.autoneg)
1748 igb_force_mac_fc(hw);
1749
1746 igb_init_dmac(adapter, pba); 1750 igb_init_dmac(adapter, pba);
1747 if (!netif_running(adapter->netdev)) 1751 if (!netif_running(adapter->netdev))
1748 igb_power_down_link(adapter); 1752 igb_power_down_link(adapter);
@@ -1847,7 +1851,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1847 */ 1851 */
1848 if (pdev->is_virtfn) { 1852 if (pdev->is_virtfn) {
1849 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", 1853 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1850 pci_name(pdev), pdev->vendor, pdev->device); 1854 pci_name(pdev), pdev->vendor, pdev->device);
1851 return -EINVAL; 1855 return -EINVAL;
1852 } 1856 }
1853 1857
@@ -2001,11 +2005,16 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2001 * known good starting state */ 2005 * known good starting state */
2002 hw->mac.ops.reset_hw(hw); 2006 hw->mac.ops.reset_hw(hw);
2003 2007
2004 /* make sure the NVM is good */ 2008 /*
 2005 if (hw->nvm.ops.validate(hw) < 0) { 2009 * make sure the NVM is good, i211 parts have special NVM that
2006 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); 2010 * doesn't contain a checksum
2007 err = -EIO; 2011 */
2008 goto err_eeprom; 2012 if (hw->mac.type != e1000_i211) {
2013 if (hw->nvm.ops.validate(hw) < 0) {
2014 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2015 err = -EIO;
2016 goto err_eeprom;
2017 }
2009 } 2018 }
2010 2019
2011 /* copy the MAC address out of the NVM */ 2020 /* copy the MAC address out of the NVM */
@@ -2110,9 +2119,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2110 } 2119 }
2111 2120
2112#endif 2121#endif
2122#ifdef CONFIG_IGB_PTP
2113 /* do hw tstamp init after resetting */ 2123 /* do hw tstamp init after resetting */
2114 igb_init_hw_timer(adapter); 2124 igb_ptp_init(adapter);
2115 2125
2126#endif
2116 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2127 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2117 /* print bus type/speed/width info */ 2128 /* print bus type/speed/width info */
2118 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 2129 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2137,6 +2148,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2137 adapter->num_rx_queues, adapter->num_tx_queues); 2148 adapter->num_rx_queues, adapter->num_tx_queues);
2138 switch (hw->mac.type) { 2149 switch (hw->mac.type) {
2139 case e1000_i350: 2150 case e1000_i350:
2151 case e1000_i210:
2152 case e1000_i211:
2140 igb_set_eee_i350(hw); 2153 igb_set_eee_i350(hw);
2141 break; 2154 break;
2142 default: 2155 default:
@@ -2184,7 +2197,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2184 struct e1000_hw *hw = &adapter->hw; 2197 struct e1000_hw *hw = &adapter->hw;
2185 2198
2186 pm_runtime_get_noresume(&pdev->dev); 2199 pm_runtime_get_noresume(&pdev->dev);
2200#ifdef CONFIG_IGB_PTP
2201 igb_ptp_remove(adapter);
2187 2202
2203#endif
2188 /* 2204 /*
2189 * The watchdog timer may be rescheduled, so explicitly 2205 * The watchdog timer may be rescheduled, so explicitly
2190 * disable watchdog from being rescheduled. 2206 * disable watchdog from being rescheduled.
@@ -2260,9 +2276,14 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2260{ 2276{
2261#ifdef CONFIG_PCI_IOV 2277#ifdef CONFIG_PCI_IOV
2262 struct pci_dev *pdev = adapter->pdev; 2278 struct pci_dev *pdev = adapter->pdev;
2279 struct e1000_hw *hw = &adapter->hw;
2263 int old_vfs = igb_find_enabled_vfs(adapter); 2280 int old_vfs = igb_find_enabled_vfs(adapter);
2264 int i; 2281 int i;
2265 2282
2283 /* Virtualization features not supported on i210 family. */
2284 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2285 return;
2286
2266 if (old_vfs) { 2287 if (old_vfs) {
2267 dev_info(&pdev->dev, "%d pre-allocated VFs found - override " 2288 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2268 "max_vfs setting of %d\n", old_vfs, max_vfs); 2289 "max_vfs setting of %d\n", old_vfs, max_vfs);
@@ -2274,6 +2295,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2274 2295
2275 adapter->vf_data = kcalloc(adapter->vfs_allocated_count, 2296 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2276 sizeof(struct vf_data_storage), GFP_KERNEL); 2297 sizeof(struct vf_data_storage), GFP_KERNEL);
2298
2277 /* if allocation failed then we do not support SR-IOV */ 2299 /* if allocation failed then we do not support SR-IOV */
2278 if (!adapter->vf_data) { 2300 if (!adapter->vf_data) {
2279 adapter->vfs_allocated_count = 0; 2301 adapter->vfs_allocated_count = 0;
@@ -2304,112 +2326,6 @@ out:
2304} 2326}
2305 2327
2306/** 2328/**
2307 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2308 * @adapter: board private structure to initialize
2309 *
2310 * igb_init_hw_timer initializes the function pointer and values for the hw
2311 * timer found in hardware.
2312 **/
2313static void igb_init_hw_timer(struct igb_adapter *adapter)
2314{
2315 struct e1000_hw *hw = &adapter->hw;
2316
2317 switch (hw->mac.type) {
2318 case e1000_i350:
2319 case e1000_82580:
2320 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2321 adapter->cycles.read = igb_read_clock;
2322 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2323 adapter->cycles.mult = 1;
2324 /*
2325 * The 82580 timesync updates the system timer every 8ns by 8ns
2326 * and the value cannot be shifted. Instead we need to shift
2327 * the registers to generate a 64bit timer value. As a result
2328 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2329 * 24 in order to generate a larger value for synchronization.
2330 */
2331 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2332 /* disable system timer temporarily by setting bit 31 */
2333 wr32(E1000_TSAUXC, 0x80000000);
2334 wrfl();
2335
2336 /* Set registers so that rollover occurs soon to test this. */
2337 wr32(E1000_SYSTIMR, 0x00000000);
2338 wr32(E1000_SYSTIML, 0x80000000);
2339 wr32(E1000_SYSTIMH, 0x000000FF);
2340 wrfl();
2341
2342 /* enable system timer by clearing bit 31 */
2343 wr32(E1000_TSAUXC, 0x0);
2344 wrfl();
2345
2346 timecounter_init(&adapter->clock,
2347 &adapter->cycles,
2348 ktime_to_ns(ktime_get_real()));
2349 /*
2350 * Synchronize our NIC clock against system wall clock. NIC
2351 * time stamp reading requires ~3us per sample, each sample
2352 * was pretty stable even under load => only require 10
2353 * samples for each offset comparison.
2354 */
2355 memset(&adapter->compare, 0, sizeof(adapter->compare));
2356 adapter->compare.source = &adapter->clock;
2357 adapter->compare.target = ktime_get_real;
2358 adapter->compare.num_samples = 10;
2359 timecompare_update(&adapter->compare, 0);
2360 break;
2361 case e1000_82576:
2362 /*
2363 * Initialize hardware timer: we keep it running just in case
2364 * that some program needs it later on.
2365 */
2366 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2367 adapter->cycles.read = igb_read_clock;
2368 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2369 adapter->cycles.mult = 1;
2370 /**
2371 * Scale the NIC clock cycle by a large factor so that
2372 * relatively small clock corrections can be added or
2373 * subtracted at each clock tick. The drawbacks of a large
2374 * factor are a) that the clock register overflows more quickly
2375 * (not such a big deal) and b) that the increment per tick has
2376 * to fit into 24 bits. As a result we need to use a shift of
2377 * 19 so we can fit a value of 16 into the TIMINCA register.
2378 */
2379 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2380 wr32(E1000_TIMINCA,
2381 (1 << E1000_TIMINCA_16NS_SHIFT) |
2382 (16 << IGB_82576_TSYNC_SHIFT));
2383
2384 /* Set registers so that rollover occurs soon to test this. */
2385 wr32(E1000_SYSTIML, 0x00000000);
2386 wr32(E1000_SYSTIMH, 0xFF800000);
2387 wrfl();
2388
2389 timecounter_init(&adapter->clock,
2390 &adapter->cycles,
2391 ktime_to_ns(ktime_get_real()));
2392 /*
2393 * Synchronize our NIC clock against system wall clock. NIC
2394 * time stamp reading requires ~3us per sample, each sample
2395 * was pretty stable even under load => only require 10
2396 * samples for each offset comparison.
2397 */
2398 memset(&adapter->compare, 0, sizeof(adapter->compare));
2399 adapter->compare.source = &adapter->clock;
2400 adapter->compare.target = ktime_get_real;
2401 adapter->compare.num_samples = 10;
2402 timecompare_update(&adapter->compare, 0);
2403 break;
2404 case e1000_82575:
2405 /* 82575 does not support timesync */
2406 default:
2407 break;
2408 }
2409
2410}
2411
2412/**
2413 * igb_sw_init - Initialize general software structures (struct igb_adapter) 2329 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2414 * @adapter: board private structure to initialize 2330 * @adapter: board private structure to initialize
2415 * 2331 *
@@ -2454,11 +2370,28 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2454 } else 2370 } else
2455 adapter->vfs_allocated_count = max_vfs; 2371 adapter->vfs_allocated_count = max_vfs;
2456 break; 2372 break;
2373 case e1000_i210:
2374 case e1000_i211:
2375 adapter->vfs_allocated_count = 0;
2376 break;
2457 default: 2377 default:
2458 break; 2378 break;
2459 } 2379 }
2460#endif /* CONFIG_PCI_IOV */ 2380#endif /* CONFIG_PCI_IOV */
2461 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 2381 switch (hw->mac.type) {
2382 case e1000_i210:
2383 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210,
2384 num_online_cpus());
2385 break;
2386 case e1000_i211:
2387 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211,
2388 num_online_cpus());
2389 break;
2390 default:
2391 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES,
2392 num_online_cpus());
2393 break;
2394 }
2462 /* i350 cannot do RSS and SR-IOV at the same time */ 2395 /* i350 cannot do RSS and SR-IOV at the same time */
2463 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count) 2396 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2464 adapter->rss_queues = 1; 2397 adapter->rss_queues = 1;
@@ -2488,7 +2421,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2488 /* Explicitly disable IRQ since the NIC can be in any state. */ 2421 /* Explicitly disable IRQ since the NIC can be in any state. */
2489 igb_irq_disable(adapter); 2422 igb_irq_disable(adapter);
2490 2423
2491 if (hw->mac.type == e1000_i350) 2424 if (hw->mac.type >= e1000_i350)
2492 adapter->flags &= ~IGB_FLAG_DMAC; 2425 adapter->flags &= ~IGB_FLAG_DMAC;
2493 2426
2494 set_bit(__IGB_DOWN, &adapter->state); 2427 set_bit(__IGB_DOWN, &adapter->state);
@@ -2771,8 +2704,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
2771 2704
2772 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 2705 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2773 wr32(E1000_TXDCTL(reg_idx), txdctl); 2706 wr32(E1000_TXDCTL(reg_idx), txdctl);
2774
2775 netdev_tx_reset_queue(txring_txq(ring));
2776} 2707}
2777 2708
2778/** 2709/**
@@ -2943,6 +2874,17 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2943 2874
2944 /* Don't need to set TUOFL or IPOFL, they default to 1 */ 2875 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2945 wr32(E1000_RXCSUM, rxcsum); 2876 wr32(E1000_RXCSUM, rxcsum);
2877 /*
2878 * Generate RSS hash based on TCP port numbers and/or
2879 * IPv4/v6 src and dst addresses since UDP cannot be
2880 * hashed reliably due to IP fragmentation
2881 */
2882
2883 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
2884 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2885 E1000_MRQC_RSS_FIELD_IPV6 |
2886 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2887 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
2946 2888
2947 /* If VMDq is enabled then we set the appropriate mode for that, else 2889 /* If VMDq is enabled then we set the appropriate mode for that, else
2948 * we default to RSS so that an RSS hash is calculated per packet even 2890 * we default to RSS so that an RSS hash is calculated per packet even
@@ -2958,25 +2900,15 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2958 wr32(E1000_VT_CTL, vtctl); 2900 wr32(E1000_VT_CTL, vtctl);
2959 } 2901 }
2960 if (adapter->rss_queues > 1) 2902 if (adapter->rss_queues > 1)
2961 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; 2903 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2962 else 2904 else
2963 mrqc = E1000_MRQC_ENABLE_VMDQ; 2905 mrqc |= E1000_MRQC_ENABLE_VMDQ;
2964 } else { 2906 } else {
2965 mrqc = E1000_MRQC_ENABLE_RSS_4Q; 2907 if (hw->mac.type != e1000_i211)
2908 mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
2966 } 2909 }
2967 igb_vmm_control(adapter); 2910 igb_vmm_control(adapter);
2968 2911
2969 /*
2970 * Generate RSS hash based on TCP port numbers and/or
2971 * IPv4/v6 src and dst addresses since UDP cannot be
2972 * hashed reliably due to IP fragmentation
2973 */
2974 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2975 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2976 E1000_MRQC_RSS_FIELD_IPV6 |
2977 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2978 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
2979
2980 wr32(E1000_MRQC, mrqc); 2912 wr32(E1000_MRQC, mrqc);
2981} 2913}
2982 2914
@@ -3282,6 +3214,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3282 igb_unmap_and_free_tx_resource(tx_ring, buffer_info); 3214 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3283 } 3215 }
3284 3216
3217 netdev_tx_reset_queue(txring_txq(tx_ring));
3218
3285 size = sizeof(struct igb_tx_buffer) * tx_ring->count; 3219 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3286 memset(tx_ring->tx_buffer_info, 0, size); 3220 memset(tx_ring->tx_buffer_info, 0, size);
3287 3221
@@ -3576,7 +3510,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3576 * we will have issues with VLAN tag stripping not being done for frames 3510 * we will have issues with VLAN tag stripping not being done for frames
3577 * that are only arriving because we are the default pool 3511 * that are only arriving because we are the default pool
3578 */ 3512 */
3579 if (hw->mac.type < e1000_82576) 3513 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
3580 return; 3514 return;
3581 3515
3582 vmolr |= rd32(E1000_VMOLR(vfn)) & 3516 vmolr |= rd32(E1000_VMOLR(vfn)) &
@@ -3673,7 +3607,7 @@ static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3673 bool ret = false; 3607 bool ret = false;
3674 u32 ctrl_ext, thstat; 3608 u32 ctrl_ext, thstat;
3675 3609
3676 /* check for thermal sensor event on i350, copper only */ 3610 /* check for thermal sensor event on i350 copper only */
3677 if (hw->mac.type == e1000_i350) { 3611 if (hw->mac.type == e1000_i350) {
3678 thstat = rd32(E1000_THSTAT); 3612 thstat = rd32(E1000_THSTAT);
3679 ctrl_ext = rd32(E1000_CTRL_EXT); 3613 ctrl_ext = rd32(E1000_CTRL_EXT);
@@ -5718,35 +5652,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
5718 return 0; 5652 return 0;
5719} 5653}
5720 5654
5721/** 5655#ifdef CONFIG_IGB_PTP
5722 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
5723 * @adapter: board private structure
5724 * @shhwtstamps: timestamp structure to update
5725 * @regval: unsigned 64bit system time value.
5726 *
5727 * We need to convert the system time value stored in the RX/TXSTMP registers
5728 * into a hwtstamp which can be used by the upper level timestamping functions
5729 */
5730static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5731 struct skb_shared_hwtstamps *shhwtstamps,
5732 u64 regval)
5733{
5734 u64 ns;
5735
5736 /*
5737 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
5738 * 24 to match clock shift we setup earlier.
5739 */
5740 if (adapter->hw.mac.type >= e1000_82580)
5741 regval <<= IGB_82580_TSYNC_SHIFT;
5742
5743 ns = timecounter_cyc2time(&adapter->clock, regval);
5744 timecompare_update(&adapter->compare, ns);
5745 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5746 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5747 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5748}
5749
5750/** 5656/**
5751 * igb_tx_hwtstamp - utility function which checks for TX time stamp 5657 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5752 * @q_vector: pointer to q_vector containing needed info 5658 * @q_vector: pointer to q_vector containing needed info
@@ -5776,6 +5682,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5776 skb_tstamp_tx(buffer_info->skb, &shhwtstamps); 5682 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
5777} 5683}
5778 5684
5685#endif
5779/** 5686/**
5780 * igb_clean_tx_irq - Reclaim resources after transmit completes 5687 * igb_clean_tx_irq - Reclaim resources after transmit completes
5781 * @q_vector: pointer to q_vector containing needed info 5688 * @q_vector: pointer to q_vector containing needed info
@@ -5819,9 +5726,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5819 total_bytes += tx_buffer->bytecount; 5726 total_bytes += tx_buffer->bytecount;
5820 total_packets += tx_buffer->gso_segs; 5727 total_packets += tx_buffer->gso_segs;
5821 5728
5729#ifdef CONFIG_IGB_PTP
5822 /* retrieve hardware timestamp */ 5730 /* retrieve hardware timestamp */
5823 igb_tx_hwtstamp(q_vector, tx_buffer); 5731 igb_tx_hwtstamp(q_vector, tx_buffer);
5824 5732
5733#endif
5825 /* free the skb */ 5734 /* free the skb */
5826 dev_kfree_skb_any(tx_buffer->skb); 5735 dev_kfree_skb_any(tx_buffer->skb);
5827 tx_buffer->skb = NULL; 5736 tx_buffer->skb = NULL;
@@ -5993,6 +5902,7 @@ static inline void igb_rx_hash(struct igb_ring *ring,
5993 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 5902 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5994} 5903}
5995 5904
5905#ifdef CONFIG_IGB_PTP
5996static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, 5906static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5997 union e1000_adv_rx_desc *rx_desc, 5907 union e1000_adv_rx_desc *rx_desc,
5998 struct sk_buff *skb) 5908 struct sk_buff *skb)
@@ -6032,6 +5942,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
6032 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 5942 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
6033} 5943}
6034 5944
5945#endif
6035static void igb_rx_vlan(struct igb_ring *ring, 5946static void igb_rx_vlan(struct igb_ring *ring,
6036 union e1000_adv_rx_desc *rx_desc, 5947 union e1000_adv_rx_desc *rx_desc,
6037 struct sk_buff *skb) 5948 struct sk_buff *skb)
@@ -6142,7 +6053,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
6142 goto next_desc; 6053 goto next_desc;
6143 } 6054 }
6144 6055
6056#ifdef CONFIG_IGB_PTP
6145 igb_rx_hwtstamp(q_vector, rx_desc, skb); 6057 igb_rx_hwtstamp(q_vector, rx_desc, skb);
6058#endif
6146 igb_rx_hash(rx_ring, rx_desc, skb); 6059 igb_rx_hash(rx_ring, rx_desc, skb);
6147 igb_rx_checksum(rx_ring, rx_desc, skb); 6060 igb_rx_checksum(rx_ring, rx_desc, skb);
6148 igb_rx_vlan(rx_ring, rx_desc, skb); 6061 igb_rx_vlan(rx_ring, rx_desc, skb);
@@ -6796,18 +6709,7 @@ static int igb_resume(struct device *dev)
6796 pci_enable_wake(pdev, PCI_D3hot, 0); 6709 pci_enable_wake(pdev, PCI_D3hot, 0);
6797 pci_enable_wake(pdev, PCI_D3cold, 0); 6710 pci_enable_wake(pdev, PCI_D3cold, 0);
6798 6711
6799 if (!rtnl_is_locked()) { 6712 if (igb_init_interrupt_scheme(adapter)) {
6800 /*
6801 * shut up ASSERT_RTNL() warning in
6802 * netif_set_real_num_tx/rx_queues.
6803 */
6804 rtnl_lock();
6805 err = igb_init_interrupt_scheme(adapter);
6806 rtnl_unlock();
6807 } else {
6808 err = igb_init_interrupt_scheme(adapter);
6809 }
6810 if (err) {
6811 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 6713 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6812 return -ENOMEM; 6714 return -ENOMEM;
6813 } 6715 }
@@ -7170,6 +7072,8 @@ static void igb_vmm_control(struct igb_adapter *adapter)
7170 7072
7171 switch (hw->mac.type) { 7073 switch (hw->mac.type) {
7172 case e1000_82575: 7074 case e1000_82575:
7075 case e1000_i210:
7076 case e1000_i211:
7173 default: 7077 default:
7174 /* replication is not supported for 82575 */ 7078 /* replication is not supported for 82575 */
7175 return; 7079 return;
@@ -7243,6 +7147,9 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7243 7147
7244 /* watchdog timer= +-1000 usec in 32usec intervals */ 7148 /* watchdog timer= +-1000 usec in 32usec intervals */
7245 reg |= (1000 >> 5); 7149 reg |= (1000 >> 5);
7150
7151 /* Disable BMC-to-OS Watchdog Enable */
7152 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
7246 wr32(E1000_DMACR, reg); 7153 wr32(E1000_DMACR, reg);
7247 7154
7248 /* 7155 /*
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
new file mode 100644
index 000000000000..d5ee7fa50723
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -0,0 +1,385 @@
1/*
2 * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
3 *
4 * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20#include <linux/module.h>
21#include <linux/device.h>
22#include <linux/pci.h>
23
24#include "igb.h"
25
26#define INCVALUE_MASK 0x7fffffff
27#define ISGN 0x80000000
28
29/*
30 * The 82580 timesync updates the system timer every 8ns by 8ns,
31 * and this update value cannot be reprogrammed.
32 *
33 * Neither the 82576 nor the 82580 offer registers wide enough to hold
34 * nanoseconds time values for very long. For the 82580, SYSTIM always
35 * counts nanoseconds, but the upper 24 bits are not availible. The
36 * frequency is adjusted by changing the 32 bit fractional nanoseconds
37 * register, TIMINCA.
38 *
39 * For the 82576, the SYSTIM register time unit is affect by the
40 * choice of the 24 bit TININCA:IV (incvalue) field. Five bits of this
41 * field are needed to provide the nominal 16 nanosecond period,
42 * leaving 19 bits for fractional nanoseconds.
43 *
44 * We scale the NIC clock cycle by a large factor so that relatively
45 * small clock corrections can be added or subtracted at each clock
46 * tick. The drawbacks of a large factor are a) that the clock
47 * register overflows more quickly (not such a big deal) and b) that
48 * the increment per tick has to fit into 24 bits. As a result we
49 * need to use a shift of 19 so we can fit a value of 16 into the
50 * TIMINCA register.
51 *
52 *
53 * SYSTIMH SYSTIML
54 * +--------------+ +---+---+------+
55 * 82576 | 32 | | 8 | 5 | 19 |
56 * +--------------+ +---+---+------+
57 * \________ 45 bits _______/ fract
58 *
59 * +----------+---+ +--------------+
60 * 82580 | 24 | 8 | | 32 |
61 * +----------+---+ +--------------+
62 * reserved \______ 40 bits _____/
63 *
64 *
65 * The 45 bit 82576 SYSTIM overflows every
66 * 2^45 * 10^-9 / 3600 = 9.77 hours.
67 *
68 * The 40 bit 82580 SYSTIM overflows every
69 * 2^40 * 10^-9 / 60 = 18.3 minutes.
70 */
71
72#define IGB_OVERFLOW_PERIOD (HZ * 60 * 9)
73#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
74#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
76#define IGB_NBITS_82580 40
77
78/*
79 * SYSTIM read access for the 82576
80 */
81
82static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
83{
84 u64 val;
85 u32 lo, hi;
86 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
87 struct e1000_hw *hw = &igb->hw;
88
89 lo = rd32(E1000_SYSTIML);
90 hi = rd32(E1000_SYSTIMH);
91
92 val = ((u64) hi) << 32;
93 val |= lo;
94
95 return val;
96}
97
98/*
99 * SYSTIM read access for the 82580
100 */
101
102static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
103{
104 u64 val;
105 u32 lo, hi, jk;
106 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
107 struct e1000_hw *hw = &igb->hw;
108
109 /*
110 * The timestamp latches on lowest register read. For the 82580
111 * the lowest register is SYSTIMR instead of SYSTIML. However we only
112 * need to provide nanosecond resolution, so we just ignore it.
113 */
114 jk = rd32(E1000_SYSTIMR);
115 lo = rd32(E1000_SYSTIML);
116 hi = rd32(E1000_SYSTIMH);
117
118 val = ((u64) hi) << 32;
119 val |= lo;
120
121 return val;
122}
123
124/*
125 * PTP clock operations
126 */
127
128static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
129{
130 u64 rate;
131 u32 incvalue;
132 int neg_adj = 0;
133 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
134 struct e1000_hw *hw = &igb->hw;
135
136 if (ppb < 0) {
137 neg_adj = 1;
138 ppb = -ppb;
139 }
140 rate = ppb;
141 rate <<= 14;
142 rate = div_u64(rate, 1953125);
143
144 incvalue = 16 << IGB_82576_TSYNC_SHIFT;
145
146 if (neg_adj)
147 incvalue -= rate;
148 else
149 incvalue += rate;
150
151 wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
152
153 return 0;
154}
155
156static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
157{
158 u64 rate;
159 u32 inca;
160 int neg_adj = 0;
161 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
162 struct e1000_hw *hw = &igb->hw;
163
164 if (ppb < 0) {
165 neg_adj = 1;
166 ppb = -ppb;
167 }
168 rate = ppb;
169 rate <<= 26;
170 rate = div_u64(rate, 1953125);
171
172 inca = rate & INCVALUE_MASK;
173 if (neg_adj)
174 inca |= ISGN;
175
176 wr32(E1000_TIMINCA, inca);
177
178 return 0;
179}
180
181static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
182{
183 s64 now;
184 unsigned long flags;
185 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
186
187 spin_lock_irqsave(&igb->tmreg_lock, flags);
188
189 now = timecounter_read(&igb->tc);
190 now += delta;
191 timecounter_init(&igb->tc, &igb->cc, now);
192
193 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
194
195 return 0;
196}
197
198static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
199{
200 u64 ns;
201 u32 remainder;
202 unsigned long flags;
203 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
204
205 spin_lock_irqsave(&igb->tmreg_lock, flags);
206
207 ns = timecounter_read(&igb->tc);
208
209 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
210
211 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
212 ts->tv_nsec = remainder;
213
214 return 0;
215}
216
217static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
218{
219 u64 ns;
220 unsigned long flags;
221 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
222
223 ns = ts->tv_sec * 1000000000ULL;
224 ns += ts->tv_nsec;
225
226 spin_lock_irqsave(&igb->tmreg_lock, flags);
227
228 timecounter_init(&igb->tc, &igb->cc, ns);
229
230 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
231
232 return 0;
233}
234
235static int ptp_82576_enable(struct ptp_clock_info *ptp,
236 struct ptp_clock_request *rq, int on)
237{
238 return -EOPNOTSUPP;
239}
240
241static int ptp_82580_enable(struct ptp_clock_info *ptp,
242 struct ptp_clock_request *rq, int on)
243{
244 return -EOPNOTSUPP;
245}
246
247static void igb_overflow_check(struct work_struct *work)
248{
249 struct timespec ts;
250 struct igb_adapter *igb =
251 container_of(work, struct igb_adapter, overflow_work.work);
252
253 igb_gettime(&igb->caps, &ts);
254
255 pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
256
257 schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD);
258}
259
260void igb_ptp_init(struct igb_adapter *adapter)
261{
262 struct e1000_hw *hw = &adapter->hw;
263
264 switch (hw->mac.type) {
265 case e1000_i210:
266 case e1000_i211:
267 case e1000_i350:
268 case e1000_82580:
269 adapter->caps.owner = THIS_MODULE;
270 strcpy(adapter->caps.name, "igb-82580");
271 adapter->caps.max_adj = 62499999;
272 adapter->caps.n_ext_ts = 0;
273 adapter->caps.pps = 0;
274 adapter->caps.adjfreq = ptp_82580_adjfreq;
275 adapter->caps.adjtime = igb_adjtime;
276 adapter->caps.gettime = igb_gettime;
277 adapter->caps.settime = igb_settime;
278 adapter->caps.enable = ptp_82580_enable;
279 adapter->cc.read = igb_82580_systim_read;
280 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
281 adapter->cc.mult = 1;
282 adapter->cc.shift = 0;
283 /* Enable the timer functions by clearing bit 31. */
284 wr32(E1000_TSAUXC, 0x0);
285 break;
286
287 case e1000_82576:
288 adapter->caps.owner = THIS_MODULE;
289 strcpy(adapter->caps.name, "igb-82576");
290 adapter->caps.max_adj = 1000000000;
291 adapter->caps.n_ext_ts = 0;
292 adapter->caps.pps = 0;
293 adapter->caps.adjfreq = ptp_82576_adjfreq;
294 adapter->caps.adjtime = igb_adjtime;
295 adapter->caps.gettime = igb_gettime;
296 adapter->caps.settime = igb_settime;
297 adapter->caps.enable = ptp_82576_enable;
298 adapter->cc.read = igb_82576_systim_read;
299 adapter->cc.mask = CLOCKSOURCE_MASK(64);
300 adapter->cc.mult = 1;
301 adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
302 /* Dial the nominal frequency. */
303 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
304 break;
305
306 default:
307 adapter->ptp_clock = NULL;
308 return;
309 }
310
311 wrfl();
312
313 timecounter_init(&adapter->tc, &adapter->cc,
314 ktime_to_ns(ktime_get_real()));
315
316 INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check);
317
318 spin_lock_init(&adapter->tmreg_lock);
319
320 schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD);
321
322 adapter->ptp_clock = ptp_clock_register(&adapter->caps);
323 if (IS_ERR(adapter->ptp_clock)) {
324 adapter->ptp_clock = NULL;
325 dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
326 } else
327 dev_info(&adapter->pdev->dev, "added PHC on %s\n",
328 adapter->netdev->name);
329}
330
331void igb_ptp_remove(struct igb_adapter *adapter)
332{
333 cancel_delayed_work_sync(&adapter->overflow_work);
334
335 if (adapter->ptp_clock) {
336 ptp_clock_unregister(adapter->ptp_clock);
337 dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
338 adapter->netdev->name);
339 }
340}
341
342/**
343 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
344 * @adapter: board private structure
345 * @hwtstamps: timestamp structure to update
346 * @systim: unsigned 64bit system time value.
347 *
348 * We need to convert the system time value stored in the RX/TXSTMP registers
349 * into a hwtstamp which can be used by the upper level timestamping functions.
350 *
351 * The 'tmreg_lock' spinlock is used to protect the consistency of the
352 * system time value. This is needed because reading the 64 bit time
353 * value involves reading two (or three) 32 bit registers. The first
354 * read latches the value. Ditto for writing.
355 *
356 * In addition, here have extended the system time with an overflow
357 * counter in software.
358 **/
359void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
360 struct skb_shared_hwtstamps *hwtstamps,
361 u64 systim)
362{
363 u64 ns;
364 unsigned long flags;
365
366 switch (adapter->hw.mac.type) {
367 case e1000_i210:
368 case e1000_i211:
369 case e1000_i350:
370 case e1000_82580:
371 case e1000_82576:
372 break;
373 default:
374 return;
375 }
376
377 spin_lock_irqsave(&adapter->tmreg_lock, flags);
378
379 ns = timecounter_cyc2time(&adapter->tc, systim);
380
381 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
382
383 memset(hwtstamps, 0, sizeof(*hwtstamps));
384 hwtstamps->hwtstamp = ns_to_ktime(ns);
385}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d61ca2a732f0..8ec74b07f940 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2731,14 +2731,14 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2731 netdev->addr_len); 2731 netdev->addr_len);
2732 } 2732 }
2733 2733
2734 if (!is_valid_ether_addr(netdev->perm_addr)) { 2734 if (!is_valid_ether_addr(netdev->dev_addr)) {
2735 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", 2735 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
2736 netdev->dev_addr); 2736 netdev->dev_addr);
2737 err = -EIO; 2737 err = -EIO;
2738 goto err_hw_init; 2738 goto err_hw_init;
2739 } 2739 }
2740 2740
2741 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 2741 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2742 2742
2743 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, 2743 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
2744 (unsigned long) adapter); 2744 (unsigned long) adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 8be1d1b2132e..0bdf06bc5c49 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,9 +34,11 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o
38 38
39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
41 41
42ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
43
42ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o 44ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 74e192107f9a..3ef3c5284e52 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -36,6 +36,12 @@
36#include <linux/aer.h> 36#include <linux/aer.h>
37#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
38 38
39#ifdef CONFIG_IXGBE_PTP
40#include <linux/clocksource.h>
41#include <linux/net_tstamp.h>
42#include <linux/ptp_clock_kernel.h>
43#endif /* CONFIG_IXGBE_PTP */
44
39#include "ixgbe_type.h" 45#include "ixgbe_type.h"
40#include "ixgbe_common.h" 46#include "ixgbe_common.h"
41#include "ixgbe_dcb.h" 47#include "ixgbe_dcb.h"
@@ -96,6 +102,7 @@
96#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5) 102#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
97#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) 103#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
98#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) 104#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
105#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
99#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 106#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
100#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 107#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
101#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 108#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -331,6 +338,26 @@ struct ixgbe_q_vector {
331 /* for dynamic allocation of rings associated with this q_vector */ 338 /* for dynamic allocation of rings associated with this q_vector */
332 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; 339 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
333}; 340};
341#ifdef CONFIG_IXGBE_HWMON
342
343#define IXGBE_HWMON_TYPE_LOC 0
344#define IXGBE_HWMON_TYPE_TEMP 1
345#define IXGBE_HWMON_TYPE_CAUTION 2
346#define IXGBE_HWMON_TYPE_MAX 3
347
348struct hwmon_attr {
349 struct device_attribute dev_attr;
350 struct ixgbe_hw *hw;
351 struct ixgbe_thermal_diode_data *sensor;
352 char name[12];
353};
354
355struct hwmon_buff {
356 struct device *device;
357 struct hwmon_attr *hwmon_list;
358 unsigned int n_hwmon;
359};
360#endif /* CONFIG_IXGBE_HWMON */
334 361
335/* 362/*
336 * microsecond values for various ITR rates shifted by 2 to fit itr register 363 * microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -438,6 +465,8 @@ struct ixgbe_adapter {
438#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) 465#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
439#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) 466#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
440#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) 467#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
468#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 10)
469#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11)
441 470
442 /* Tx fast path data */ 471 /* Tx fast path data */
443 int num_tx_queues; 472 int num_tx_queues;
@@ -525,6 +554,17 @@ struct ixgbe_adapter {
525 u32 interrupt_event; 554 u32 interrupt_event;
526 u32 led_reg; 555 u32 led_reg;
527 556
557#ifdef CONFIG_IXGBE_PTP
558 struct ptp_clock *ptp_clock;
559 struct ptp_clock_info ptp_caps;
560 unsigned long last_overflow_check;
561 spinlock_t tmreg_lock;
562 struct cyclecounter cc;
563 struct timecounter tc;
564 u32 base_incval;
565 u32 cycle_speed;
566#endif /* CONFIG_IXGBE_PTP */
567
528 /* SR-IOV */ 568 /* SR-IOV */
529 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); 569 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
530 unsigned int num_vfs; 570 unsigned int num_vfs;
@@ -535,6 +575,10 @@ struct ixgbe_adapter {
535 575
536 u32 timer_event_accumulator; 576 u32 timer_event_accumulator;
537 u32 vferr_refcount; 577 u32 vferr_refcount;
578 struct kobject *info_kobj;
579#ifdef CONFIG_IXGBE_HWMON
580 struct hwmon_buff ixgbe_hwmon_buff;
581#endif /* CONFIG_IXGBE_HWMON */
538}; 582};
539 583
540struct ixgbe_fdir_filter { 584struct ixgbe_fdir_filter {
@@ -574,9 +618,6 @@ extern struct ixgbe_info ixgbe_82599_info;
574extern struct ixgbe_info ixgbe_X540_info; 618extern struct ixgbe_info ixgbe_X540_info;
575#ifdef CONFIG_IXGBE_DCB 619#ifdef CONFIG_IXGBE_DCB
576extern const struct dcbnl_rtnl_ops dcbnl_ops; 620extern const struct dcbnl_rtnl_ops dcbnl_ops;
577extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
578 struct ixgbe_dcb_config *dst_dcb_cfg,
579 int tc_max);
580#endif 621#endif
581 622
582extern char ixgbe_driver_name[]; 623extern char ixgbe_driver_name[];
@@ -600,6 +641,8 @@ extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
600 struct ixgbe_ring *); 641 struct ixgbe_ring *);
601extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 642extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
602extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 643extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
644extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
645 u16 subdevice_id);
603extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 646extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
604extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, 647extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
605 struct ixgbe_adapter *, 648 struct ixgbe_adapter *,
@@ -629,10 +672,15 @@ extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
629 union ixgbe_atr_input *mask); 672 union ixgbe_atr_input *mask);
630extern void ixgbe_set_rx_mode(struct net_device *netdev); 673extern void ixgbe_set_rx_mode(struct net_device *netdev);
631#ifdef CONFIG_IXGBE_DCB 674#ifdef CONFIG_IXGBE_DCB
675extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
632extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); 676extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
633#endif 677#endif
634extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); 678extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
635extern void ixgbe_do_reset(struct net_device *netdev); 679extern void ixgbe_do_reset(struct net_device *netdev);
680#ifdef CONFIG_IXGBE_HWMON
681extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
682extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
683#endif /* CONFIG_IXGBE_HWMON */
636#ifdef IXGBE_FCOE 684#ifdef IXGBE_FCOE
637extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 685extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
638extern int ixgbe_fso(struct ixgbe_ring *tx_ring, 686extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
@@ -663,4 +711,18 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
663 return netdev_get_tx_queue(ring->netdev, ring->queue_index); 711 return netdev_get_tx_queue(ring->netdev, ring->queue_index);
664} 712}
665 713
714#ifdef CONFIG_IXGBE_PTP
715extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
716extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
717extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
718extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
719 struct sk_buff *skb);
720extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
721 struct sk_buff *skb);
722extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
723 struct ifreq *ifr, int cmd);
724extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
725extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
726#endif /* CONFIG_IXGBE_PTP */
727
666#endif /* _IXGBE_H_ */ 728#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 85d2e2c4ce4a..42537336110c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -91,29 +91,6 @@ out:
91 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); 91 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
92} 92}
93 93
94/**
95 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
96 * @hw: pointer to hardware structure
97 *
98 * Read PCIe configuration space, and get the MSI-X vector count from
99 * the capabilities table.
100 **/
101static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
102{
103 struct ixgbe_adapter *adapter = hw->back;
104 u16 msix_count;
105 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS,
106 &msix_count);
107 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
108
109 /* MSI-X count is zero-based in HW, so increment to give proper value */
110 msix_count++;
111
112 return msix_count;
113}
114
115/**
116 */
117static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) 94static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
118{ 95{
119 struct ixgbe_mac_info *mac = &hw->mac; 96 struct ixgbe_mac_info *mac = &hw->mac;
@@ -126,7 +103,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
126 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; 103 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
127 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 104 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
128 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 105 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
129 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); 106 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
130 107
131 return 0; 108 return 0;
132} 109}
@@ -347,24 +324,33 @@ out:
347/** 324/**
348 * ixgbe_fc_enable_82598 - Enable flow control 325 * ixgbe_fc_enable_82598 - Enable flow control
349 * @hw: pointer to hardware structure 326 * @hw: pointer to hardware structure
350 * @packetbuf_num: packet buffer number (0-7)
351 * 327 *
352 * Enable flow control according to the current settings. 328 * Enable flow control according to the current settings.
353 **/ 329 **/
354static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) 330static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
355{ 331{
356 s32 ret_val = 0; 332 s32 ret_val = 0;
357 u32 fctrl_reg; 333 u32 fctrl_reg;
358 u32 rmcs_reg; 334 u32 rmcs_reg;
359 u32 reg; 335 u32 reg;
336 u32 fcrtl, fcrth;
360 u32 link_speed = 0; 337 u32 link_speed = 0;
338 int i;
361 bool link_up; 339 bool link_up;
362 340
363#ifdef CONFIG_DCB 341 /*
364 if (hw->fc.requested_mode == ixgbe_fc_pfc) 342 * Validate the water mark configuration for packet buffer 0. Zero
343 * water marks indicate that the packet buffer was not configured
344 * and the watermarks for packet buffer 0 should always be configured.
345 */
346 if (!hw->fc.low_water ||
347 !hw->fc.high_water[0] ||
348 !hw->fc.pause_time) {
349 hw_dbg(hw, "Invalid water mark configuration\n");
350 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
365 goto out; 351 goto out;
352 }
366 353
367#endif /* CONFIG_DCB */
368 /* 354 /*
369 * On 82598 having Rx FC on causes resets while doing 1G 355 * On 82598 having Rx FC on causes resets while doing 1G
370 * so if it's on turn it off once we know link_speed. For 356 * so if it's on turn it off once we know link_speed. For
@@ -386,9 +372,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
386 } 372 }
387 373
388 /* Negotiate the fc mode to use */ 374 /* Negotiate the fc mode to use */
389 ret_val = ixgbe_fc_autoneg(hw); 375 ixgbe_fc_autoneg(hw);
390 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
391 goto out;
392 376
393 /* Disable any previous flow control settings */ 377 /* Disable any previous flow control settings */
394 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); 378 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -405,9 +389,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
405 * 2: Tx flow control is enabled (we can send pause frames but 389 * 2: Tx flow control is enabled (we can send pause frames but
406 * we do not support receiving pause frames). 390 * we do not support receiving pause frames).
407 * 3: Both Rx and Tx flow control (symmetric) are enabled. 391 * 3: Both Rx and Tx flow control (symmetric) are enabled.
408#ifdef CONFIG_DCB
409 * 4: Priority Flow Control is enabled.
410#endif
411 * other: Invalid. 392 * other: Invalid.
412 */ 393 */
413 switch (hw->fc.current_mode) { 394 switch (hw->fc.current_mode) {
@@ -440,11 +421,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
440 fctrl_reg |= IXGBE_FCTRL_RFCE; 421 fctrl_reg |= IXGBE_FCTRL_RFCE;
441 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 422 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
442 break; 423 break;
443#ifdef CONFIG_DCB
444 case ixgbe_fc_pfc:
445 goto out;
446 break;
447#endif /* CONFIG_DCB */
448 default: 424 default:
449 hw_dbg(hw, "Flow control param set incorrectly\n"); 425 hw_dbg(hw, "Flow control param set incorrectly\n");
450 ret_val = IXGBE_ERR_CONFIG; 426 ret_val = IXGBE_ERR_CONFIG;
@@ -457,29 +433,29 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
457 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); 433 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
458 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 434 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
459 435
460 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 436 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
461 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
462 reg = hw->fc.low_water << 6;
463 if (hw->fc.send_xon)
464 reg |= IXGBE_FCRTL_XONE;
465
466 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
467 437
468 reg = hw->fc.high_water[packetbuf_num] << 6; 438 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
469 reg |= IXGBE_FCRTH_FCEN; 439 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
440 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
441 hw->fc.high_water[i]) {
442 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
443 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
444 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
445 } else {
446 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
447 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
448 }
470 449
471 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
472 } 450 }
473 451
474 /* Configure pause time (2 TCs per register) */ 452 /* Configure pause time (2 TCs per register) */
475 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 453 reg = hw->fc.pause_time * 0x00010001;
476 if ((packetbuf_num & 1) == 0) 454 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
477 reg = (reg & 0xFFFF0000) | hw->fc.pause_time; 455 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
478 else
479 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
480 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
481 456
482 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 457 /* Configure flow control refresh threshold value */
458 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
483 459
484out: 460out:
485 return ret_val; 461 return ret_val;
@@ -1300,6 +1276,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1300 .set_fw_drv_ver = NULL, 1276 .set_fw_drv_ver = NULL,
1301 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, 1277 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
1302 .release_swfw_sync = &ixgbe_release_swfw_sync, 1278 .release_swfw_sync = &ixgbe_release_swfw_sync,
1279 .get_thermal_sensor_data = NULL,
1280 .init_thermal_sensor_thresh = NULL,
1303}; 1281};
1304 1282
1305static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1283static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 9c14685358eb..dee64d2703f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -2119,6 +2119,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2119 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 2119 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2120 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, 2120 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
2121 .release_swfw_sync = &ixgbe_release_swfw_sync, 2121 .release_swfw_sync = &ixgbe_release_swfw_sync,
2122 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
2123 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
2122 2124
2123}; 2125};
2124 2126
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 49aa41fe7b84..77ac41feb0fe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -47,13 +47,6 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48 48
49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
50static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
51static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
52static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
53static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
54static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
55 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
56static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
57static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); 50static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
58static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 51static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 u16 words, u16 *data); 52 u16 words, u16 *data);
@@ -64,6 +57,172 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
64static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); 57static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
65 58
66/** 59/**
60 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
61 * control
62 * @hw: pointer to hardware structure
63 *
64 * There are several phys that do not support autoneg flow control. This
65 * function check the device id to see if the associated phy supports
66 * autoneg flow control.
67 **/
68static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
69{
70
71 switch (hw->device_id) {
72 case IXGBE_DEV_ID_X540T:
73 return 0;
74 case IXGBE_DEV_ID_82599_T3_LOM:
75 return 0;
76 default:
77 return IXGBE_ERR_FC_NOT_SUPPORTED;
78 }
79}
80
81/**
82 * ixgbe_setup_fc - Set up flow control
83 * @hw: pointer to hardware structure
84 *
85 * Called at init time to set up flow control.
86 **/
87static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
88{
89 s32 ret_val = 0;
90 u32 reg = 0, reg_bp = 0;
91 u16 reg_cu = 0;
92
93 /*
94 * Validate the requested mode. Strict IEEE mode does not allow
95 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
96 */
97 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
98 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
99 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
100 goto out;
101 }
102
103 /*
104 * 10gig parts do not have a word in the EEPROM to determine the
105 * default flow control setting, so we explicitly set it to full.
106 */
107 if (hw->fc.requested_mode == ixgbe_fc_default)
108 hw->fc.requested_mode = ixgbe_fc_full;
109
110 /*
111 * Set up the 1G and 10G flow control advertisement registers so the
112 * HW will be able to do fc autoneg once the cable is plugged in. If
113 * we link at 10G, the 1G advertisement is harmless and vice versa.
114 */
115 switch (hw->phy.media_type) {
116 case ixgbe_media_type_fiber:
117 case ixgbe_media_type_backplane:
118 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
119 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
120 break;
121 case ixgbe_media_type_copper:
122 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
123 MDIO_MMD_AN, &reg_cu);
124 break;
125 default:
126 break;
127 }
128
129 /*
130 * The possible values of fc.requested_mode are:
131 * 0: Flow control is completely disabled
132 * 1: Rx flow control is enabled (we can receive pause frames,
133 * but not send pause frames).
134 * 2: Tx flow control is enabled (we can send pause frames but
135 * we do not support receiving pause frames).
136 * 3: Both Rx and Tx flow control (symmetric) are enabled.
137 * other: Invalid.
138 */
139 switch (hw->fc.requested_mode) {
140 case ixgbe_fc_none:
141 /* Flow control completely disabled by software override. */
142 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
143 if (hw->phy.media_type == ixgbe_media_type_backplane)
144 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
145 IXGBE_AUTOC_ASM_PAUSE);
146 else if (hw->phy.media_type == ixgbe_media_type_copper)
147 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
148 break;
149 case ixgbe_fc_tx_pause:
150 /*
151 * Tx Flow control is enabled, and Rx Flow control is
152 * disabled by software override.
153 */
154 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
155 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
156 if (hw->phy.media_type == ixgbe_media_type_backplane) {
157 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
158 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
159 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
160 reg_cu |= IXGBE_TAF_ASM_PAUSE;
161 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
162 }
163 break;
164 case ixgbe_fc_rx_pause:
165 /*
166 * Rx Flow control is enabled and Tx Flow control is
167 * disabled by software override. Since there really
168 * isn't a way to advertise that we are capable of RX
169 * Pause ONLY, we will advertise that we support both
170 * symmetric and asymmetric Rx PAUSE, as such we fall
171 * through to the fc_full statement. Later, we will
172 * disable the adapter's ability to send PAUSE frames.
173 */
174 case ixgbe_fc_full:
175 /* Flow control (both Rx and Tx) is enabled by SW override. */
176 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
177 if (hw->phy.media_type == ixgbe_media_type_backplane)
178 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
179 IXGBE_AUTOC_ASM_PAUSE;
180 else if (hw->phy.media_type == ixgbe_media_type_copper)
181 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
182 break;
183 default:
184 hw_dbg(hw, "Flow control param set incorrectly\n");
185 ret_val = IXGBE_ERR_CONFIG;
186 goto out;
187 break;
188 }
189
190 if (hw->mac.type != ixgbe_mac_X540) {
191 /*
192 * Enable auto-negotiation between the MAC & PHY;
193 * the MAC will advertise clause 37 flow control.
194 */
195 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
196 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
197
198 /* Disable AN timeout */
199 if (hw->fc.strict_ieee)
200 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
201
202 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
203 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
204 }
205
206 /*
207 * AUTOC restart handles negotiation of 1G and 10G on backplane
208 * and copper. There is no need to set the PCS1GCTL register.
209 *
210 */
211 if (hw->phy.media_type == ixgbe_media_type_backplane) {
212 reg_bp |= IXGBE_AUTOC_AN_RESTART;
213 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
214 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
215 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
216 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
217 MDIO_MMD_AN, reg_cu);
218 }
219
220 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
221out:
222 return ret_val;
223}
224
225/**
67 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 226 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
68 * @hw: pointer to hardware structure 227 * @hw: pointer to hardware structure
69 * 228 *
@@ -95,7 +254,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
95 IXGBE_WRITE_FLUSH(hw); 254 IXGBE_WRITE_FLUSH(hw);
96 255
97 /* Setup flow control */ 256 /* Setup flow control */
98 ixgbe_setup_fc(hw, 0); 257 ixgbe_setup_fc(hw);
99 258
100 /* Clear adapter stopped flag */ 259 /* Clear adapter stopped flag */
101 hw->adapter_stopped = false; 260 hw->adapter_stopped = false;
@@ -1923,30 +2082,36 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1923/** 2082/**
1924 * ixgbe_fc_enable_generic - Enable flow control 2083 * ixgbe_fc_enable_generic - Enable flow control
1925 * @hw: pointer to hardware structure 2084 * @hw: pointer to hardware structure
1926 * @packetbuf_num: packet buffer number (0-7)
1927 * 2085 *
1928 * Enable flow control according to the current settings. 2086 * Enable flow control according to the current settings.
1929 **/ 2087 **/
1930s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) 2088s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
1931{ 2089{
1932 s32 ret_val = 0; 2090 s32 ret_val = 0;
1933 u32 mflcn_reg, fccfg_reg; 2091 u32 mflcn_reg, fccfg_reg;
1934 u32 reg; 2092 u32 reg;
1935 u32 fcrtl, fcrth; 2093 u32 fcrtl, fcrth;
2094 int i;
1936 2095
1937#ifdef CONFIG_DCB 2096 /*
1938 if (hw->fc.requested_mode == ixgbe_fc_pfc) 2097 * Validate the water mark configuration for packet buffer 0. Zero
2098 * water marks indicate that the packet buffer was not configured
2099 * and the watermarks for packet buffer 0 should always be configured.
2100 */
2101 if (!hw->fc.low_water ||
2102 !hw->fc.high_water[0] ||
2103 !hw->fc.pause_time) {
2104 hw_dbg(hw, "Invalid water mark configuration\n");
2105 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1939 goto out; 2106 goto out;
2107 }
1940 2108
1941#endif /* CONFIG_DCB */
1942 /* Negotiate the fc mode to use */ 2109 /* Negotiate the fc mode to use */
1943 ret_val = ixgbe_fc_autoneg(hw); 2110 ixgbe_fc_autoneg(hw);
1944 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
1945 goto out;
1946 2111
1947 /* Disable any previous flow control settings */ 2112 /* Disable any previous flow control settings */
1948 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2113 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
1949 mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); 2114 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
1950 2115
1951 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2116 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
1952 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2117 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
@@ -1959,9 +2124,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1959 * 2: Tx flow control is enabled (we can send pause frames but 2124 * 2: Tx flow control is enabled (we can send pause frames but
1960 * we do not support receiving pause frames). 2125 * we do not support receiving pause frames).
1961 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2126 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1962#ifdef CONFIG_DCB
1963 * 4: Priority Flow Control is enabled.
1964#endif
1965 * other: Invalid. 2127 * other: Invalid.
1966 */ 2128 */
1967 switch (hw->fc.current_mode) { 2129 switch (hw->fc.current_mode) {
@@ -1994,11 +2156,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1994 mflcn_reg |= IXGBE_MFLCN_RFCE; 2156 mflcn_reg |= IXGBE_MFLCN_RFCE;
1995 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2157 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
1996 break; 2158 break;
1997#ifdef CONFIG_DCB
1998 case ixgbe_fc_pfc:
1999 goto out;
2000 break;
2001#endif /* CONFIG_DCB */
2002 default: 2159 default:
2003 hw_dbg(hw, "Flow control param set incorrectly\n"); 2160 hw_dbg(hw, "Flow control param set incorrectly\n");
2004 ret_val = IXGBE_ERR_CONFIG; 2161 ret_val = IXGBE_ERR_CONFIG;
@@ -2011,100 +2168,86 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
2011 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2168 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2012 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2169 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2013 2170
2014 fcrtl = hw->fc.low_water << 10; 2171 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
2015 2172
2016 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 2173 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2017 fcrth = hw->fc.high_water[packetbuf_num] << 10; 2174 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
2018 fcrth |= IXGBE_FCRTH_FCEN; 2175 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2019 if (hw->fc.send_xon) 2176 hw->fc.high_water[i]) {
2020 fcrtl |= IXGBE_FCRTL_XONE; 2177 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2021 } else { 2178 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2022 /* 2179 } else {
2023 * If Tx flow control is disabled, set our high water mark 2180 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2024 * to Rx FIFO size minus 32 in order prevent Tx switch 2181 /*
2025 * loopback from stalling on DMA. 2182 * In order to prevent Tx hangs when the internal Tx
2026 */ 2183 * switch is enabled we must set the high water mark
2027 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)) - 32; 2184 * to the maximum FCRTH value. This allows the Tx
2028 } 2185 * switch to function even under heavy Rx workloads.
2186 */
2187 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2188 }
2029 2189
2030 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); 2190 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2031 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); 2191 }
2032 2192
2033 /* Configure pause time (2 TCs per register) */ 2193 /* Configure pause time (2 TCs per register) */
2034 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 2194 reg = hw->fc.pause_time * 0x00010001;
2035 if ((packetbuf_num & 1) == 0) 2195 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
2036 reg = (reg & 0xFFFF0000) | hw->fc.pause_time; 2196 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2037 else
2038 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
2039 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
2040 2197
2041 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 2198 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2042 2199
2043out: 2200out:
2044 return ret_val; 2201 return ret_val;
2045} 2202}
2046 2203
2047/** 2204/**
2048 * ixgbe_fc_autoneg - Configure flow control 2205 * ixgbe_negotiate_fc - Negotiate flow control
2049 * @hw: pointer to hardware structure 2206 * @hw: pointer to hardware structure
2207 * @adv_reg: flow control advertised settings
2208 * @lp_reg: link partner's flow control settings
2209 * @adv_sym: symmetric pause bit in advertisement
2210 * @adv_asm: asymmetric pause bit in advertisement
2211 * @lp_sym: symmetric pause bit in link partner advertisement
2212 * @lp_asm: asymmetric pause bit in link partner advertisement
2050 * 2213 *
2051 * Compares our advertised flow control capabilities to those advertised by 2214 * Find the intersection between advertised settings and link partner's
2052 * our link partner, and determines the proper flow control mode to use. 2215 * advertised settings
2053 **/ 2216 **/
2054s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) 2217static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2218 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2055{ 2219{
2056 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2220 if ((!(adv_reg)) || (!(lp_reg)))
2057 ixgbe_link_speed speed; 2221 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2058 bool link_up;
2059
2060 if (hw->fc.disable_fc_autoneg)
2061 goto out;
2062
2063 /*
2064 * AN should have completed when the cable was plugged in.
2065 * Look for reasons to bail out. Bail out if:
2066 * - FC autoneg is disabled, or if
2067 * - link is not up.
2068 *
2069 * Since we're being called from an LSC, link is already known to be up.
2070 * So use link_up_wait_to_complete=false.
2071 */
2072 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2073 if (!link_up) {
2074 ret_val = IXGBE_ERR_FLOW_CONTROL;
2075 goto out;
2076 }
2077
2078 switch (hw->phy.media_type) {
2079 /* Autoneg flow control on fiber adapters */
2080 case ixgbe_media_type_fiber:
2081 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2082 ret_val = ixgbe_fc_autoneg_fiber(hw);
2083 break;
2084
2085 /* Autoneg flow control on backplane adapters */
2086 case ixgbe_media_type_backplane:
2087 ret_val = ixgbe_fc_autoneg_backplane(hw);
2088 break;
2089
2090 /* Autoneg flow control on copper adapters */
2091 case ixgbe_media_type_copper:
2092 if (ixgbe_device_supports_autoneg_fc(hw) == 0)
2093 ret_val = ixgbe_fc_autoneg_copper(hw);
2094 break;
2095
2096 default:
2097 break;
2098 }
2099 2222
2100out: 2223 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2101 if (ret_val == 0) { 2224 /*
2102 hw->fc.fc_was_autonegged = true; 2225 * Now we need to check if the user selected Rx ONLY
2226 * of pause frames. In this case, we had to advertise
2227 * FULL flow control because we could not advertise RX
2228 * ONLY. Hence, we must now check to see if we need to
2229 * turn OFF the TRANSMISSION of PAUSE frames.
2230 */
2231 if (hw->fc.requested_mode == ixgbe_fc_full) {
2232 hw->fc.current_mode = ixgbe_fc_full;
2233 hw_dbg(hw, "Flow Control = FULL.\n");
2234 } else {
2235 hw->fc.current_mode = ixgbe_fc_rx_pause;
2236 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
2237 }
2238 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2239 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2240 hw->fc.current_mode = ixgbe_fc_tx_pause;
2241 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
2242 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2243 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2244 hw->fc.current_mode = ixgbe_fc_rx_pause;
2245 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
2103 } else { 2246 } else {
2104 hw->fc.fc_was_autonegged = false; 2247 hw->fc.current_mode = ixgbe_fc_none;
2105 hw->fc.current_mode = hw->fc.requested_mode; 2248 hw_dbg(hw, "Flow Control = NONE.\n");
2106 } 2249 }
2107 return ret_val; 2250 return 0;
2108} 2251}
2109 2252
2110/** 2253/**
@@ -2116,7 +2259,7 @@ out:
2116static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 2259static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2117{ 2260{
2118 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 2261 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2119 s32 ret_val; 2262 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2120 2263
2121 /* 2264 /*
2122 * On multispeed fiber at 1g, bail out if 2265 * On multispeed fiber at 1g, bail out if
@@ -2126,10 +2269,8 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2126 2269
2127 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2270 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2128 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2271 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2129 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { 2272 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2130 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2131 goto out; 2273 goto out;
2132 }
2133 2274
2134 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2275 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2135 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 2276 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2153,7 +2294,7 @@ out:
2153static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) 2294static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2154{ 2295{
2155 u32 links2, anlp1_reg, autoc_reg, links; 2296 u32 links2, anlp1_reg, autoc_reg, links;
2156 s32 ret_val; 2297 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2157 2298
2158 /* 2299 /*
2159 * On backplane, bail out if 2300 * On backplane, bail out if
@@ -2161,21 +2302,13 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2161 * - we are 82599 and link partner is not AN enabled 2302 * - we are 82599 and link partner is not AN enabled
2162 */ 2303 */
2163 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 2304 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2164 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { 2305 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2165 hw->fc.fc_was_autonegged = false;
2166 hw->fc.current_mode = hw->fc.requested_mode;
2167 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2168 goto out; 2306 goto out;
2169 }
2170 2307
2171 if (hw->mac.type == ixgbe_mac_82599EB) { 2308 if (hw->mac.type == ixgbe_mac_82599EB) {
2172 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2309 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2173 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { 2310 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2174 hw->fc.fc_was_autonegged = false;
2175 hw->fc.current_mode = hw->fc.requested_mode;
2176 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2177 goto out; 2311 goto out;
2178 }
2179 } 2312 }
2180 /* 2313 /*
2181 * Read the 10g AN autoc and LP ability registers and resolve 2314 * Read the 10g AN autoc and LP ability registers and resolve
@@ -2217,241 +2350,63 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2217} 2350}
2218 2351
2219/** 2352/**
2220 * ixgbe_negotiate_fc - Negotiate flow control 2353 * ixgbe_fc_autoneg - Configure flow control
2221 * @hw: pointer to hardware structure
2222 * @adv_reg: flow control advertised settings
2223 * @lp_reg: link partner's flow control settings
2224 * @adv_sym: symmetric pause bit in advertisement
2225 * @adv_asm: asymmetric pause bit in advertisement
2226 * @lp_sym: symmetric pause bit in link partner advertisement
2227 * @lp_asm: asymmetric pause bit in link partner advertisement
2228 *
2229 * Find the intersection between advertised settings and link partner's
2230 * advertised settings
2231 **/
2232static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2233 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2234{
2235 if ((!(adv_reg)) || (!(lp_reg)))
2236 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2237
2238 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2239 /*
2240 * Now we need to check if the user selected Rx ONLY
2241 * of pause frames. In this case, we had to advertise
2242 * FULL flow control because we could not advertise RX
2243 * ONLY. Hence, we must now check to see if we need to
2244 * turn OFF the TRANSMISSION of PAUSE frames.
2245 */
2246 if (hw->fc.requested_mode == ixgbe_fc_full) {
2247 hw->fc.current_mode = ixgbe_fc_full;
2248 hw_dbg(hw, "Flow Control = FULL.\n");
2249 } else {
2250 hw->fc.current_mode = ixgbe_fc_rx_pause;
2251 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
2252 }
2253 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2254 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2255 hw->fc.current_mode = ixgbe_fc_tx_pause;
2256 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
2257 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2258 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2259 hw->fc.current_mode = ixgbe_fc_rx_pause;
2260 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
2261 } else {
2262 hw->fc.current_mode = ixgbe_fc_none;
2263 hw_dbg(hw, "Flow Control = NONE.\n");
2264 }
2265 return 0;
2266}
2267
2268/**
2269 * ixgbe_setup_fc - Set up flow control
2270 * @hw: pointer to hardware structure 2354 * @hw: pointer to hardware structure
2271 * 2355 *
2272 * Called at init time to set up flow control. 2356 * Compares our advertised flow control capabilities to those advertised by
2357 * our link partner, and determines the proper flow control mode to use.
2273 **/ 2358 **/
2274static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 2359void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2275{ 2360{
2276 s32 ret_val = 0; 2361 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2277 u32 reg = 0, reg_bp = 0; 2362 ixgbe_link_speed speed;
2278 u16 reg_cu = 0; 2363 bool link_up;
2279
2280#ifdef CONFIG_DCB
2281 if (hw->fc.requested_mode == ixgbe_fc_pfc) {
2282 hw->fc.current_mode = hw->fc.requested_mode;
2283 goto out;
2284 }
2285
2286#endif /* CONFIG_DCB */
2287 /* Validate the packetbuf configuration */
2288 if (packetbuf_num < 0 || packetbuf_num > 7) {
2289 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
2290 "is 0-7\n", packetbuf_num);
2291 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2292 goto out;
2293 }
2294 2364
2295 /* 2365 /*
2296 * Validate the water mark configuration. Zero water marks are invalid 2366 * AN should have completed when the cable was plugged in.
2297 * because it causes the controller to just blast out fc packets. 2367 * Look for reasons to bail out. Bail out if:
2368 * - FC autoneg is disabled, or if
2369 * - link is not up.
2370 *
2371 * Since we're being called from an LSC, link is already known to be up.
2372 * So use link_up_wait_to_complete=false.
2298 */ 2373 */
2299 if (!hw->fc.low_water || 2374 if (hw->fc.disable_fc_autoneg)
2300 !hw->fc.high_water[packetbuf_num] ||
2301 !hw->fc.pause_time) {
2302 hw_dbg(hw, "Invalid water mark configuration\n");
2303 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2304 goto out; 2375 goto out;
2305 }
2306 2376
2307 /* 2377 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2308 * Validate the requested mode. Strict IEEE mode does not allow 2378 if (!link_up)
2309 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
2310 */
2311 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
2312 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
2313 "IEEE mode\n");
2314 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2315 goto out; 2379 goto out;
2316 }
2317
2318 /*
2319 * 10gig parts do not have a word in the EEPROM to determine the
2320 * default flow control setting, so we explicitly set it to full.
2321 */
2322 if (hw->fc.requested_mode == ixgbe_fc_default)
2323 hw->fc.requested_mode = ixgbe_fc_full;
2324
2325 /*
2326 * Set up the 1G and 10G flow control advertisement registers so the
2327 * HW will be able to do fc autoneg once the cable is plugged in. If
2328 * we link at 10G, the 1G advertisement is harmless and vice versa.
2329 */
2330 2380
2331 switch (hw->phy.media_type) { 2381 switch (hw->phy.media_type) {
2382 /* Autoneg flow control on fiber adapters */
2332 case ixgbe_media_type_fiber: 2383 case ixgbe_media_type_fiber:
2384 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2385 ret_val = ixgbe_fc_autoneg_fiber(hw);
2386 break;
2387
2388 /* Autoneg flow control on backplane adapters */
2333 case ixgbe_media_type_backplane: 2389 case ixgbe_media_type_backplane:
2334 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2390 ret_val = ixgbe_fc_autoneg_backplane(hw);
2335 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2336 break; 2391 break;
2337 2392
2393 /* Autoneg flow control on copper adapters */
2338 case ixgbe_media_type_copper: 2394 case ixgbe_media_type_copper:
2339 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 2395 if (ixgbe_device_supports_autoneg_fc(hw) == 0)
2340 MDIO_MMD_AN, &reg_cu); 2396 ret_val = ixgbe_fc_autoneg_copper(hw);
2341 break; 2397 break;
2342 2398
2343 default: 2399 default:
2344 ;
2345 }
2346
2347 /*
2348 * The possible values of fc.requested_mode are:
2349 * 0: Flow control is completely disabled
2350 * 1: Rx flow control is enabled (we can receive pause frames,
2351 * but not send pause frames).
2352 * 2: Tx flow control is enabled (we can send pause frames but
2353 * we do not support receiving pause frames).
2354 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2355#ifdef CONFIG_DCB
2356 * 4: Priority Flow Control is enabled.
2357#endif
2358 * other: Invalid.
2359 */
2360 switch (hw->fc.requested_mode) {
2361 case ixgbe_fc_none:
2362 /* Flow control completely disabled by software override. */
2363 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2364 if (hw->phy.media_type == ixgbe_media_type_backplane)
2365 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
2366 IXGBE_AUTOC_ASM_PAUSE);
2367 else if (hw->phy.media_type == ixgbe_media_type_copper)
2368 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2369 break;
2370 case ixgbe_fc_rx_pause:
2371 /*
2372 * Rx Flow control is enabled and Tx Flow control is
2373 * disabled by software override. Since there really
2374 * isn't a way to advertise that we are capable of RX
2375 * Pause ONLY, we will advertise that we support both
2376 * symmetric and asymmetric Rx PAUSE. Later, we will
2377 * disable the adapter's ability to send PAUSE frames.
2378 */
2379 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2380 if (hw->phy.media_type == ixgbe_media_type_backplane)
2381 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2382 IXGBE_AUTOC_ASM_PAUSE);
2383 else if (hw->phy.media_type == ixgbe_media_type_copper)
2384 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2385 break;
2386 case ixgbe_fc_tx_pause:
2387 /*
2388 * Tx Flow control is enabled, and Rx Flow control is
2389 * disabled by software override.
2390 */
2391 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
2392 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
2393 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2394 reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
2395 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
2396 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
2397 reg_cu |= (IXGBE_TAF_ASM_PAUSE);
2398 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
2399 }
2400 break; 2400 break;
2401 case ixgbe_fc_full:
2402 /* Flow control (both Rx and Tx) is enabled by SW override. */
2403 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2404 if (hw->phy.media_type == ixgbe_media_type_backplane)
2405 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2406 IXGBE_AUTOC_ASM_PAUSE);
2407 else if (hw->phy.media_type == ixgbe_media_type_copper)
2408 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2409 break;
2410#ifdef CONFIG_DCB
2411 case ixgbe_fc_pfc:
2412 goto out;
2413 break;
2414#endif /* CONFIG_DCB */
2415 default:
2416 hw_dbg(hw, "Flow control param set incorrectly\n");
2417 ret_val = IXGBE_ERR_CONFIG;
2418 goto out;
2419 break;
2420 }
2421
2422 if (hw->mac.type != ixgbe_mac_X540) {
2423 /*
2424 * Enable auto-negotiation between the MAC & PHY;
2425 * the MAC will advertise clause 37 flow control.
2426 */
2427 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2428 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2429
2430 /* Disable AN timeout */
2431 if (hw->fc.strict_ieee)
2432 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2433
2434 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2435 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2436 }
2437
2438 /*
2439 * AUTOC restart handles negotiation of 1G and 10G on backplane
2440 * and copper. There is no need to set the PCS1GCTL register.
2441 *
2442 */
2443 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2444 reg_bp |= IXGBE_AUTOC_AN_RESTART;
2445 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
2446 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
2447 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
2448 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
2449 MDIO_MMD_AN, reg_cu);
2450 } 2401 }
2451 2402
2452 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2453out: 2403out:
2454 return ret_val; 2404 if (ret_val == 0) {
2405 hw->fc.fc_was_autonegged = true;
2406 } else {
2407 hw->fc.fc_was_autonegged = false;
2408 hw->fc.current_mode = hw->fc.requested_mode;
2409 }
2455} 2410}
2456 2411
2457/** 2412/**
@@ -2606,7 +2561,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
2606 break; 2561 break;
2607 else 2562 else
2608 /* Use interrupt-safe sleep just in case */ 2563 /* Use interrupt-safe sleep just in case */
2609 udelay(10); 2564 udelay(1000);
2610 } 2565 }
2611 2566
2612 /* For informational purposes only */ 2567 /* For informational purposes only */
@@ -2783,17 +2738,36 @@ san_mac_addr_out:
2783 * Read PCIe configuration space, and get the MSI-X vector count from 2738 * Read PCIe configuration space, and get the MSI-X vector count from
2784 * the capabilities table. 2739 * the capabilities table.
2785 **/ 2740 **/
2786u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2741u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2787{ 2742{
2788 struct ixgbe_adapter *adapter = hw->back; 2743 struct ixgbe_adapter *adapter = hw->back;
2789 u16 msix_count; 2744 u16 msix_count = 1;
2790 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS, 2745 u16 max_msix_count;
2791 &msix_count); 2746 u16 pcie_offset;
2747
2748 switch (hw->mac.type) {
2749 case ixgbe_mac_82598EB:
2750 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
2751 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
2752 break;
2753 case ixgbe_mac_82599EB:
2754 case ixgbe_mac_X540:
2755 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2756 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2757 break;
2758 default:
2759 return msix_count;
2760 }
2761
2762 pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
2792 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2763 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2793 2764
2794 /* MSI-X count is zero-based in HW, so increment to give proper value */ 2765 /* MSI-X count is zero-based in HW */
2795 msix_count++; 2766 msix_count++;
2796 2767
2768 if (msix_count > max_msix_count)
2769 msix_count = max_msix_count;
2770
2797 return msix_count; 2771 return msix_count;
2798} 2772}
2799 2773
@@ -3203,28 +3177,6 @@ wwn_prefix_out:
3203} 3177}
3204 3178
3205/** 3179/**
3206 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
3207 * control
3208 * @hw: pointer to hardware structure
3209 *
3210 * There are several phys that do not support autoneg flow control. This
3211 * function check the device id to see if the associated phy supports
3212 * autoneg flow control.
3213 **/
3214static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
3215{
3216
3217 switch (hw->device_id) {
3218 case IXGBE_DEV_ID_X540T:
3219 return 0;
3220 case IXGBE_DEV_ID_82599_T3_LOM:
3221 return 0;
3222 default:
3223 return IXGBE_ERR_FC_NOT_SUPPORTED;
3224 }
3225}
3226
3227/**
3228 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 3180 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
3229 * @hw: pointer to hardware structure 3181 * @hw: pointer to hardware structure
3230 * @enable: enable or disable switch for anti-spoofing 3182 * @enable: enable or disable switch for anti-spoofing
@@ -3585,3 +3537,172 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
3585 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 3537 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3586 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 3538 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3587} 3539}
3540
3541static const u8 ixgbe_emc_temp_data[4] = {
3542 IXGBE_EMC_INTERNAL_DATA,
3543 IXGBE_EMC_DIODE1_DATA,
3544 IXGBE_EMC_DIODE2_DATA,
3545 IXGBE_EMC_DIODE3_DATA
3546};
3547static const u8 ixgbe_emc_therm_limit[4] = {
3548 IXGBE_EMC_INTERNAL_THERM_LIMIT,
3549 IXGBE_EMC_DIODE1_THERM_LIMIT,
3550 IXGBE_EMC_DIODE2_THERM_LIMIT,
3551 IXGBE_EMC_DIODE3_THERM_LIMIT
3552};
3553
3554/**
3555 * ixgbe_get_ets_data - Extracts the ETS bit data
3556 * @hw: pointer to hardware structure
3557 * @ets_cfg: extected ETS data
3558 * @ets_offset: offset of ETS data
3559 *
3560 * Returns error code.
3561 **/
3562static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
3563 u16 *ets_offset)
3564{
3565 s32 status = 0;
3566
3567 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
3568 if (status)
3569 goto out;
3570
3571 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) {
3572 status = IXGBE_NOT_IMPLEMENTED;
3573 goto out;
3574 }
3575
3576 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
3577 if (status)
3578 goto out;
3579
3580 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) {
3581 status = IXGBE_NOT_IMPLEMENTED;
3582 goto out;
3583 }
3584
3585out:
3586 return status;
3587}
3588
3589/**
3590 * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
3591 * @hw: pointer to hardware structure
3592 *
3593 * Returns the thermal sensor data structure
3594 **/
3595s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
3596{
3597 s32 status = 0;
3598 u16 ets_offset;
3599 u16 ets_cfg;
3600 u16 ets_sensor;
3601 u8 num_sensors;
3602 u8 i;
3603 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3604
3605 /* Only support thermal sensors attached to physical port 0 */
3606 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
3607 status = IXGBE_NOT_IMPLEMENTED;
3608 goto out;
3609 }
3610
3611 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3612 if (status)
3613 goto out;
3614
3615 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3616 if (num_sensors > IXGBE_MAX_SENSORS)
3617 num_sensors = IXGBE_MAX_SENSORS;
3618
3619 for (i = 0; i < num_sensors; i++) {
3620 u8 sensor_index;
3621 u8 sensor_location;
3622
3623 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
3624 &ets_sensor);
3625 if (status)
3626 goto out;
3627
3628 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3629 IXGBE_ETS_DATA_INDEX_SHIFT);
3630 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
3631 IXGBE_ETS_DATA_LOC_SHIFT);
3632
3633 if (sensor_location != 0) {
3634 status = hw->phy.ops.read_i2c_byte(hw,
3635 ixgbe_emc_temp_data[sensor_index],
3636 IXGBE_I2C_THERMAL_SENSOR_ADDR,
3637 &data->sensor[i].temp);
3638 if (status)
3639 goto out;
3640 }
3641 }
3642out:
3643 return status;
3644}
3645
3646/**
3647 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
3648 * @hw: pointer to hardware structure
3649 *
3650 * Inits the thermal sensor thresholds according to the NVM map
3651 * and save off the threshold and location values into mac.thermal_sensor_data
3652 **/
3653s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
3654{
3655 s32 status = 0;
3656 u16 ets_offset;
3657 u16 ets_cfg;
3658 u16 ets_sensor;
3659 u8 low_thresh_delta;
3660 u8 num_sensors;
3661 u8 therm_limit;
3662 u8 i;
3663 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3664
3665 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
3666
3667 /* Only support thermal sensors attached to physical port 0 */
3668 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
3669 status = IXGBE_NOT_IMPLEMENTED;
3670 goto out;
3671 }
3672
3673 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3674 if (status)
3675 goto out;
3676
3677 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
3678 IXGBE_ETS_LTHRES_DELTA_SHIFT);
3679 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3680 if (num_sensors > IXGBE_MAX_SENSORS)
3681 num_sensors = IXGBE_MAX_SENSORS;
3682
3683 for (i = 0; i < num_sensors; i++) {
3684 u8 sensor_index;
3685 u8 sensor_location;
3686
3687 hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
3688 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3689 IXGBE_ETS_DATA_INDEX_SHIFT);
3690 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
3691 IXGBE_ETS_DATA_LOC_SHIFT);
3692 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
3693
3694 hw->phy.ops.write_i2c_byte(hw,
3695 ixgbe_emc_therm_limit[sensor_index],
3696 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
3697
3698 if (sensor_location == 0)
3699 continue;
3700
3701 data->sensor[i].location = sensor_location;
3702 data->sensor[i].caution_thresh = therm_limit;
3703 data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
3704 }
3705out:
3706 return status;
3707}
3708
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 204f06235b45..6222fdb3d3f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -31,7 +31,7 @@
31#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32#include "ixgbe.h" 32#include "ixgbe.h"
33 33
34u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); 34u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
36s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); 36s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
37s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); 37s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -77,8 +77,8 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
77s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); 77s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
78s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); 78s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
79s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 79s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
80s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num); 80s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
81s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw); 81void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
82 82
83s32 ixgbe_validate_mac_addr(u8 *mac_addr); 83s32 ixgbe_validate_mac_addr(u8 *mac_addr);
84s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 84s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@@ -107,6 +107,19 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
107void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, 107void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
108 u32 headroom, int strategy); 108 u32 headroom, int strategy);
109 109
110#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
111#define IXGBE_EMC_INTERNAL_DATA 0x00
112#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
113#define IXGBE_EMC_DIODE1_DATA 0x01
114#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
115#define IXGBE_EMC_DIODE2_DATA 0x23
116#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
117#define IXGBE_EMC_DIODE3_DATA 0x2A
118#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
119
120s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
121s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
122
110#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 123#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
111 124
112#ifndef writeq 125#ifndef writeq
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index d3695edfcb8b..87592b458c9c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -191,53 +191,46 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
191 */ 191 */
192s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) 192s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
193{ 193{
194 u32 reg; 194 u32 fcrtl, reg;
195 u8 i; 195 u8 i;
196 196
197 if (pfc_en) { 197 /* Enable Transmit Priority Flow Control */
198 /* Enable Transmit Priority Flow Control */ 198 reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
199 reg = IXGBE_READ_REG(hw, IXGBE_RMCS); 199 reg &= ~IXGBE_RMCS_TFCE_802_3X;
200 reg &= ~IXGBE_RMCS_TFCE_802_3X; 200 reg |= IXGBE_RMCS_TFCE_PRIORITY;
201 /* correct the reporting of our flow control status */ 201 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
202 reg |= IXGBE_RMCS_TFCE_PRIORITY;
203 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
204
205 /* Enable Receive Priority Flow Control */
206 reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
207 reg &= ~IXGBE_FCTRL_RFCE;
208 reg |= IXGBE_FCTRL_RPFCE;
209 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
210
211 /* Configure pause time */
212 for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
213 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
214 202
215 /* Configure flow control refresh threshold value */ 203 /* Enable Receive Priority Flow Control */
216 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); 204 reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
217 } 205 reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
218 206
219 /* 207 if (pfc_en)
220 * Configure flow control thresholds and enable priority flow control 208 reg |= IXGBE_FCTRL_RPFCE;
221 * for each traffic class.
222 */
223 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
224 int enabled = pfc_en & (1 << i);
225 209
226 reg = hw->fc.low_water << 10; 210 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
227 211
228 if (enabled == pfc_enabled_tx || 212 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
229 enabled == pfc_enabled_full) 213 /* Configure PFC Tx thresholds per TC */
230 reg |= IXGBE_FCRTL_XONE; 214 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
215 if (!(pfc_en & (1 << i))) {
216 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
217 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
218 continue;
219 }
220
221 reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
222 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
223 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
224 }
231 225
232 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); 226 /* Configure pause time */
227 reg = hw->fc.pause_time * 0x00010001;
228 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
229 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
233 230
234 reg = hw->fc.high_water[i] << 10; 231 /* Configure flow control refresh threshold value */
235 if (enabled == pfc_enabled_tx || 232 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
236 enabled == pfc_enabled_full)
237 reg |= IXGBE_FCRTH_FCEN;
238 233
239 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
240 }
241 234
242 return 0; 235 return 0;
243} 236}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 888a419dc3d9..4eac80d01857 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -211,24 +211,42 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
211 */ 211 */
212s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) 212s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
213{ 213{
214 u32 i, j, reg; 214 u32 i, j, fcrtl, reg;
215 u8 max_tc = 0; 215 u8 max_tc = 0;
216 216
217 for (i = 0; i < MAX_USER_PRIORITY; i++) 217 /* Enable Transmit Priority Flow Control */
218 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
219
220 /* Enable Receive Priority Flow Control */
221 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
222 reg |= IXGBE_MFLCN_DPF;
223
224 /*
225 * X540 supports per TC Rx priority flow control. So
226 * clear all TCs and only enable those that should be
227 * enabled.
228 */
229 reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
230
231 if (hw->mac.type == ixgbe_mac_X540)
232 reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
233
234 if (pfc_en)
235 reg |= IXGBE_MFLCN_RPFCE;
236
237 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
238
239 for (i = 0; i < MAX_USER_PRIORITY; i++) {
218 if (prio_tc[i] > max_tc) 240 if (prio_tc[i] > max_tc)
219 max_tc = prio_tc[i]; 241 max_tc = prio_tc[i];
242 }
243
244 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
220 245
221 /* Configure PFC Tx thresholds per TC */ 246 /* Configure PFC Tx thresholds per TC */
222 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 247 for (i = 0; i <= max_tc; i++) {
223 int enabled = 0; 248 int enabled = 0;
224 249
225 if (i > max_tc) {
226 reg = 0;
227 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
228 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
229 continue;
230 }
231
232 for (j = 0; j < MAX_USER_PRIORITY; j++) { 250 for (j = 0; j < MAX_USER_PRIORITY; j++) {
233 if ((prio_tc[j] == i) && (pfc_en & (1 << j))) { 251 if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
234 enabled = 1; 252 enabled = 1;
@@ -236,61 +254,29 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
236 } 254 }
237 } 255 }
238 256
239 reg = hw->fc.low_water << 10; 257 if (enabled) {
240 258 reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
241 if (enabled) 259 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
242 reg |= IXGBE_FCRTL_XONE; 260 } else {
243 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); 261 reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
262 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
263 }
244 264
245 reg = hw->fc.high_water[i] << 10;
246 if (enabled)
247 reg |= IXGBE_FCRTH_FCEN;
248 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); 265 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
249 } 266 }
250 267
251 if (pfc_en) { 268 for (; i < MAX_TRAFFIC_CLASS; i++) {
252 /* Configure pause time (2 TCs per register) */ 269 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
253 reg = hw->fc.pause_time | (hw->fc.pause_time << 16); 270 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
254 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) 271 }
255 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
256
257 /* Configure flow control refresh threshold value */
258 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
259
260
261 reg = IXGBE_FCCFG_TFCE_PRIORITY;
262 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
263 /*
264 * Enable Receive PFC
265 * 82599 will always honor XOFF frames we receive when
266 * we are in PFC mode however X540 only honors enabled
267 * traffic classes.
268 */
269 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
270 reg &= ~IXGBE_MFLCN_RFCE;
271 reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
272
273 if (hw->mac.type == ixgbe_mac_X540) {
274 reg &= ~IXGBE_MFLCN_RPFCE_MASK;
275 reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
276 }
277 272
278 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); 273 /* Configure pause time (2 TCs per register) */
279 274 reg = hw->fc.pause_time * 0x00010001;
280 } else { 275 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
281 /* X540 devices have a RX bit that should be cleared 276 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
282 * if PFC is disabled on all TCs but PFC features is
283 * enabled.
284 */
285 if (hw->mac.type == ixgbe_mac_X540) {
286 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
287 reg &= ~IXGBE_MFLCN_RPFCE_MASK;
288 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
289 }
290 277
291 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 278 /* Configure flow control refresh threshold value */
292 hw->mac.ops.fc_enable(hw, i); 279 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
293 }
294 280
295 return 0; 281 return 0;
296} 282}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 652e4b09546d..5164a21b13ca 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -44,18 +44,26 @@
44#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ 44#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
45#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ 45#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */
46 46
47int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg, 47static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
48 struct ixgbe_dcb_config *dcfg, int tc_max)
49{ 48{
49 struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg;
50 struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg;
50 struct tc_configuration *src = NULL; 51 struct tc_configuration *src = NULL;
51 struct tc_configuration *dst = NULL; 52 struct tc_configuration *dst = NULL;
52 int i, j; 53 int i, j;
53 int tx = DCB_TX_CONFIG; 54 int tx = DCB_TX_CONFIG;
54 int rx = DCB_RX_CONFIG; 55 int rx = DCB_RX_CONFIG;
55 int changes = 0; 56 int changes = 0;
57#ifdef IXGBE_FCOE
58 struct dcb_app app = {
59 .selector = DCB_APP_IDTYPE_ETHTYPE,
60 .protocol = ETH_P_FCOE,
61 };
62 u8 up = dcb_getapp(adapter->netdev, &app);
56 63
57 if (!scfg || !dcfg) 64 if (up && !(up & (1 << adapter->fcoe.up)))
58 return changes; 65 changes |= BIT_APP_UPCHG;
66#endif
59 67
60 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { 68 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
61 src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; 69 src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
@@ -330,60 +338,20 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
330static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) 338static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
331{ 339{
332 struct ixgbe_adapter *adapter = netdev_priv(netdev); 340 struct ixgbe_adapter *adapter = netdev_priv(netdev);
341 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
342 struct ixgbe_hw *hw = &adapter->hw;
333 int ret = DCB_NO_HW_CHG; 343 int ret = DCB_NO_HW_CHG;
334 int i; 344 int i;
335#ifdef IXGBE_FCOE
336 struct dcb_app app = {
337 .selector = DCB_APP_IDTYPE_ETHTYPE,
338 .protocol = ETH_P_FCOE,
339 };
340 u8 up;
341
342 /* In IEEE mode, use the IEEE Ethertype selector value */
343 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
344 app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
345 up = dcb_ieee_getapp_mask(netdev, &app);
346 } else {
347 up = dcb_getapp(netdev, &app);
348 }
349#endif
350 345
351 /* Fail command if not in CEE mode */ 346 /* Fail command if not in CEE mode */
352 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 347 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
353 return ret; 348 return ret;
354 349
355 adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, 350 adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
356 &adapter->dcb_cfg,
357 MAX_TRAFFIC_CLASS); 351 MAX_TRAFFIC_CLASS);
358 if (!adapter->dcb_set_bitmap) 352 if (!adapter->dcb_set_bitmap)
359 return ret; 353 return ret;
360 354
361 if (adapter->dcb_cfg.pfc_mode_enable) {
362 switch (adapter->hw.mac.type) {
363 case ixgbe_mac_82599EB:
364 case ixgbe_mac_X540:
365 if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
366 adapter->last_lfc_mode =
367 adapter->hw.fc.current_mode;
368 break;
369 default:
370 break;
371 }
372 adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
373 } else {
374 switch (adapter->hw.mac.type) {
375 case ixgbe_mac_82598EB:
376 adapter->hw.fc.requested_mode = ixgbe_fc_none;
377 break;
378 case ixgbe_mac_82599EB:
379 case ixgbe_mac_X540:
380 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
381 break;
382 default:
383 break;
384 }
385 }
386
387 if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { 355 if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
388 u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; 356 u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
389 u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; 357 u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -396,23 +364,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
396 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); 364 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
397#endif 365#endif
398 366
399 ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, 367 ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame,
400 max_frame, DCB_TX_CONFIG); 368 DCB_TX_CONFIG);
401 ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, 369 ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame,
402 max_frame, DCB_RX_CONFIG); 370 DCB_RX_CONFIG);
403 371
404 ixgbe_dcb_unpack_refill(&adapter->dcb_cfg, 372 ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill);
405 DCB_TX_CONFIG, refill); 373 ixgbe_dcb_unpack_max(dcb_cfg, max);
406 ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max); 374 ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id);
407 ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg, 375 ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type);
408 DCB_TX_CONFIG, bwg_id); 376 ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc);
409 ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
410 DCB_TX_CONFIG, prio_type);
411 ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
412 DCB_TX_CONFIG, prio_tc);
413 377
414 ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, 378 ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id,
415 bwg_id, prio_type, prio_tc); 379 prio_type, prio_tc);
416 380
417 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 381 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
418 netdev_set_prio_tc_map(netdev, i, prio_tc[i]); 382 netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
@@ -421,27 +385,34 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
421 } 385 }
422 386
423 if (adapter->dcb_set_bitmap & BIT_PFC) { 387 if (adapter->dcb_set_bitmap & BIT_PFC) {
424 u8 pfc_en; 388 if (dcb_cfg->pfc_mode_enable) {
425 u8 prio_tc[MAX_USER_PRIORITY]; 389 u8 pfc_en;
390 u8 prio_tc[MAX_USER_PRIORITY];
391
392 ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc);
393 ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en);
394 ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc);
395 } else {
396 hw->mac.ops.fc_enable(hw);
397 }
426 398
427 ixgbe_dcb_unpack_map(&adapter->dcb_cfg, 399 ixgbe_set_rx_drop_en(adapter);
428 DCB_TX_CONFIG, prio_tc);
429 ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
430 ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
431 if (ret != DCB_HW_CHG_RST)
432 ret = DCB_HW_CHG;
433 }
434 400
435 if (adapter->dcb_cfg.pfc_mode_enable) 401 ret = DCB_HW_CHG;
436 adapter->hw.fc.current_mode = ixgbe_fc_pfc; 402 }
437 403
438#ifdef IXGBE_FCOE 404#ifdef IXGBE_FCOE
439 /* Reprogam FCoE hardware offloads when the traffic class 405 /* Reprogam FCoE hardware offloads when the traffic class
440 * FCoE is using changes. This happens if the APP info 406 * FCoE is using changes. This happens if the APP info
441 * changes or the up2tc mapping is updated. 407 * changes or the up2tc mapping is updated.
442 */ 408 */
443 if ((up && !(up & (1 << adapter->fcoe.up))) || 409 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
444 (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) { 410 struct dcb_app app = {
411 .selector = DCB_APP_IDTYPE_ETHTYPE,
412 .protocol = ETH_P_FCOE,
413 };
414 u8 up = dcb_getapp(netdev, &app);
415
445 adapter->fcoe.up = ffs(up) - 1; 416 adapter->fcoe.up = ffs(up) - 1;
446 ixgbe_dcbnl_devreset(netdev); 417 ixgbe_dcbnl_devreset(netdev);
447 ret = DCB_HW_CHG_RST; 418 ret = DCB_HW_CHG_RST;
@@ -650,7 +621,9 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
650 struct ieee_pfc *pfc) 621 struct ieee_pfc *pfc)
651{ 622{
652 struct ixgbe_adapter *adapter = netdev_priv(dev); 623 struct ixgbe_adapter *adapter = netdev_priv(dev);
624 struct ixgbe_hw *hw = &adapter->hw;
653 u8 *prio_tc; 625 u8 *prio_tc;
626 int err;
654 627
655 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) 628 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
656 return -EINVAL; 629 return -EINVAL;
@@ -664,7 +637,16 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
664 637
665 prio_tc = adapter->ixgbe_ieee_ets->prio_tc; 638 prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
666 memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); 639 memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
667 return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc); 640
641 /* Enable link flow control parameters if PFC is disabled */
642 if (pfc->pfc_en)
643 err = ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc);
644 else
645 err = hw->mac.ops.fc_enable(hw);
646
647 ixgbe_set_rx_drop_en(adapter);
648
649 return err;
668} 650}
669 651
670static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, 652static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 31a2bf76a346..3178f1ec3711 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -391,11 +391,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
391 } else if (hw->fc.current_mode == ixgbe_fc_full) { 391 } else if (hw->fc.current_mode == ixgbe_fc_full) {
392 pause->rx_pause = 1; 392 pause->rx_pause = 1;
393 pause->tx_pause = 1; 393 pause->tx_pause = 1;
394#ifdef CONFIG_DCB
395 } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
396 pause->rx_pause = 0;
397 pause->tx_pause = 0;
398#endif
399 } 394 }
400} 395}
401 396
@@ -404,21 +399,14 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
404{ 399{
405 struct ixgbe_adapter *adapter = netdev_priv(netdev); 400 struct ixgbe_adapter *adapter = netdev_priv(netdev);
406 struct ixgbe_hw *hw = &adapter->hw; 401 struct ixgbe_hw *hw = &adapter->hw;
407 struct ixgbe_fc_info fc; 402 struct ixgbe_fc_info fc = hw->fc;
408 403
409#ifdef CONFIG_DCB 404 /* 82598 does no support link flow control with DCB enabled */
410 if (adapter->dcb_cfg.pfc_mode_enable || 405 if ((hw->mac.type == ixgbe_mac_82598EB) &&
411 ((hw->mac.type == ixgbe_mac_82598EB) && 406 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
412 (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
413 return -EINVAL; 407 return -EINVAL;
414 408
415#endif 409 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
416 fc = hw->fc;
417
418 if (pause->autoneg != AUTONEG_ENABLE)
419 fc.disable_fc_autoneg = true;
420 else
421 fc.disable_fc_autoneg = false;
422 410
423 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) 411 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
424 fc.requested_mode = ixgbe_fc_full; 412 fc.requested_mode = ixgbe_fc_full;
@@ -426,14 +414,8 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
426 fc.requested_mode = ixgbe_fc_rx_pause; 414 fc.requested_mode = ixgbe_fc_rx_pause;
427 else if (!pause->rx_pause && pause->tx_pause) 415 else if (!pause->rx_pause && pause->tx_pause)
428 fc.requested_mode = ixgbe_fc_tx_pause; 416 fc.requested_mode = ixgbe_fc_tx_pause;
429 else if (!pause->rx_pause && !pause->tx_pause)
430 fc.requested_mode = ixgbe_fc_none;
431 else 417 else
432 return -EINVAL; 418 fc.requested_mode = ixgbe_fc_none;
433
434#ifdef CONFIG_DCB
435 adapter->last_lfc_mode = fc.requested_mode;
436#endif
437 419
438 /* if the thing changed then we'll update and use new autoneg */ 420 /* if the thing changed then we'll update and use new autoneg */
439 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { 421 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
@@ -1780,6 +1762,8 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1780 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1762 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1781 } 1763 }
1782 1764
1765 netdev_tx_reset_queue(txring_txq(tx_ring));
1766
1783 /* re-map buffers to ring, store next to clean values */ 1767 /* re-map buffers to ring, store next to clean values */
1784 ixgbe_alloc_rx_buffers(rx_ring, count); 1768 ixgbe_alloc_rx_buffers(rx_ring, count);
1785 rx_ring->next_to_clean = rx_ntc; 1769 rx_ring->next_to_clean = rx_ntc;
@@ -1969,53 +1953,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1969 struct ethtool_wolinfo *wol) 1953 struct ethtool_wolinfo *wol)
1970{ 1954{
1971 struct ixgbe_hw *hw = &adapter->hw; 1955 struct ixgbe_hw *hw = &adapter->hw;
1972 int retval = 1; 1956 int retval = 0;
1973 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
1974
1975 /* WOL not supported except for the following */
1976 switch(hw->device_id) {
1977 case IXGBE_DEV_ID_82599_SFP:
1978 /* Only these subdevices could supports WOL */
1979 switch (hw->subsystem_device_id) {
1980 case IXGBE_SUBDEV_ID_82599_560FLR:
1981 /* only support first port */
1982 if (hw->bus.func != 0) {
1983 wol->supported = 0;
1984 break;
1985 }
1986 case IXGBE_SUBDEV_ID_82599_SFP:
1987 retval = 0;
1988 break;
1989 default:
1990 wol->supported = 0;
1991 break;
1992 }
1993 break;
1994 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1995 /* All except this subdevice support WOL */
1996 if (hw->subsystem_device_id ==
1997 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
1998 wol->supported = 0;
1999 break;
2000 }
2001 retval = 0;
2002 break;
2003 case IXGBE_DEV_ID_82599_KX4:
2004 retval = 0;
2005 break;
2006 case IXGBE_DEV_ID_X540T:
2007 /* check eeprom to see if enabled wol */
2008 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
2009 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
2010 (hw->bus.func == 0))) {
2011 retval = 0;
2012 break;
2013 }
2014 1957
2015 /* All others not supported */ 1958 /* WOL not supported for all devices */
2016 wol->supported = 0; 1959 if (!ixgbe_wol_supported(adapter, hw->device_id,
2017 break; 1960 hw->subsystem_device_id)) {
2018 default: 1961 retval = 1;
2019 wol->supported = 0; 1962 wol->supported = 0;
2020 } 1963 }
2021 1964
@@ -2753,6 +2696,46 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2753 return ret; 2696 return ret;
2754} 2697}
2755 2698
2699static int ixgbe_get_ts_info(struct net_device *dev,
2700 struct ethtool_ts_info *info)
2701{
2702 struct ixgbe_adapter *adapter = netdev_priv(dev);
2703
2704 switch (adapter->hw.mac.type) {
2705#ifdef CONFIG_IXGBE_PTP
2706 case ixgbe_mac_X540:
2707 case ixgbe_mac_82599EB:
2708 info->so_timestamping =
2709 SOF_TIMESTAMPING_TX_HARDWARE |
2710 SOF_TIMESTAMPING_RX_HARDWARE |
2711 SOF_TIMESTAMPING_RAW_HARDWARE;
2712
2713 if (adapter->ptp_clock)
2714 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2715 else
2716 info->phc_index = -1;
2717
2718 info->tx_types =
2719 (1 << HWTSTAMP_TX_OFF) |
2720 (1 << HWTSTAMP_TX_ON);
2721
2722 info->rx_filters =
2723 (1 << HWTSTAMP_FILTER_NONE) |
2724 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2725 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2726 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
2727 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
2728 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
2729 (1 << HWTSTAMP_FILTER_SOME);
2730 break;
2731#endif /* CONFIG_IXGBE_PTP */
2732 default:
2733 return ethtool_op_get_ts_info(dev, info);
2734 break;
2735 }
2736 return 0;
2737}
2738
2756static const struct ethtool_ops ixgbe_ethtool_ops = { 2739static const struct ethtool_ops ixgbe_ethtool_ops = {
2757 .get_settings = ixgbe_get_settings, 2740 .get_settings = ixgbe_get_settings,
2758 .set_settings = ixgbe_set_settings, 2741 .set_settings = ixgbe_set_settings,
@@ -2781,6 +2764,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
2781 .set_coalesce = ixgbe_set_coalesce, 2764 .set_coalesce = ixgbe_set_coalesce,
2782 .get_rxnfc = ixgbe_get_rxnfc, 2765 .get_rxnfc = ixgbe_get_rxnfc,
2783 .set_rxnfc = ixgbe_set_rxnfc, 2766 .set_rxnfc = ixgbe_set_rxnfc,
2767 .get_ts_info = ixgbe_get_ts_info,
2784}; 2768};
2785 2769
2786void ixgbe_set_ethtool_ops(struct net_device *netdev) 2770void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 77ea4b716535..bc07933d67da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -437,6 +437,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
437 */ 437 */
438 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && 438 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
439 (fctl & FC_FC_END_SEQ)) { 439 (fctl & FC_FC_END_SEQ)) {
440 skb_linearize(skb);
440 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); 441 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
441 crc->fcoe_eof = FC_EOF_T; 442 crc->fcoe_eof = FC_EOF_T;
442 } 443 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 027d7a75be39..af1a5314b494 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -523,11 +523,17 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
523/** 523/**
524 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector 524 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
525 * @adapter: board private structure to initialize 525 * @adapter: board private structure to initialize
526 * @v_count: q_vectors allocated on adapter, used for ring interleaving
526 * @v_idx: index of vector in adapter struct 527 * @v_idx: index of vector in adapter struct
528 * @txr_count: total number of Tx rings to allocate
529 * @txr_idx: index of first Tx ring to allocate
530 * @rxr_count: total number of Rx rings to allocate
531 * @rxr_idx: index of first Rx ring to allocate
527 * 532 *
528 * We allocate one q_vector. If allocation fails we return -ENOMEM. 533 * We allocate one q_vector. If allocation fails we return -ENOMEM.
529 **/ 534 **/
530static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, 535static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
536 int v_count, int v_idx,
531 int txr_count, int txr_idx, 537 int txr_count, int txr_idx,
532 int rxr_count, int rxr_idx) 538 int rxr_count, int rxr_idx)
533{ 539{
@@ -598,7 +604,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
598 604
599 /* update count and index */ 605 /* update count and index */
600 txr_count--; 606 txr_count--;
601 txr_idx++; 607 txr_idx += v_count;
602 608
603 /* push pointer to next ring */ 609 /* push pointer to next ring */
604 ring++; 610 ring++;
@@ -622,6 +628,16 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
622 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 628 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
623 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); 629 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
624 630
631#ifdef IXGBE_FCOE
632 if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
633 struct ixgbe_ring_feature *f;
634 f = &adapter->ring_feature[RING_F_FCOE];
635 if ((rxr_idx >= f->mask) &&
636 (rxr_idx < f->mask + f->indices))
637 set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
638 }
639
640#endif /* IXGBE_FCOE */
625 /* apply Rx specific ring traits */ 641 /* apply Rx specific ring traits */
626 ring->count = adapter->rx_ring_count; 642 ring->count = adapter->rx_ring_count;
627 ring->queue_index = rxr_idx; 643 ring->queue_index = rxr_idx;
@@ -631,7 +647,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
631 647
632 /* update count and index */ 648 /* update count and index */
633 rxr_count--; 649 rxr_count--;
634 rxr_idx++; 650 rxr_idx += v_count;
635 651
636 /* push pointer to next ring */ 652 /* push pointer to next ring */
637 ring++; 653 ring++;
@@ -690,24 +706,23 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
690 q_vectors = 1; 706 q_vectors = 1;
691 707
692 if (q_vectors >= (rxr_remaining + txr_remaining)) { 708 if (q_vectors >= (rxr_remaining + txr_remaining)) {
693 for (; rxr_remaining; v_idx++, q_vectors--) { 709 for (; rxr_remaining; v_idx++) {
694 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); 710 err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
695 err = ixgbe_alloc_q_vector(adapter, v_idx, 711 0, 0, 1, rxr_idx);
696 0, 0, rqpv, rxr_idx);
697 712
698 if (err) 713 if (err)
699 goto err_out; 714 goto err_out;
700 715
701 /* update counts and index */ 716 /* update counts and index */
702 rxr_remaining -= rqpv; 717 rxr_remaining--;
703 rxr_idx += rqpv; 718 rxr_idx++;
704 } 719 }
705 } 720 }
706 721
707 for (; q_vectors; v_idx++, q_vectors--) { 722 for (; v_idx < q_vectors; v_idx++) {
708 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); 723 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
709 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); 724 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
710 err = ixgbe_alloc_q_vector(adapter, v_idx, 725 err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
711 tqpv, txr_idx, 726 tqpv, txr_idx,
712 rqpv, rxr_idx); 727 rqpv, rxr_idx);
713 728
@@ -716,9 +731,9 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
716 731
717 /* update counts and index */ 732 /* update counts and index */
718 rxr_remaining -= rqpv; 733 rxr_remaining -= rqpv;
719 rxr_idx += rqpv;
720 txr_remaining -= tqpv; 734 txr_remaining -= tqpv;
721 txr_idx += tqpv; 735 rxr_idx++;
736 txr_idx++;
722 } 737 }
723 738
724 return 0; 739 return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e26b1f9ac75..bf20457ea23a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,8 +63,8 @@ static char ixgbe_default_device_descr[] =
63 "Intel(R) 10 Gigabit Network Connection"; 63 "Intel(R) 10 Gigabit Network Connection";
64#endif 64#endif
65#define MAJ 3 65#define MAJ 3
66#define MIN 8 66#define MIN 9
67#define BUILD 21 67#define BUILD 15
68#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 68#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
69 __stringify(BUILD) "-k" 69 __stringify(BUILD) "-k"
70const char ixgbe_driver_version[] = DRV_VERSION; 70const char ixgbe_driver_version[] = DRV_VERSION;
@@ -133,7 +133,7 @@ static struct notifier_block dca_notifier = {
133static unsigned int max_vfs; 133static unsigned int max_vfs;
134module_param(max_vfs, uint, 0); 134module_param(max_vfs, uint, 0);
135MODULE_PARM_DESC(max_vfs, 135MODULE_PARM_DESC(max_vfs,
136 "Maximum number of virtual functions to allocate per physical function"); 136 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
137#endif /* CONFIG_PCI_IOV */ 137#endif /* CONFIG_PCI_IOV */
138 138
139static unsigned int allow_unsupported_sfp; 139static unsigned int allow_unsupported_sfp;
@@ -610,35 +610,50 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
610 /* tx_buffer must be completely set up in the transmit path */ 610 /* tx_buffer must be completely set up in the transmit path */
611} 611}
612 612
613static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) 613static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
614{ 614{
615 struct ixgbe_hw *hw = &adapter->hw; 615 struct ixgbe_hw *hw = &adapter->hw;
616 struct ixgbe_hw_stats *hwstats = &adapter->stats; 616 struct ixgbe_hw_stats *hwstats = &adapter->stats;
617 u32 data = 0;
618 u32 xoff[8] = {0};
619 int i; 617 int i;
618 u32 data;
620 619
621 if ((hw->fc.current_mode == ixgbe_fc_full) || 620 if ((hw->fc.current_mode != ixgbe_fc_full) &&
622 (hw->fc.current_mode == ixgbe_fc_rx_pause)) { 621 (hw->fc.current_mode != ixgbe_fc_rx_pause))
623 switch (hw->mac.type) { 622 return;
624 case ixgbe_mac_82598EB:
625 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
626 break;
627 default:
628 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
629 }
630 hwstats->lxoffrxc += data;
631 623
632 /* refill credits (no tx hang) if we received xoff */ 624 switch (hw->mac.type) {
633 if (!data) 625 case ixgbe_mac_82598EB:
634 return; 626 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
627 break;
628 default:
629 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
630 }
631 hwstats->lxoffrxc += data;
635 632
636 for (i = 0; i < adapter->num_tx_queues; i++) 633 /* refill credits (no tx hang) if we received xoff */
637 clear_bit(__IXGBE_HANG_CHECK_ARMED, 634 if (!data)
638 &adapter->tx_ring[i]->state);
639 return; 635 return;
640 } else if (!(adapter->dcb_cfg.pfc_mode_enable)) 636
637 for (i = 0; i < adapter->num_tx_queues; i++)
638 clear_bit(__IXGBE_HANG_CHECK_ARMED,
639 &adapter->tx_ring[i]->state);
640}
641
642static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
643{
644 struct ixgbe_hw *hw = &adapter->hw;
645 struct ixgbe_hw_stats *hwstats = &adapter->stats;
646 u32 xoff[8] = {0};
647 int i;
648 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
649
650 if (adapter->ixgbe_ieee_pfc)
651 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
652
653 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
654 ixgbe_update_xoff_rx_lfc(adapter);
641 return; 655 return;
656 }
642 657
643 /* update stats for each tc, only valid with PFC enabled */ 658 /* update stats for each tc, only valid with PFC enabled */
644 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { 659 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
@@ -774,6 +789,13 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
774 total_bytes += tx_buffer->bytecount; 789 total_bytes += tx_buffer->bytecount;
775 total_packets += tx_buffer->gso_segs; 790 total_packets += tx_buffer->gso_segs;
776 791
792#ifdef CONFIG_IXGBE_PTP
793 if (unlikely(tx_buffer->tx_flags &
794 IXGBE_TX_FLAGS_TSTAMP))
795 ixgbe_ptp_tx_hwtstamp(q_vector,
796 tx_buffer->skb);
797
798#endif
777 /* free the skb */ 799 /* free the skb */
778 dev_kfree_skb_any(tx_buffer->skb); 800 dev_kfree_skb_any(tx_buffer->skb);
779 801
@@ -1144,7 +1166,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1144 * there isn't much point in holding memory we can't use 1166 * there isn't much point in holding memory we can't use
1145 */ 1167 */
1146 if (dma_mapping_error(rx_ring->dev, dma)) { 1168 if (dma_mapping_error(rx_ring->dev, dma)) {
1147 put_page(page); 1169 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1148 bi->page = NULL; 1170 bi->page = NULL;
1149 1171
1150 rx_ring->rx_stats.alloc_rx_page_failed++; 1172 rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1374,6 +1396,11 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1374 1396
1375 ixgbe_rx_checksum(rx_ring, rx_desc, skb); 1397 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1376 1398
1399#ifdef CONFIG_IXGBE_PTP
1400 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))
1401 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
1402#endif
1403
1377 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1404 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1378 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1405 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1379 __vlan_hwaccel_put_tag(skb, vid); 1406 __vlan_hwaccel_put_tag(skb, vid);
@@ -2295,6 +2322,9 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
2295 } 2322 }
2296 2323
2297 ixgbe_check_fan_failure(adapter, eicr); 2324 ixgbe_check_fan_failure(adapter, eicr);
2325#ifdef CONFIG_IXGBE_PTP
2326 ixgbe_ptp_check_pps_event(adapter, eicr);
2327#endif
2298 2328
2299 /* re-enable the original interrupt state, no lsc, no queues */ 2329 /* re-enable the original interrupt state, no lsc, no queues */
2300 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2330 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2487,6 +2517,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2487 } 2517 }
2488 2518
2489 ixgbe_check_fan_failure(adapter, eicr); 2519 ixgbe_check_fan_failure(adapter, eicr);
2520#ifdef CONFIG_IXGBE_PTP
2521 ixgbe_ptp_check_pps_event(adapter, eicr);
2522#endif
2490 2523
2491 /* would disable interrupts here but EIAM disabled it */ 2524 /* would disable interrupts here but EIAM disabled it */
2492 napi_schedule(&q_vector->napi); 2525 napi_schedule(&q_vector->napi);
@@ -2671,8 +2704,6 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2671 /* enable queue */ 2704 /* enable queue */
2672 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); 2705 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2673 2706
2674 netdev_tx_reset_queue(txring_txq(ring));
2675
2676 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ 2707 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2677 if (hw->mac.type == ixgbe_mac_82598EB && 2708 if (hw->mac.type == ixgbe_mac_82598EB &&
2678 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 2709 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -2758,6 +2789,61 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2758 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); 2789 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
2759} 2790}
2760 2791
2792static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
2793 struct ixgbe_ring *ring)
2794{
2795 struct ixgbe_hw *hw = &adapter->hw;
2796 u8 reg_idx = ring->reg_idx;
2797 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
2798
2799 srrctl |= IXGBE_SRRCTL_DROP_EN;
2800
2801 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2802}
2803
2804static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
2805 struct ixgbe_ring *ring)
2806{
2807 struct ixgbe_hw *hw = &adapter->hw;
2808 u8 reg_idx = ring->reg_idx;
2809 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
2810
2811 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2812
2813 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2814}
2815
2816#ifdef CONFIG_IXGBE_DCB
2817void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
2818#else
2819static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
2820#endif
2821{
2822 int i;
2823 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
2824
2825 if (adapter->ixgbe_ieee_pfc)
2826 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
2827
2828 /*
2829 * We should set the drop enable bit if:
2830 * SR-IOV is enabled
2831 * or
2832 * Number of Rx queues > 1 and flow control is disabled
2833 *
2834 * This allows us to avoid head of line blocking for security
2835 * and performance reasons.
2836 */
2837 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
2838 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
2839 for (i = 0; i < adapter->num_rx_queues; i++)
2840 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
2841 } else {
2842 for (i = 0; i < adapter->num_rx_queues; i++)
2843 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
2844 }
2845}
2846
2761#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 2847#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2762 2848
2763static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, 2849static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
@@ -2904,33 +2990,6 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2904 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); 2990 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2905} 2991}
2906 2992
2907/**
2908 * ixgbe_set_uta - Set unicast filter table address
2909 * @adapter: board private structure
2910 *
2911 * The unicast table address is a register array of 32-bit registers.
2912 * The table is meant to be used in a way similar to how the MTA is used
2913 * however due to certain limitations in the hardware it is necessary to
2914 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
2915 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
2916 **/
2917static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2918{
2919 struct ixgbe_hw *hw = &adapter->hw;
2920 int i;
2921
2922 /* The UTA table only exists on 82599 hardware and newer */
2923 if (hw->mac.type < ixgbe_mac_82599EB)
2924 return;
2925
2926 /* we only need to do this if VMDq is enabled */
2927 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2928 return;
2929
2930 for (i = 0; i < 128; i++)
2931 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
2932}
2933
2934#define IXGBE_MAX_RX_DESC_POLL 10 2993#define IXGBE_MAX_RX_DESC_POLL 10
2935static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, 2994static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2936 struct ixgbe_ring *ring) 2995 struct ixgbe_ring *ring)
@@ -3154,14 +3213,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3154 set_ring_rsc_enabled(rx_ring); 3213 set_ring_rsc_enabled(rx_ring);
3155 else 3214 else
3156 clear_ring_rsc_enabled(rx_ring); 3215 clear_ring_rsc_enabled(rx_ring);
3157#ifdef IXGBE_FCOE
3158 if (netdev->features & NETIF_F_FCOE_MTU) {
3159 struct ixgbe_ring_feature *f;
3160 f = &adapter->ring_feature[RING_F_FCOE];
3161 if ((i >= f->mask) && (i < f->mask + f->indices))
3162 set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state);
3163 }
3164#endif /* IXGBE_FCOE */
3165 } 3216 }
3166} 3217}
3167 3218
@@ -3224,8 +3275,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3224 /* Program registers for the distribution of queues */ 3275 /* Program registers for the distribution of queues */
3225 ixgbe_setup_mrqc(adapter); 3276 ixgbe_setup_mrqc(adapter);
3226 3277
3227 ixgbe_set_uta(adapter);
3228
3229 /* set_rx_buffer_len must be called before ring initialization */ 3278 /* set_rx_buffer_len must be called before ring initialization */
3230 ixgbe_set_rx_buffer_len(adapter); 3279 ixgbe_set_rx_buffer_len(adapter);
3231 3280
@@ -3462,16 +3511,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3462 } 3511 }
3463 ixgbe_vlan_filter_enable(adapter); 3512 ixgbe_vlan_filter_enable(adapter);
3464 hw->addr_ctrl.user_set_promisc = false; 3513 hw->addr_ctrl.user_set_promisc = false;
3465 /* 3514 }
3466 * Write addresses to available RAR registers, if there is not 3515
3467 * sufficient space to store all the addresses then enable 3516 /*
3468 * unicast promiscuous mode 3517 * Write addresses to available RAR registers, if there is not
3469 */ 3518 * sufficient space to store all the addresses then enable
3470 count = ixgbe_write_uc_addr_list(netdev); 3519 * unicast promiscuous mode
3471 if (count < 0) { 3520 */
3472 fctrl |= IXGBE_FCTRL_UPE; 3521 count = ixgbe_write_uc_addr_list(netdev);
3473 vmolr |= IXGBE_VMOLR_ROPE; 3522 if (count < 0) {
3474 } 3523 fctrl |= IXGBE_FCTRL_UPE;
3524 vmolr |= IXGBE_VMOLR_ROPE;
3475 } 3525 }
3476 3526
3477 if (adapter->num_vfs) { 3527 if (adapter->num_vfs) {
@@ -4138,7 +4188,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4138 DMA_FROM_DEVICE); 4188 DMA_FROM_DEVICE);
4139 rx_buffer->dma = 0; 4189 rx_buffer->dma = 0;
4140 if (rx_buffer->page) 4190 if (rx_buffer->page)
4141 put_page(rx_buffer->page); 4191 __free_pages(rx_buffer->page,
4192 ixgbe_rx_pg_order(rx_ring));
4142 rx_buffer->page = NULL; 4193 rx_buffer->page = NULL;
4143 } 4194 }
4144 4195
@@ -4175,6 +4226,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
4175 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 4226 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4176 } 4227 }
4177 4228
4229 netdev_tx_reset_queue(txring_txq(tx_ring));
4230
4178 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 4231 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4179 memset(tx_ring->tx_buffer_info, 0, size); 4232 memset(tx_ring->tx_buffer_info, 0, size);
4180 4233
@@ -4426,17 +4479,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4426 adapter->dcb_cfg.pfc_mode_enable = false; 4479 adapter->dcb_cfg.pfc_mode_enable = false;
4427 adapter->dcb_set_bitmap = 0x00; 4480 adapter->dcb_set_bitmap = 0x00;
4428 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; 4481 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
4429 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 4482 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
4430 MAX_TRAFFIC_CLASS); 4483 sizeof(adapter->temp_dcb_cfg));
4431 4484
4432#endif 4485#endif
4433 4486
4434 /* default flow control settings */ 4487 /* default flow control settings */
4435 hw->fc.requested_mode = ixgbe_fc_full; 4488 hw->fc.requested_mode = ixgbe_fc_full;
4436 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ 4489 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
4437#ifdef CONFIG_DCB
4438 adapter->last_lfc_mode = hw->fc.current_mode;
4439#endif
4440 ixgbe_pbthresh_setup(adapter); 4490 ixgbe_pbthresh_setup(adapter);
4441 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 4491 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4442 hw->fc.send_xon = true; 4492 hw->fc.send_xon = true;
@@ -4836,7 +4886,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
4836 4886
4837 pci_wake_from_d3(pdev, false); 4887 pci_wake_from_d3(pdev, false);
4838 4888
4889 rtnl_lock();
4839 err = ixgbe_init_interrupt_scheme(adapter); 4890 err = ixgbe_init_interrupt_scheme(adapter);
4891 rtnl_unlock();
4840 if (err) { 4892 if (err) {
4841 e_dev_err("Cannot initialize interrupts for device\n"); 4893 e_dev_err("Cannot initialize interrupts for device\n");
4842 return err; 4894 return err;
@@ -4872,17 +4924,15 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
4872 netif_device_detach(netdev); 4924 netif_device_detach(netdev);
4873 4925
4874 if (netif_running(netdev)) { 4926 if (netif_running(netdev)) {
4927 rtnl_lock();
4875 ixgbe_down(adapter); 4928 ixgbe_down(adapter);
4876 ixgbe_free_irq(adapter); 4929 ixgbe_free_irq(adapter);
4877 ixgbe_free_all_tx_resources(adapter); 4930 ixgbe_free_all_tx_resources(adapter);
4878 ixgbe_free_all_rx_resources(adapter); 4931 ixgbe_free_all_rx_resources(adapter);
4932 rtnl_unlock();
4879 } 4933 }
4880 4934
4881 ixgbe_clear_interrupt_scheme(adapter); 4935 ixgbe_clear_interrupt_scheme(adapter);
4882#ifdef CONFIG_DCB
4883 kfree(adapter->ixgbe_ieee_pfc);
4884 kfree(adapter->ixgbe_ieee_ets);
4885#endif
4886 4936
4887#ifdef CONFIG_PM 4937#ifdef CONFIG_PM
4888 retval = pci_save_state(pdev); 4938 retval = pci_save_state(pdev);
@@ -4893,6 +4943,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
4893 if (wufc) { 4943 if (wufc) {
4894 ixgbe_set_rx_mode(netdev); 4944 ixgbe_set_rx_mode(netdev);
4895 4945
4946 /*
4947 * enable the optics for both mult-speed fiber and
4948 * 82599 SFP+ fiber as we can WoL.
4949 */
4950 if (hw->mac.ops.enable_tx_laser &&
4951 (hw->phy.multispeed_fiber ||
4952 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
4953 hw->mac.type == ixgbe_mac_82599EB)))
4954 hw->mac.ops.enable_tx_laser(hw);
4955
4896 /* turn on all-multi mode if wake on multicast is enabled */ 4956 /* turn on all-multi mode if wake on multicast is enabled */
4897 if (wufc & IXGBE_WUFC_MC) { 4957 if (wufc & IXGBE_WUFC_MC) {
4898 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4958 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -4991,9 +5051,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4991 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 5051 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
4992 u64 rsc_count = 0; 5052 u64 rsc_count = 0;
4993 u64 rsc_flush = 0; 5053 u64 rsc_flush = 0;
4994 for (i = 0; i < 16; i++)
4995 adapter->hw_rx_no_dma_resources +=
4996 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4997 for (i = 0; i < adapter->num_rx_queues; i++) { 5054 for (i = 0; i < adapter->num_rx_queues; i++) {
4998 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; 5055 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
4999 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; 5056 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
@@ -5096,6 +5153,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5096 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); 5153 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
5097 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); 5154 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
5098 case ixgbe_mac_82599EB: 5155 case ixgbe_mac_82599EB:
5156 for (i = 0; i < 16; i++)
5157 adapter->hw_rx_no_dma_resources +=
5158 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5099 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 5159 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5100 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ 5160 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
5101 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 5161 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
@@ -5273,7 +5333,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5273 struct ixgbe_hw *hw = &adapter->hw; 5333 struct ixgbe_hw *hw = &adapter->hw;
5274 u32 link_speed = adapter->link_speed; 5334 u32 link_speed = adapter->link_speed;
5275 bool link_up = adapter->link_up; 5335 bool link_up = adapter->link_up;
5276 int i; 5336 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
5277 5337
5278 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) 5338 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5279 return; 5339 return;
@@ -5285,13 +5345,13 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5285 link_speed = IXGBE_LINK_SPEED_10GB_FULL; 5345 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5286 link_up = true; 5346 link_up = true;
5287 } 5347 }
5288 if (link_up) { 5348
5289 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 5349 if (adapter->ixgbe_ieee_pfc)
5290 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 5350 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
5291 hw->mac.ops.fc_enable(hw, i); 5351
5292 } else { 5352 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
5293 hw->mac.ops.fc_enable(hw, 0); 5353 hw->mac.ops.fc_enable(hw);
5294 } 5354 ixgbe_set_rx_drop_en(adapter);
5295 } 5355 }
5296 5356
5297 if (link_up || 5357 if (link_up ||
@@ -5345,6 +5405,11 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5345 flow_rx = false; 5405 flow_rx = false;
5346 break; 5406 break;
5347 } 5407 }
5408
5409#ifdef CONFIG_IXGBE_PTP
5410 ixgbe_ptp_start_cyclecounter(adapter);
5411#endif
5412
5348 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", 5413 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5349 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 5414 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
5350 "10 Gbps" : 5415 "10 Gbps" :
@@ -5382,6 +5447,10 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5382 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) 5447 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
5383 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; 5448 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5384 5449
5450#ifdef CONFIG_IXGBE_PTP
5451 ixgbe_ptp_start_cyclecounter(adapter);
5452#endif
5453
5385 e_info(drv, "NIC Link is Down\n"); 5454 e_info(drv, "NIC Link is Down\n");
5386 netif_carrier_off(netdev); 5455 netif_carrier_off(netdev);
5387} 5456}
@@ -5681,6 +5750,9 @@ static void ixgbe_service_task(struct work_struct *work)
5681 ixgbe_watchdog_subtask(adapter); 5750 ixgbe_watchdog_subtask(adapter);
5682 ixgbe_fdir_reinit_subtask(adapter); 5751 ixgbe_fdir_reinit_subtask(adapter);
5683 ixgbe_check_hang_subtask(adapter); 5752 ixgbe_check_hang_subtask(adapter);
5753#ifdef CONFIG_IXGBE_PTP
5754 ixgbe_ptp_overflow_check(adapter);
5755#endif
5684 5756
5685 ixgbe_service_event_complete(adapter); 5757 ixgbe_service_event_complete(adapter);
5686} 5758}
@@ -5831,6 +5903,11 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
5831 if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN) 5903 if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
5832 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); 5904 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
5833 5905
5906#ifdef CONFIG_IXGBE_PTP
5907 if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
5908 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
5909#endif
5910
5834 /* set segmentation enable bits for TSO/FSO */ 5911 /* set segmentation enable bits for TSO/FSO */
5835#ifdef IXGBE_FCOE 5912#ifdef IXGBE_FCOE
5836 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO)) 5913 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
@@ -6221,6 +6298,15 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6221 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; 6298 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
6222 } 6299 }
6223 6300
6301 skb_tx_timestamp(skb);
6302
6303#ifdef CONFIG_IXGBE_PTP
6304 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6305 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6306 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
6307 }
6308#endif
6309
6224#ifdef CONFIG_PCI_IOV 6310#ifdef CONFIG_PCI_IOV
6225 /* 6311 /*
6226 * Use the l2switch_enable flag - would be false if the DMA 6312 * Use the l2switch_enable flag - would be false if the DMA
@@ -6373,7 +6459,14 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
6373{ 6459{
6374 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6460 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6375 6461
6376 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); 6462 switch (cmd) {
6463#ifdef CONFIG_IXGBE_PTP
6464 case SIOCSHWTSTAMP:
6465 return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
6466#endif
6467 default:
6468 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
6469 }
6377} 6470}
6378 6471
6379/** 6472/**
@@ -6565,15 +6658,17 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6565 6658
6566 if (tc) { 6659 if (tc) {
6567 netdev_set_num_tc(dev, tc); 6660 netdev_set_num_tc(dev, tc);
6568 adapter->last_lfc_mode = adapter->hw.fc.current_mode;
6569 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; 6661 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
6570 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 6662 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
6571 6663
6572 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 6664 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
6665 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
6573 adapter->hw.fc.requested_mode = ixgbe_fc_none; 6666 adapter->hw.fc.requested_mode = ixgbe_fc_none;
6667 }
6574 } else { 6668 } else {
6575 netdev_reset_tc(dev); 6669 netdev_reset_tc(dev);
6576 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 6670 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
6671 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
6577 6672
6578 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 6673 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6579 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 6674 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -6622,7 +6717,7 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
6622 /* Turn off LRO if not RSC capable */ 6717 /* Turn off LRO if not RSC capable */
6623 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 6718 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
6624 features &= ~NETIF_F_LRO; 6719 features &= ~NETIF_F_LRO;
6625 6720
6626 6721
6627 return features; 6722 return features;
6628} 6723}
@@ -6681,6 +6776,74 @@ static int ixgbe_set_features(struct net_device *netdev,
6681 return 0; 6776 return 0;
6682} 6777}
6683 6778
6779static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
6780 struct net_device *dev,
6781 unsigned char *addr,
6782 u16 flags)
6783{
6784 struct ixgbe_adapter *adapter = netdev_priv(dev);
6785 int err = -EOPNOTSUPP;
6786
6787 if (ndm->ndm_state & NUD_PERMANENT) {
6788 pr_info("%s: FDB only supports static addresses\n",
6789 ixgbe_driver_name);
6790 return -EINVAL;
6791 }
6792
6793 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6794 if (is_unicast_ether_addr(addr))
6795 err = dev_uc_add_excl(dev, addr);
6796 else if (is_multicast_ether_addr(addr))
6797 err = dev_mc_add_excl(dev, addr);
6798 else
6799 err = -EINVAL;
6800 }
6801
6802 /* Only return duplicate errors if NLM_F_EXCL is set */
6803 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6804 err = 0;
6805
6806 return err;
6807}
6808
6809static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
6810 struct net_device *dev,
6811 unsigned char *addr)
6812{
6813 struct ixgbe_adapter *adapter = netdev_priv(dev);
6814 int err = -EOPNOTSUPP;
6815
6816 if (ndm->ndm_state & NUD_PERMANENT) {
6817 pr_info("%s: FDB only supports static addresses\n",
6818 ixgbe_driver_name);
6819 return -EINVAL;
6820 }
6821
6822 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6823 if (is_unicast_ether_addr(addr))
6824 err = dev_uc_del(dev, addr);
6825 else if (is_multicast_ether_addr(addr))
6826 err = dev_mc_del(dev, addr);
6827 else
6828 err = -EINVAL;
6829 }
6830
6831 return err;
6832}
6833
6834static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
6835 struct netlink_callback *cb,
6836 struct net_device *dev,
6837 int idx)
6838{
6839 struct ixgbe_adapter *adapter = netdev_priv(dev);
6840
6841 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6842 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
6843
6844 return idx;
6845}
6846
6684static const struct net_device_ops ixgbe_netdev_ops = { 6847static const struct net_device_ops ixgbe_netdev_ops = {
6685 .ndo_open = ixgbe_open, 6848 .ndo_open = ixgbe_open,
6686 .ndo_stop = ixgbe_close, 6849 .ndo_stop = ixgbe_close,
@@ -6717,6 +6880,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
6717#endif /* IXGBE_FCOE */ 6880#endif /* IXGBE_FCOE */
6718 .ndo_set_features = ixgbe_set_features, 6881 .ndo_set_features = ixgbe_set_features,
6719 .ndo_fix_features = ixgbe_fix_features, 6882 .ndo_fix_features = ixgbe_fix_features,
6883 .ndo_fdb_add = ixgbe_ndo_fdb_add,
6884 .ndo_fdb_del = ixgbe_ndo_fdb_del,
6885 .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
6720}; 6886};
6721 6887
6722static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, 6888static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
@@ -6731,14 +6897,66 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6731 /* The 82599 supports up to 64 VFs per physical function 6897 /* The 82599 supports up to 64 VFs per physical function
6732 * but this implementation limits allocation to 63 so that 6898 * but this implementation limits allocation to 63 so that
6733 * basic networking resources are still available to the 6899 * basic networking resources are still available to the
6734 * physical function 6900 * physical function. If the user requests greater thn
6901 * 63 VFs then it is an error - reset to default of zero.
6735 */ 6902 */
6736 adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs; 6903 adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
6737 ixgbe_enable_sriov(adapter, ii); 6904 ixgbe_enable_sriov(adapter, ii);
6738#endif /* CONFIG_PCI_IOV */ 6905#endif /* CONFIG_PCI_IOV */
6739} 6906}
6740 6907
6741/** 6908/**
6909 * ixgbe_wol_supported - Check whether device supports WoL
6910 * @hw: hw specific details
6911 * @device_id: the device ID
6912 * @subdev_id: the subsystem device ID
6913 *
6914 * This function is used by probe and ethtool to determine
6915 * which devices have WoL support
6916 *
6917 **/
6918int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
6919 u16 subdevice_id)
6920{
6921 struct ixgbe_hw *hw = &adapter->hw;
6922 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
6923 int is_wol_supported = 0;
6924
6925 switch (device_id) {
6926 case IXGBE_DEV_ID_82599_SFP:
6927 /* Only these subdevices could supports WOL */
6928 switch (subdevice_id) {
6929 case IXGBE_SUBDEV_ID_82599_560FLR:
6930 /* only support first port */
6931 if (hw->bus.func != 0)
6932 break;
6933 case IXGBE_SUBDEV_ID_82599_SFP:
6934 is_wol_supported = 1;
6935 break;
6936 }
6937 break;
6938 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
6939 /* All except this subdevice support WOL */
6940 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
6941 is_wol_supported = 1;
6942 break;
6943 case IXGBE_DEV_ID_82599_KX4:
6944 is_wol_supported = 1;
6945 break;
6946 case IXGBE_DEV_ID_X540T:
6947 /* check eeprom to see if enabled wol */
6948 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
6949 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
6950 (hw->bus.func == 0))) {
6951 is_wol_supported = 1;
6952 }
6953 break;
6954 }
6955
6956 return is_wol_supported;
6957}
6958
6959/**
6742 * ixgbe_probe - Device Initialization Routine 6960 * ixgbe_probe - Device Initialization Routine
6743 * @pdev: PCI device information struct 6961 * @pdev: PCI device information struct
6744 * @ent: entry in ixgbe_pci_tbl 6962 * @ent: entry in ixgbe_pci_tbl
@@ -6764,7 +6982,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6764 u16 device_caps; 6982 u16 device_caps;
6765#endif 6983#endif
6766 u32 eec; 6984 u32 eec;
6767 u16 wol_cap;
6768 6985
6769 /* Catch broken hardware that put the wrong VF device ID in 6986 /* Catch broken hardware that put the wrong VF device ID in
6770 * the PCIe SR-IOV capability. 6987 * the PCIe SR-IOV capability.
@@ -7028,42 +7245,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7028 netdev->features &= ~NETIF_F_RXHASH; 7245 netdev->features &= ~NETIF_F_RXHASH;
7029 } 7246 }
7030 7247
7031 /* WOL not supported for all but the following */ 7248 /* WOL not supported for all devices */
7032 adapter->wol = 0; 7249 adapter->wol = 0;
7033 switch (pdev->device) { 7250 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
7034 case IXGBE_DEV_ID_82599_SFP: 7251 if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device))
7035 /* Only these subdevice supports WOL */
7036 switch (pdev->subsystem_device) {
7037 case IXGBE_SUBDEV_ID_82599_560FLR:
7038 /* only support first port */
7039 if (hw->bus.func != 0)
7040 break;
7041 case IXGBE_SUBDEV_ID_82599_SFP:
7042 adapter->wol = IXGBE_WUFC_MAG;
7043 break;
7044 }
7045 break;
7046 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7047 /* All except this subdevice support WOL */
7048 if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7049 adapter->wol = IXGBE_WUFC_MAG;
7050 break;
7051 case IXGBE_DEV_ID_82599_KX4:
7052 adapter->wol = IXGBE_WUFC_MAG; 7252 adapter->wol = IXGBE_WUFC_MAG;
7053 break;
7054 case IXGBE_DEV_ID_X540T:
7055 /* Check eeprom to see if it is enabled */
7056 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
7057 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
7058 7253
7059 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
7060 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
7061 (hw->bus.func == 0)))
7062 adapter->wol = IXGBE_WUFC_MAG;
7063 break;
7064 }
7065 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 7254 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
7066 7255
7256#ifdef CONFIG_IXGBE_PTP
7257 ixgbe_ptp_init(adapter);
7258#endif /* CONFIG_IXGBE_PTP*/
7259
7067 /* save off EEPROM version number */ 7260 /* save off EEPROM version number */
7068 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); 7261 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
7069 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); 7262 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
@@ -7150,6 +7343,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7150 7343
7151 e_dev_info("%s\n", ixgbe_default_device_descr); 7344 e_dev_info("%s\n", ixgbe_default_device_descr);
7152 cards_found++; 7345 cards_found++;
7346
7347#ifdef CONFIG_IXGBE_HWMON
7348 if (ixgbe_sysfs_init(adapter))
7349 e_err(probe, "failed to allocate sysfs resources\n");
7350#endif /* CONFIG_IXGBE_HWMON */
7351
7153 return 0; 7352 return 0;
7154 7353
7155err_register: 7354err_register:
@@ -7188,6 +7387,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7188 set_bit(__IXGBE_DOWN, &adapter->state); 7387 set_bit(__IXGBE_DOWN, &adapter->state);
7189 cancel_work_sync(&adapter->service_task); 7388 cancel_work_sync(&adapter->service_task);
7190 7389
7390#ifdef CONFIG_IXGBE_PTP
7391 ixgbe_ptp_stop(adapter);
7392#endif
7393
7191#ifdef CONFIG_IXGBE_DCA 7394#ifdef CONFIG_IXGBE_DCA
7192 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 7395 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
7193 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; 7396 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
@@ -7196,6 +7399,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7196 } 7399 }
7197 7400
7198#endif 7401#endif
7402#ifdef CONFIG_IXGBE_HWMON
7403 ixgbe_sysfs_exit(adapter);
7404#endif /* CONFIG_IXGBE_HWMON */
7405
7199#ifdef IXGBE_FCOE 7406#ifdef IXGBE_FCOE
7200 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 7407 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7201 ixgbe_cleanup_fcoe(adapter); 7408 ixgbe_cleanup_fcoe(adapter);
@@ -7220,6 +7427,11 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7220 7427
7221 ixgbe_release_hw_control(adapter); 7428 ixgbe_release_hw_control(adapter);
7222 7429
7430#ifdef CONFIG_DCB
7431 kfree(adapter->ixgbe_ieee_pfc);
7432 kfree(adapter->ixgbe_ieee_ets);
7433
7434#endif
7223 iounmap(adapter->hw.hw_addr); 7435 iounmap(adapter->hw.hw_addr);
7224 pci_release_selected_regions(pdev, pci_select_bars(pdev, 7436 pci_release_selected_regions(pdev, pci_select_bars(pdev,
7225 IORESOURCE_MEM)); 7437 IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index bf9f82f4b1ae..24117709d6a2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1582,13 +1582,21 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
1582 **/ 1582 **/
1583static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) 1583static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1584{ 1584{
1585 *i2cctl |= IXGBE_I2C_CLK_OUT; 1585 u32 i = 0;
1586 1586 u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
1587 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); 1587 u32 i2cctl_r = 0;
1588 IXGBE_WRITE_FLUSH(hw);
1589 1588
1590 /* SCL rise time (1000ns) */ 1589 for (i = 0; i < timeout; i++) {
1591 udelay(IXGBE_I2C_T_RISE); 1590 *i2cctl |= IXGBE_I2C_CLK_OUT;
1591 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
1592 IXGBE_WRITE_FLUSH(hw);
1593 /* SCL rise time (1000ns) */
1594 udelay(IXGBE_I2C_T_RISE);
1595
1596 i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1597 if (i2cctl_r & IXGBE_I2C_CLK_IN)
1598 break;
1599 }
1592} 1600}
1593 1601
1594/** 1602/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
new file mode 100644
index 000000000000..ddc6a4d19302
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -0,0 +1,900 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27#include "ixgbe.h"
28#include <linux/export.h>
29
30/*
31 * The 82599 and the X540 do not have true 64bit nanosecond scale
32 * counter registers. Instead, SYSTIME is defined by a fixed point
33 * system which allows the user to define the scale counter increment
34 * value at every level change of the oscillator driving the SYSTIME
35 * value. For both devices the TIMINCA:IV field defines this
36 * increment. On the X540 device, 31 bits are provided. However on the
37 * 82599 only provides 24 bits. The time unit is determined by the
38 * clock frequency of the oscillator in combination with the TIMINCA
39 * register. When these devices link at 10Gb the oscillator has a
40 * period of 6.4ns. In order to convert the scale counter into
41 * nanoseconds the cyclecounter and timecounter structures are
42 * used. The SYSTIME registers need to be converted to ns values by use
43 * of only a right shift (division by power of 2). The following math
44 * determines the largest incvalue that will fit into the available
45 * bits in the TIMINCA register.
46 *
47 * PeriodWidth: Number of bits to store the clock period
48 * MaxWidth: The maximum width value of the TIMINCA register
49 * Period: The clock period for the oscillator
50 * round(): discard the fractional portion of the calculation
51 *
52 * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ]
53 *
54 * For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns
55 * For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns
56 *
57 * The period also changes based on the link speed:
58 * At 10Gb link or no link, the period remains the same.
59 * At 1Gb link, the period is multiplied by 10. (64ns)
60 * At 100Mb link, the period is multiplied by 100. (640ns)
61 *
62 * The calculated value allows us to right shift the SYSTIME register
63 * value in order to quickly convert it into a nanosecond clock,
64 * while allowing for the maximum possible adjustment value.
65 *
66 * These diagrams are only for the 10Gb link period
67 *
68 * SYSTIMEH SYSTIMEL
69 * +--------------+ +--------------+
70 * X540 | 32 | | 1 | 3 | 28 |
71 * *--------------+ +--------------+
72 * \________ 36 bits ______/ fract
73 *
74 * +--------------+ +--------------+
75 * 82599 | 32 | | 8 | 3 | 21 |
76 * *--------------+ +--------------+
77 * \________ 43 bits ______/ fract
78 *
79 * The 36 bit X540 SYSTIME overflows every
80 * 2^36 * 10^-9 / 60 = 1.14 minutes or 69 seconds
81 *
82 * The 43 bit 82599 SYSTIME overflows every
83 * 2^43 * 10^-9 / 3600 = 2.4 hours
84 */
85#define IXGBE_INCVAL_10GB 0x66666666
86#define IXGBE_INCVAL_1GB 0x40000000
87#define IXGBE_INCVAL_100 0x50000000
88
89#define IXGBE_INCVAL_SHIFT_10GB 28
90#define IXGBE_INCVAL_SHIFT_1GB 24
91#define IXGBE_INCVAL_SHIFT_100 21
92
93#define IXGBE_INCVAL_SHIFT_82599 7
94#define IXGBE_INCPER_SHIFT_82599 24
95#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL
96
97#define IXGBE_OVERFLOW_PERIOD (HZ * 30)
98
99#ifndef NSECS_PER_SEC
100#define NSECS_PER_SEC 1000000000ULL
101#endif
102
103/**
104 * ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
105 * @cc - the cyclecounter structure
106 *
107 * this function reads the cyclecounter registers and is called by the
108 * cyclecounter structure used to construct a ns counter from the
109 * arbitrary fixed point registers
110 */
111static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)
112{
113 struct ixgbe_adapter *adapter =
114 container_of(cc, struct ixgbe_adapter, cc);
115 struct ixgbe_hw *hw = &adapter->hw;
116 u64 stamp = 0;
117
118 stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
119 stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
120
121 return stamp;
122}
123
124/**
125 * ixgbe_ptp_adjfreq
126 * @ptp - the ptp clock structure
127 * @ppb - parts per billion adjustment from base
128 *
129 * adjust the frequency of the ptp cycle counter by the
130 * indicated ppb from the base frequency.
131 */
132static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
133{
134 struct ixgbe_adapter *adapter =
135 container_of(ptp, struct ixgbe_adapter, ptp_caps);
136 struct ixgbe_hw *hw = &adapter->hw;
137 u64 freq;
138 u32 diff, incval;
139 int neg_adj = 0;
140
141 if (ppb < 0) {
142 neg_adj = 1;
143 ppb = -ppb;
144 }
145
146 smp_mb();
147 incval = ACCESS_ONCE(adapter->base_incval);
148
149 freq = incval;
150 freq *= ppb;
151 diff = div_u64(freq, 1000000000ULL);
152
153 incval = neg_adj ? (incval - diff) : (incval + diff);
154
155 switch (hw->mac.type) {
156 case ixgbe_mac_X540:
157 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
158 break;
159 case ixgbe_mac_82599EB:
160 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
161 (1 << IXGBE_INCPER_SHIFT_82599) |
162 incval);
163 break;
164 default:
165 break;
166 }
167
168 return 0;
169}
170
171/**
172 * ixgbe_ptp_adjtime
173 * @ptp - the ptp clock structure
174 * @delta - offset to adjust the cycle counter by
175 *
176 * adjust the timer by resetting the timecounter structure.
177 */
178static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
179{
180 struct ixgbe_adapter *adapter =
181 container_of(ptp, struct ixgbe_adapter, ptp_caps);
182 unsigned long flags;
183 u64 now;
184
185 spin_lock_irqsave(&adapter->tmreg_lock, flags);
186
187 now = timecounter_read(&adapter->tc);
188 now += delta;
189
190 /* reset the timecounter */
191 timecounter_init(&adapter->tc,
192 &adapter->cc,
193 now);
194
195 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
196 return 0;
197}
198
199/**
200 * ixgbe_ptp_gettime
201 * @ptp - the ptp clock structure
202 * @ts - timespec structure to hold the current time value
203 *
204 * read the timecounter and return the correct value on ns,
205 * after converting it into a struct timespec.
206 */
207static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
208{
209 struct ixgbe_adapter *adapter =
210 container_of(ptp, struct ixgbe_adapter, ptp_caps);
211 u64 ns;
212 u32 remainder;
213 unsigned long flags;
214
215 spin_lock_irqsave(&adapter->tmreg_lock, flags);
216 ns = timecounter_read(&adapter->tc);
217 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
218
219 ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
220 ts->tv_nsec = remainder;
221
222 return 0;
223}
224
225/**
226 * ixgbe_ptp_settime
227 * @ptp - the ptp clock structure
228 * @ts - the timespec containing the new time for the cycle counter
229 *
230 * reset the timecounter to use a new base value instead of the kernel
231 * wall timer value.
232 */
233static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
234 const struct timespec *ts)
235{
236 struct ixgbe_adapter *adapter =
237 container_of(ptp, struct ixgbe_adapter, ptp_caps);
238 u64 ns;
239 unsigned long flags;
240
241 ns = ts->tv_sec * 1000000000ULL;
242 ns += ts->tv_nsec;
243
244 /* reset the timecounter */
245 spin_lock_irqsave(&adapter->tmreg_lock, flags);
246 timecounter_init(&adapter->tc, &adapter->cc, ns);
247 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
248
249 return 0;
250}
251
252/**
253 * ixgbe_ptp_enable
254 * @ptp - the ptp clock structure
255 * @rq - the requested feature to change
256 * @on - whether to enable or disable the feature
257 *
258 * enable (or disable) ancillary features of the phc subsystem.
259 * our driver only supports the PPS feature on the X540
260 */
261static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
262 struct ptp_clock_request *rq, int on)
263{
264 struct ixgbe_adapter *adapter =
265 container_of(ptp, struct ixgbe_adapter, ptp_caps);
266
267 /**
268 * When PPS is enabled, unmask the interrupt for the ClockOut
269 * feature, so that the interrupt handler can send the PPS
270 * event when the clock SDP triggers. Clear mask when PPS is
271 * disabled
272 */
273 if (rq->type == PTP_CLK_REQ_PPS) {
274 switch (adapter->hw.mac.type) {
275 case ixgbe_mac_X540:
276 if (on)
277 adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
278 else
279 adapter->flags2 &=
280 ~IXGBE_FLAG2_PTP_PPS_ENABLED;
281 return 0;
282 default:
283 break;
284 }
285 }
286
287 return -ENOTSUPP;
288}
289
290/**
291 * ixgbe_ptp_check_pps_event
292 * @adapter - the private adapter structure
293 * @eicr - the interrupt cause register value
294 *
295 * This function is called by the interrupt routine when checking for
296 * interrupts. It will check and handle a pps event.
297 */
298void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
299{
300 struct ixgbe_hw *hw = &adapter->hw;
301 struct ptp_clock_event event;
302
303 event.type = PTP_CLOCK_PPS;
304
305 /* Make sure ptp clock is valid, and PPS event enabled */
306 if (!adapter->ptp_clock ||
307 !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
308 return;
309
310 switch (hw->mac.type) {
311 case ixgbe_mac_X540:
312 if (eicr & IXGBE_EICR_TIMESYNC)
313 ptp_clock_event(adapter->ptp_clock, &event);
314 break;
315 default:
316 break;
317 }
318}
319
/**
 * ixgbe_ptp_enable_sdp
 * @hw - the hardware private structure
 * @shift - the clock shift for calculating nanoseconds
 *
 * this function enables the clock out feature on the sdp0 for the
 * X540 device. It will create a 1second periodic output that can be
 * used as the PPS (via an interrupt).
 *
 * It calculates when the systime will be on an exact second, and then
 * aligns the start of the PPS signal to that value. The shift is
 * necessary because it can change based on the link speed.
 */
static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)
{
	u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh;
	u64 clock_edge = 0;
	u32 rem;

	switch (hw->mac.type) {
	case ixgbe_mac_X540:
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);

		/*
		 * enable the SDP0 pin as output, and connected to the native
		 * function for Timesync (ClockOut)
		 */
		esdp |= (IXGBE_ESDP_SDP0_DIR |
			 IXGBE_ESDP_SDP0_NATIVE);

		/*
		 * enable the Clock Out feature on SDP0, and allow interrupts
		 * to occur when the pin changes
		 */
		tsauxc = (IXGBE_TSAUXC_EN_CLK |
			  IXGBE_TSAUXC_SYNCLK |
			  IXGBE_TSAUXC_SDP0_INT);

		/*
		 * clock period (or pulse length): one second, expressed in
		 * raw cycle units via the shift.
		 * NOTE(review): assumes NSECS_PER_SEC is a 64-bit constant
		 * so the left shift cannot overflow — confirm its definition.
		 */
		clktiml = (u32)(NSECS_PER_SEC << shift);
		clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);

		/* read the current 64-bit systime value, low word first */
		clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
		clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;

		/*
		 * account for the fact that we can't do u64 division
		 * with remainder, by converting the clock values into
		 * nanoseconds first: round the current time up to the next
		 * whole second, then convert back into cycle units
		 */
		clock_edge >>= shift;
		div_u64_rem(clock_edge, NSECS_PER_SEC, &rem);
		clock_edge += (NSECS_PER_SEC - rem);
		clock_edge <<= shift;

		/* specify the initial clock start time */
		trgttiml = (u32)clock_edge;
		trgttimh = (u32)(clock_edge >> 32);

		IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml);
		IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh);
		IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
		IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);

		/* program the pin and enable Clock Out only after the
		 * period and target registers have been set up */
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);

		/* unmask the Timesync interrupt so PPS events are seen */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_TIMESYNC);
		break;
	default:
		break;
	}
}
393
/**
 * ixgbe_ptp_disable_sdp
 * @hw - the private hardware structure
 *
 * this function disables the auxiliary SDP clock out feature
 */
static void ixgbe_ptp_disable_sdp(struct ixgbe_hw *hw)
{
	/* mask the Timesync interrupt first so no further PPS events fire */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_TIMESYNC);

	/* then turn off the Clock Out feature entirely */
	IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0);
}
405
406/**
407 * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow
408 * @work: structure containing information about this work task
409 *
410 * this work function is scheduled to continue reading the timecounter
411 * in order to prevent missing when the system time registers wrap
412 * around. This needs to be run approximately twice a minute when no
413 * PTP activity is occurring.
414 */
415void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
416{
417 unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
418 struct timespec ts;
419
420 if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) &&
421 (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
422 ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
423 adapter->last_overflow_check = jiffies;
424 }
425}
426
/**
 * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: structure containing interrupt and ring information
 * @skb: particular skb to send timestamp with
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
			   struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval = 0, ns;
	u32 tsynctxctl;
	unsigned long flags;

	/* we cannot process timestamps on a ring without a q_vector */
	if (!q_vector || !q_vector->adapter)
		return;

	adapter = q_vector->adapter;
	hw = &adapter->hw;

	/*
	 * the timestamp registers are read unconditionally, before the
	 * validity check: the read itself is what clears them so the next
	 * packet can be timestamped (see the comment below)
	 */
	tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;

	/*
	 * if TX timestamp is not valid, exit after clearing the
	 * timestamp registers
	 */
	if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
		return;

	/* convert the raw cycle count to ns under the timecounter lock */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, regval);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	/* pass the timestamp up the stack along with the skb */
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);
}
472
/**
 * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
 * @q_vector: structure containing interrupt and ring information
 * @skb: particular skb to send timestamp with
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
			   struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct skb_shared_hwtstamps *shhwtstamps;
	u64 regval = 0, ns;
	u32 tsyncrxctl;
	unsigned long flags;

	/* we cannot process timestamps on a ring without a q_vector */
	if (!q_vector || !q_vector->adapter)
		return;

	adapter = q_vector->adapter;
	hw = &adapter->hw;

	/*
	 * read the control and timestamp registers unconditionally; the
	 * read releases the registers so the next packet can be stamped
	 * (see the comment below)
	 */
	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
		return;

	/* convert the raw cycle count to ns under the timecounter lock */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, regval);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	/* store the timestamp in the skb's shared timestamp area */
	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
}
524
/**
 * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
 * @adapter: pointer to adapter struct
 * @ifreq: ioctl data
 * @cmd: particular ioctl requested
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 */
int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
			     struct ifreq *ifr, int cmd)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_mtrl = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fallthrough - OFF and ON are both valid tx_type values */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG;
		is_l2 = true;
		is_l4 = true;
		/* hardware filters on message type, not the requested kind of
		 * event packet, so report the coarser filter actually used */
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
	default:
		/*
		 * register RXMTRL must be set, therefore it is not
		 * possible to time stamp both V1 Sync and Delay_Req messages
		 * and hardware does not support timestamping all packets
		 * => return error
		 */
		return -ERANGE;
	}

	/* the 82598 has no timestamping hardware at all: reject any request
	 * that would enable it (bitwise OR is intentional — either ctl word
	 * being nonzero means timestamping was requested) */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -ERANGE;
		return 0;
	}

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
				(IXGBE_ETQF_FILTER_EN | /* enable filter */
				 IXGBE_ETQF_1588 | /* enable timestamping */
				 ETH_P_1588));     /* 1588 eth protocol type */
	else
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */
			    | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */
			    | IXGBE_FTQF_QUEUE_ENABLE);

		ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */
			  & IXGBE_FTQF_DEST_PORT_MASK /* dest check */
			  & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */
			 << IXGBE_FTQF_5TUPLE_MASK_SHIFT);

		IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3),
				(3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 |
				 IXGBE_IMIR_SIZE_BP_82599));

		/* enable port check */
		IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3),
				(htons(PTP_PORT) |
				 htons(PTP_PORT) << 16));

		IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf);

		tsync_rx_mtrl |= PTP_PORT << 16;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0);
	}

	/* enable/disable TX */
	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl);

	IXGBE_WRITE_FLUSH(hw);

	/* clear TX/RX time stamp registers, just to be sure */
	regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
	regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
690
/**
 * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
 * @adapter - pointer to the adapter structure
 *
 * this function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycles registers in the hardware.
 *
 * A change in link speed impacts the frequency of the DMA clock on
 * the device, which is used to generate the cycle counter
 * registers. Therefore this function is called whenever the link speed
 * changes.
 *
 * This function also turns on the SDP pin for clock out feature (X540
 * only), because this is where the shift is first calculated.
 */
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 incval = 0;
	u32 shift = 0;
	u32 cycle_speed;
	unsigned long flags;

	/**
	 * Determine what speed we need to set the cyclecounter
	 * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
	 * unknown speeds as 10Gb. (Hence why we can't just copy the
	 * link_speed.
	 */
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
	case IXGBE_LINK_SPEED_1GB_FULL:
	case IXGBE_LINK_SPEED_10GB_FULL:
		cycle_speed = adapter->link_speed;
		break;
	default:
		/* cycle speed should be 10Gb when there is no link */
		cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	}

	/* Bail if the cycle speed didn't change */
	if (adapter->cycle_speed == cycle_speed)
		return;

	/* disable the SDP clock out while the counters are reprogrammed */
	ixgbe_ptp_disable_sdp(hw);

	/**
	 * Scale the NIC cycle counter by a large factor so that
	 * relatively small corrections to the frequency can be added
	 * or subtracted. The drawbacks of a large factor include
	 * (a) the clock register overflows more quickly, (b) the cycle
	 * counter structure must be able to convert the systime value
	 * to nanoseconds using only a multiplier and a right-shift,
	 * and (c) the value must fit within the timinca register space
	 * => math based on internal DMA clock rate and available bits
	 */
	switch (cycle_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
		incval = IXGBE_INCVAL_100;
		shift = IXGBE_INCVAL_SHIFT_100;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		incval = IXGBE_INCVAL_1GB;
		shift = IXGBE_INCVAL_SHIFT_1GB;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		incval = IXGBE_INCVAL_10GB;
		shift = IXGBE_INCVAL_SHIFT_10GB;
		break;
	}

	/**
	 * Modify the calculated values to fit within the correct
	 * number of bits specified by the hardware. The 82599 doesn't
	 * have the same space as the X540, so bitshift the calculated
	 * values to fit.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
		break;
	case ixgbe_mac_82599EB:
		incval >>= IXGBE_INCVAL_SHIFT_82599;
		shift -= IXGBE_INCVAL_SHIFT_82599;
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
				(1 << IXGBE_INCPER_SHIFT_82599) |
				incval);
		break;
	default:
		/* other devices aren't supported */
		return;
	}

	/* reset the system time registers */
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
	IXGBE_WRITE_FLUSH(hw);

	/* now that the shift has been calculated and the systime
	 * registers reset, (re-)enable the Clock out feature*/
	ixgbe_ptp_enable_sdp(hw, shift);

	/* store the new cycle speed */
	adapter->cycle_speed = cycle_speed;

	/* publish the new base increment before readers can observe it */
	ACCESS_ONCE(adapter->base_incval) = incval;
	smp_mb();

	/* grab the ptp lock */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);

	/* rebuild the cyclecounter with the new multiplier/shift pair */
	memset(&adapter->cc, 0, sizeof(adapter->cc));
	adapter->cc.read = ixgbe_ptp_read;
	adapter->cc.mask = CLOCKSOURCE_MASK(64);
	adapter->cc.shift = shift;
	adapter->cc.mult = 1;

	/* reset the ns time counter */
	timecounter_init(&adapter->tc, &adapter->cc,
			 ktime_to_ns(ktime_get_real()));

	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
}
817
818/**
819 * ixgbe_ptp_init
820 * @adapter - the ixgbe private adapter structure
821 *
822 * This function performs the required steps for enabling ptp
823 * support. If ptp support has already been loaded it simply calls the
824 * cyclecounter init routine and exits.
825 */
826void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
827{
828 struct net_device *netdev = adapter->netdev;
829
830 switch (adapter->hw.mac.type) {
831 case ixgbe_mac_X540:
832 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
833 adapter->ptp_caps.owner = THIS_MODULE;
834 adapter->ptp_caps.max_adj = 250000000;
835 adapter->ptp_caps.n_alarm = 0;
836 adapter->ptp_caps.n_ext_ts = 0;
837 adapter->ptp_caps.n_per_out = 0;
838 adapter->ptp_caps.pps = 1;
839 adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
840 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
841 adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
842 adapter->ptp_caps.settime = ixgbe_ptp_settime;
843 adapter->ptp_caps.enable = ixgbe_ptp_enable;
844 break;
845 case ixgbe_mac_82599EB:
846 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
847 adapter->ptp_caps.owner = THIS_MODULE;
848 adapter->ptp_caps.max_adj = 250000000;
849 adapter->ptp_caps.n_alarm = 0;
850 adapter->ptp_caps.n_ext_ts = 0;
851 adapter->ptp_caps.n_per_out = 0;
852 adapter->ptp_caps.pps = 0;
853 adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
854 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
855 adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
856 adapter->ptp_caps.settime = ixgbe_ptp_settime;
857 adapter->ptp_caps.enable = ixgbe_ptp_enable;
858 break;
859 default:
860 adapter->ptp_clock = NULL;
861 return;
862 }
863
864 spin_lock_init(&adapter->tmreg_lock);
865
866 ixgbe_ptp_start_cyclecounter(adapter);
867
868 /* (Re)start the overflow check */
869 adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
870
871 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps);
872 if (IS_ERR(adapter->ptp_clock)) {
873 adapter->ptp_clock = NULL;
874 e_dev_err("ptp_clock_register failed\n");
875 } else
876 e_dev_info("registered PHC device on %s\n", netdev->name);
877
878 return;
879}
880
881/**
882 * ixgbe_ptp_stop - disable ptp device and stop the overflow check
883 * @adapter: pointer to adapter struct
884 *
885 * this function stops the ptp support, and cancels the delayed work.
886 */
887void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
888{
889 ixgbe_ptp_disable_sdp(&adapter->hw);
890
891 /* stop the overflow check task */
892 adapter->flags2 &= ~IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
893
894 if (adapter->ptp_clock) {
895 ptp_clock_unregister(adapter->ptp_clock);
896 adapter->ptp_clock = NULL;
897 e_dev_info("removed PHC on %s\n",
898 adapter->netdev->name);
899 }
900}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 88a58cb08569..2d971d18696e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -544,13 +544,18 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
544 544
545 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); 545 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
546 546
547 if (retval) 547 if (retval) {
548 pr_err("Error receiving message from VF\n"); 548 pr_err("Error receiving message from VF\n");
549 return retval;
550 }
549 551
550 /* this is a message we already processed, do nothing */ 552 /* this is a message we already processed, do nothing */
551 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) 553 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
552 return retval; 554 return retval;
553 555
556 /* flush the ack before we write any messages back */
557 IXGBE_WRITE_FLUSH(hw);
558
554 /* 559 /*
555 * until the vf completes a virtual function reset it should not be 560 * until the vf completes a virtual function reset it should not be
556 * allowed to start any configuration. 561 * allowed to start any configuration.
@@ -637,6 +642,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
637 case IXGBE_VF_SET_MACVLAN: 642 case IXGBE_VF_SET_MACVLAN:
638 index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> 643 index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
639 IXGBE_VT_MSGINFO_SHIFT; 644 IXGBE_VT_MSGINFO_SHIFT;
645 if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
646 e_warn(drv, "VF %d requested MACVLAN filter but is "
647 "administratively denied\n", vf);
648 retval = -1;
649 break;
650 }
640 /* 651 /*
641 * If the VF is allowed to set MAC filters then turn off 652 * If the VF is allowed to set MAC filters then turn off
642 * anti-spoofing to avoid false positives. An index 653 * anti-spoofing to avoid false positives. An index
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
new file mode 100644
index 000000000000..1d80b1cefa6a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -0,0 +1,245 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "ixgbe.h"
29#include "ixgbe_common.h"
30#include "ixgbe_type.h"
31
32#include <linux/module.h>
33#include <linux/types.h>
34#include <linux/sysfs.h>
35#include <linux/kobject.h>
36#include <linux/device.h>
37#include <linux/netdevice.h>
38#include <linux/hwmon.h>
39
40#ifdef CONFIG_IXGBE_HWMON
41/* hwmon callback functions */
42static ssize_t ixgbe_hwmon_show_location(struct device *dev,
43 struct device_attribute *attr,
44 char *buf)
45{
46 struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
47 dev_attr);
48 return sprintf(buf, "loc%u\n",
49 ixgbe_attr->sensor->location);
50}
51
52static ssize_t ixgbe_hwmon_show_temp(struct device *dev,
53 struct device_attribute *attr,
54 char *buf)
55{
56 struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
57 dev_attr);
58 unsigned int value;
59
60 /* reset the temp field */
61 ixgbe_attr->hw->mac.ops.get_thermal_sensor_data(ixgbe_attr->hw);
62
63 value = ixgbe_attr->sensor->temp;
64
65 /* display millidegree */
66 value *= 1000;
67
68 return sprintf(buf, "%u\n", value);
69}
70
71static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev,
72 struct device_attribute *attr,
73 char *buf)
74{
75 struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
76 dev_attr);
77 unsigned int value = ixgbe_attr->sensor->caution_thresh;
78
79 /* display millidegree */
80 value *= 1000;
81
82 return sprintf(buf, "%u\n", value);
83}
84
85static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,
86 struct device_attribute *attr,
87 char *buf)
88{
89 struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
90 dev_attr);
91 unsigned int value = ixgbe_attr->sensor->max_op_thresh;
92
93 /* display millidegree */
94 value *= 1000;
95
96 return sprintf(buf, "%u\n", value);
97}
98
99/*
100 * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
101 * @ adapter: pointer to the adapter structure
102 * @ offset: offset in the eeprom sensor data table
103 * @ type: type of sensor data to display
104 *
105 * For each file we want in hwmon's sysfs interface we need a device_attribute
106 * This is included in our hwmon_attr struct that contains the references to
107 * the data structures we need to get the data to display.
108 */
109static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
110 unsigned int offset, int type) {
111 int rc;
112 unsigned int n_attr;
113 struct hwmon_attr *ixgbe_attr;
114
115 n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
116 ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];
117
118 switch (type) {
119 case IXGBE_HWMON_TYPE_LOC:
120 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
121 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
122 "temp%u_label", offset);
123 break;
124 case IXGBE_HWMON_TYPE_TEMP:
125 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
126 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
127 "temp%u_input", offset);
128 break;
129 case IXGBE_HWMON_TYPE_CAUTION:
130 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
131 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
132 "temp%u_max", offset);
133 break;
134 case IXGBE_HWMON_TYPE_MAX:
135 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
136 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
137 "temp%u_crit", offset);
138 break;
139 default:
140 rc = -EPERM;
141 return rc;
142 }
143
144 /* These always the same regardless of type */
145 ixgbe_attr->sensor =
146 &adapter->hw.mac.thermal_sensor_data.sensor[offset];
147 ixgbe_attr->hw = &adapter->hw;
148 ixgbe_attr->dev_attr.store = NULL;
149 ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
150 ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
151
152 rc = device_create_file(&adapter->pdev->dev,
153 &ixgbe_attr->dev_attr);
154
155 if (rc == 0)
156 ++adapter->ixgbe_hwmon_buff.n_hwmon;
157
158 return rc;
159}
160
161static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
162{
163 int i;
164
165 if (adapter == NULL)
166 return;
167
168 for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
169 device_remove_file(&adapter->pdev->dev,
170 &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
171 }
172
173 kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
174
175 if (adapter->ixgbe_hwmon_buff.device)
176 hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
177}
178
/* called from ixgbe_main.c on driver teardown: removes all hwmon sysfs
 * entries and unregisters the hwmon device for this adapter */
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
{
	ixgbe_sysfs_del_adapter(adapter);
}
184
185/* called from ixgbe_main.c */
186int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
187{
188 struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
189 unsigned int i;
190 int n_attrs;
191 int rc = 0;
192
193 /* If this method isn't defined we don't support thermals */
194 if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) {
195 goto exit;
196 }
197
198 /* Don't create thermal hwmon interface if no sensors present */
199 if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
200 goto exit;
201
202 /*
203 * Allocation space for max attributs
204 * max num sensors * values (loc, temp, max, caution)
205 */
206 n_attrs = IXGBE_MAX_SENSORS * 4;
207 ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
208 GFP_KERNEL);
209 if (!ixgbe_hwmon->hwmon_list) {
210 rc = -ENOMEM;
211 goto err;
212 }
213
214 ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
215 if (IS_ERR(ixgbe_hwmon->device)) {
216 rc = PTR_ERR(ixgbe_hwmon->device);
217 goto err;
218 }
219
220 for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
221 /*
222 * Only create hwmon sysfs entries for sensors that have
223 * meaningful data for.
224 */
225 if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
226 continue;
227
228 /* Bail if any hwmon attr struct fails to initialize */
229 rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
230 rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
231 rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
232 rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
233 if (rc)
234 goto err;
235 }
236
237 goto exit;
238
239err:
240 ixgbe_sysfs_del_adapter(adapter);
241exit:
242 return rc;
243}
244#endif /* CONFIG_IXGBE_HWMON */
245
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8636e8344fc9..204848d2448c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -110,6 +110,28 @@
110#define IXGBE_I2C_CLK_OUT 0x00000002 110#define IXGBE_I2C_CLK_OUT 0x00000002
111#define IXGBE_I2C_DATA_IN 0x00000004 111#define IXGBE_I2C_DATA_IN 0x00000004
112#define IXGBE_I2C_DATA_OUT 0x00000008 112#define IXGBE_I2C_DATA_OUT 0x00000008
113#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
114
115#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
116#define IXGBE_EMC_INTERNAL_DATA 0x00
117#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
118#define IXGBE_EMC_DIODE1_DATA 0x01
119#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
120#define IXGBE_EMC_DIODE2_DATA 0x23
121#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
122
123#define IXGBE_MAX_SENSORS 3
124
125struct ixgbe_thermal_diode_data {
126 u8 location;
127 u8 temp;
128 u8 caution_thresh;
129 u8 max_op_thresh;
130};
131
132struct ixgbe_thermal_sensor_data {
133 struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
134};
113 135
114/* Interrupt Registers */ 136/* Interrupt Registers */
115#define IXGBE_EICR 0x00800 137#define IXGBE_EICR 0x00800
@@ -802,6 +824,8 @@
802#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ 824#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
803#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ 825#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
804#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ 826#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
827#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */
828#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */
805#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ 829#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
806#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ 830#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
807#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ 831#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
@@ -1287,6 +1311,7 @@ enum {
1287#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ 1311#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
1288#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ 1312#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
1289#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ 1313#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
1314#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
1290#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ 1315#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
1291#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ 1316#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
1292#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ 1317#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
@@ -1304,6 +1329,7 @@ enum {
1304#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ 1329#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
1305#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ 1330#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
1306#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 1331#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
1332#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
1307#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ 1333#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
1308#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ 1334#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
1309#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ 1335#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
@@ -1322,6 +1348,7 @@ enum {
1322#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ 1348#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
1323#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 1349#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
1324#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */ 1350#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */
1351#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
1325#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ 1352#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
1326#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ 1353#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
1327#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ 1354#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
@@ -1339,6 +1366,7 @@ enum {
1339#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ 1366#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
1340#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ 1367#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
1341#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 1368#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
1369#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
1342#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ 1370#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
1343#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ 1371#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
1344#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ 1372#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
@@ -1479,8 +1507,10 @@ enum {
1479#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ 1507#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
1480#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ 1508#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
1481#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ 1509#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
1510#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
1482#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ 1511#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
1483#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ 1512#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
1513#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */
1484 1514
1485/* LEDCTL Bit Masks */ 1515/* LEDCTL Bit Masks */
1486#define IXGBE_LED_IVRT_BASE 0x00000040 1516#define IXGBE_LED_IVRT_BASE 0x00000040
@@ -1677,11 +1707,29 @@ enum {
1677#define IXGBE_PBANUM0_PTR 0x15 1707#define IXGBE_PBANUM0_PTR 0x15
1678#define IXGBE_PBANUM1_PTR 0x16 1708#define IXGBE_PBANUM1_PTR 0x16
1679#define IXGBE_FREE_SPACE_PTR 0X3E 1709#define IXGBE_FREE_SPACE_PTR 0X3E
1710
1711/* External Thermal Sensor Config */
1712#define IXGBE_ETS_CFG 0x26
1713#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0
1714#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6
1715#define IXGBE_ETS_TYPE_MASK 0x0038
1716#define IXGBE_ETS_TYPE_SHIFT 3
1717#define IXGBE_ETS_TYPE_EMC 0x000
1718#define IXGBE_ETS_TYPE_EMC_SHIFTED 0x000
1719#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007
1720#define IXGBE_ETS_DATA_LOC_MASK 0x3C00
1721#define IXGBE_ETS_DATA_LOC_SHIFT 10
1722#define IXGBE_ETS_DATA_INDEX_MASK 0x0300
1723#define IXGBE_ETS_DATA_INDEX_SHIFT 8
1724#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF
1725
1680#define IXGBE_SAN_MAC_ADDR_PTR 0x28 1726#define IXGBE_SAN_MAC_ADDR_PTR 0x28
1681#define IXGBE_DEVICE_CAPS 0x2C 1727#define IXGBE_DEVICE_CAPS 0x2C
1682#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 1728#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
1683#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 1729#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
1730#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
1684#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 1731#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
1732#define IXGBE_MAX_MSIX_VECTORS_82598 0x13
1685 1733
1686/* MSI-X capability fields masks */ 1734/* MSI-X capability fields masks */
1687#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF 1735#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
@@ -1839,6 +1887,40 @@ enum {
1839#define IXGBE_RXDCTL_RLPML_EN 0x00008000 1887#define IXGBE_RXDCTL_RLPML_EN 0x00008000
1840#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ 1888#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
1841 1889
1890#define IXGBE_TSAUXC_EN_CLK 0x00000004
1891#define IXGBE_TSAUXC_SYNCLK 0x00000008
1892#define IXGBE_TSAUXC_SDP0_INT 0x00000040
1893
1894#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
1895#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
1896
1897#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
1898#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
1899#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00
1900#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02
1901#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
1902#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
1903#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */
1904
1905#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF
1906#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00
1907#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01
1908#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02
1909#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03
1910#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04
1911
1912#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00
1913#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000
1914#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100
1915#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200
1916#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300
1917#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800
1918#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900
1919#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
1920#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00
1921#define IXGBE_RXMTRL_V2_SIGNALING_MSG 0x0C00
1922#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
1923
1842#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ 1924#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
1843#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ 1925#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
1844#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ 1926#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
@@ -1852,7 +1934,7 @@ enum {
1852#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ 1934#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
1853#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ 1935#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
1854#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ 1936#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
1855#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF0 /* Receive FC Mask */ 1937#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Receive FC Mask */
1856 1938
1857#define IXGBE_MFLCN_RPFCE_SHIFT 4 1939#define IXGBE_MFLCN_RPFCE_SHIFT 4
1858 1940
@@ -1968,6 +2050,7 @@ enum {
1968#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ 2050#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
1969#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ 2051#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
1970#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ 2052#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
2053#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */
1971 2054
1972/* PSRTYPE bit definitions */ 2055/* PSRTYPE bit definitions */
1973#define IXGBE_PSRTYPE_TCPHDR 0x00000010 2056#define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -2245,6 +2328,7 @@ struct ixgbe_adv_tx_context_desc {
2245/* Adv Transmit Descriptor Config Masks */ 2328/* Adv Transmit Descriptor Config Masks */
2246#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ 2329#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
2247#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ 2330#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
2331#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE 1588 Time Stamp */
2248#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ 2332#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
2249#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ 2333#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
2250#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ 2334#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
@@ -2533,9 +2617,6 @@ enum ixgbe_fc_mode {
2533 ixgbe_fc_rx_pause, 2617 ixgbe_fc_rx_pause,
2534 ixgbe_fc_tx_pause, 2618 ixgbe_fc_tx_pause,
2535 ixgbe_fc_full, 2619 ixgbe_fc_full,
2536#ifdef CONFIG_DCB
2537 ixgbe_fc_pfc,
2538#endif
2539 ixgbe_fc_default 2620 ixgbe_fc_default
2540}; 2621};
2541 2622
@@ -2768,10 +2849,12 @@ struct ixgbe_mac_operations {
2768 void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); 2849 void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
2769 2850
2770 /* Flow Control */ 2851 /* Flow Control */
2771 s32 (*fc_enable)(struct ixgbe_hw *, s32); 2852 s32 (*fc_enable)(struct ixgbe_hw *);
2772 2853
2773 /* Manageability interface */ 2854 /* Manageability interface */
2774 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); 2855 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
2856 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
2857 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
2775}; 2858};
2776 2859
2777struct ixgbe_phy_operations { 2860struct ixgbe_phy_operations {
@@ -2813,6 +2896,7 @@ struct ixgbe_mac_info {
2813 u16 wwnn_prefix; 2896 u16 wwnn_prefix;
2814 /* prefix for World Wide Port Name (WWPN) */ 2897 /* prefix for World Wide Port Name (WWPN) */
2815 u16 wwpn_prefix; 2898 u16 wwpn_prefix;
2899 u16 max_msix_vectors;
2816#define IXGBE_MAX_MTA 128 2900#define IXGBE_MAX_MTA 128
2817 u32 mta_shadow[IXGBE_MAX_MTA]; 2901 u32 mta_shadow[IXGBE_MAX_MTA];
2818 s32 mc_filter_type; 2902 s32 mc_filter_type;
@@ -2823,12 +2907,12 @@ struct ixgbe_mac_info {
2823 u32 rx_pb_size; 2907 u32 rx_pb_size;
2824 u32 max_tx_queues; 2908 u32 max_tx_queues;
2825 u32 max_rx_queues; 2909 u32 max_rx_queues;
2826 u32 max_msix_vectors;
2827 u32 orig_autoc; 2910 u32 orig_autoc;
2828 u32 orig_autoc2; 2911 u32 orig_autoc2;
2829 bool orig_link_settings_stored; 2912 bool orig_link_settings_stored;
2830 bool autotry_restart; 2913 bool autotry_restart;
2831 u8 flags; 2914 u8 flags;
2915 struct ixgbe_thermal_sensor_data thermal_sensor_data;
2832}; 2916};
2833 2917
2834struct ixgbe_phy_info { 2918struct ixgbe_phy_info {
@@ -2938,7 +3022,6 @@ struct ixgbe_info {
2938#define IXGBE_ERR_OVERTEMP -26 3022#define IXGBE_ERR_OVERTEMP -26
2939#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 3023#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
2940#define IXGBE_ERR_FC_NOT_SUPPORTED -28 3024#define IXGBE_ERR_FC_NOT_SUPPORTED -28
2941#define IXGBE_ERR_FLOW_CONTROL -29
2942#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 3025#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
2943#define IXGBE_ERR_PBA_SECTION -31 3026#define IXGBE_ERR_PBA_SECTION -31
2944#define IXGBE_ERR_INVALID_ARGUMENT -32 3027#define IXGBE_ERR_INVALID_ARGUMENT -32
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 97a991403bbd..f90ec078ece2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -849,6 +849,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
849 .release_swfw_sync = &ixgbe_release_swfw_sync_X540, 849 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
850 .disable_rx_buff = &ixgbe_disable_rx_buff_generic, 850 .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
851 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, 851 .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
852 .get_thermal_sensor_data = NULL,
853 .init_thermal_sensor_thresh = NULL,
852}; 854};
853 855
854static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 856static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 947b5c830735..e09a6cc633bb 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -40,6 +40,7 @@
40typedef u32 ixgbe_link_speed; 40typedef u32 ixgbe_link_speed;
41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
43#define IXGBE_LINK_SPEED_100_FULL 0x0008
43 44
44#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ 45#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
45#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 46#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
@@ -48,6 +49,7 @@ typedef u32 ixgbe_link_speed;
48#define IXGBE_LINKS_SPEED_82599 0x30000000 49#define IXGBE_LINKS_SPEED_82599 0x30000000
49#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 50#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
50#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 51#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
52#define IXGBE_LINKS_SPEED_100_82599 0x10000000
51 53
52/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ 54/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
53#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 55#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 2bfe0d1d7958..e8dddf572d38 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -107,10 +107,20 @@ static int ixgbevf_get_settings(struct net_device *netdev,
107 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 107 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
108 108
109 if (link_up) { 109 if (link_up) {
110 ethtool_cmd_speed_set( 110 __u32 speed = SPEED_10000;
111 ecmd, 111 switch (link_speed) {
112 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 112 case IXGBE_LINK_SPEED_10GB_FULL:
113 SPEED_10000 : SPEED_1000); 113 speed = SPEED_10000;
114 break;
115 case IXGBE_LINK_SPEED_1GB_FULL:
116 speed = SPEED_1000;
117 break;
118 case IXGBE_LINK_SPEED_100_FULL:
119 speed = SPEED_100;
120 break;
121 }
122
123 ethtool_cmd_speed_set(ecmd, speed);
114 ecmd->duplex = DUPLEX_FULL; 124 ecmd->duplex = DUPLEX_FULL;
115 } else { 125 } else {
116 ethtool_cmd_speed_set(ecmd, -1); 126 ethtool_cmd_speed_set(ecmd, -1);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index dfed420a1bf6..0a1b99240d43 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -287,7 +287,7 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
287extern const char ixgbevf_driver_name[]; 287extern const char ixgbevf_driver_name[];
288extern const char ixgbevf_driver_version[]; 288extern const char ixgbevf_driver_version[];
289 289
290extern int ixgbevf_up(struct ixgbevf_adapter *adapter); 290extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
291extern void ixgbevf_down(struct ixgbevf_adapter *adapter); 291extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
292extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); 292extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
293extern void ixgbevf_reset(struct ixgbevf_adapter *adapter); 293extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 307611ae831d..f69ec4288b10 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -57,7 +57,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
57static const char ixgbevf_driver_string[] = 57static const char ixgbevf_driver_string[] =
58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
59 59
60#define DRV_VERSION "2.2.0-k" 60#define DRV_VERSION "2.6.0-k"
61const char ixgbevf_driver_version[] = DRV_VERSION; 61const char ixgbevf_driver_version[] = DRV_VERSION;
62static char ixgbevf_copyright[] = 62static char ixgbevf_copyright[] =
63 "Copyright (c) 2009 - 2012 Intel Corporation."; 63 "Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -1608,13 +1608,14 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1608 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1608 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1609} 1609}
1610 1610
1611static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1611static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1612{ 1612{
1613 struct net_device *netdev = adapter->netdev; 1613 struct net_device *netdev = adapter->netdev;
1614 struct ixgbe_hw *hw = &adapter->hw; 1614 struct ixgbe_hw *hw = &adapter->hw;
1615 int i, j = 0; 1615 int i, j = 0;
1616 int num_rx_rings = adapter->num_rx_queues; 1616 int num_rx_rings = adapter->num_rx_queues;
1617 u32 txdctl, rxdctl; 1617 u32 txdctl, rxdctl;
1618 u32 msg[2];
1618 1619
1619 for (i = 0; i < adapter->num_tx_queues; i++) { 1620 for (i = 0; i < adapter->num_tx_queues; i++) {
1620 j = adapter->tx_ring[i].reg_idx; 1621 j = adapter->tx_ring[i].reg_idx;
@@ -1653,6 +1654,10 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1653 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1654 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1654 } 1655 }
1655 1656
1657 msg[0] = IXGBE_VF_SET_LPE;
1658 msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1659 hw->mbx.ops.write_posted(hw, msg, 2);
1660
1656 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1661 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1657 ixgbevf_napi_enable_all(adapter); 1662 ixgbevf_napi_enable_all(adapter);
1658 1663
@@ -1667,24 +1672,20 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1667 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 1672 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1668 adapter->link_check_timeout = jiffies; 1673 adapter->link_check_timeout = jiffies;
1669 mod_timer(&adapter->watchdog_timer, jiffies); 1674 mod_timer(&adapter->watchdog_timer, jiffies);
1670 return 0;
1671} 1675}
1672 1676
1673int ixgbevf_up(struct ixgbevf_adapter *adapter) 1677void ixgbevf_up(struct ixgbevf_adapter *adapter)
1674{ 1678{
1675 int err;
1676 struct ixgbe_hw *hw = &adapter->hw; 1679 struct ixgbe_hw *hw = &adapter->hw;
1677 1680
1678 ixgbevf_configure(adapter); 1681 ixgbevf_configure(adapter);
1679 1682
1680 err = ixgbevf_up_complete(adapter); 1683 ixgbevf_up_complete(adapter);
1681 1684
1682 /* clear any pending interrupts, may auto mask */ 1685 /* clear any pending interrupts, may auto mask */
1683 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1686 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1684 1687
1685 ixgbevf_irq_enable(adapter, true, true); 1688 ixgbevf_irq_enable(adapter, true, true);
1686
1687 return err;
1688} 1689}
1689 1690
1690/** 1691/**
@@ -2673,9 +2674,7 @@ static int ixgbevf_open(struct net_device *netdev)
2673 */ 2674 */
2674 ixgbevf_map_rings_to_vectors(adapter); 2675 ixgbevf_map_rings_to_vectors(adapter);
2675 2676
2676 err = ixgbevf_up_complete(adapter); 2677 ixgbevf_up_complete(adapter);
2677 if (err)
2678 goto err_up;
2679 2678
2680 /* clear any pending interrupts, may auto mask */ 2679 /* clear any pending interrupts, may auto mask */
2681 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2680 IXGBE_READ_REG(hw, IXGBE_VTEICR);
@@ -2689,7 +2688,6 @@ static int ixgbevf_open(struct net_device *netdev)
2689 2688
2690err_req_irq: 2689err_req_irq:
2691 ixgbevf_down(adapter); 2690 ixgbevf_down(adapter);
2692err_up:
2693 ixgbevf_free_irq(adapter); 2691 ixgbevf_free_irq(adapter);
2694err_setup_rx: 2692err_setup_rx:
2695 ixgbevf_free_all_rx_resources(adapter); 2693 ixgbevf_free_all_rx_resources(adapter);
@@ -3196,9 +3194,11 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3196 /* must set new MTU before calling down or up */ 3194 /* must set new MTU before calling down or up */
3197 netdev->mtu = new_mtu; 3195 netdev->mtu = new_mtu;
3198 3196
3199 msg[0] = IXGBE_VF_SET_LPE; 3197 if (!netif_running(netdev)) {
3200 msg[1] = max_frame; 3198 msg[0] = IXGBE_VF_SET_LPE;
3201 hw->mbx.ops.write_posted(hw, msg, 2); 3199 msg[1] = max_frame;
3200 hw->mbx.ops.write_posted(hw, msg, 2);
3201 }
3202 3202
3203 if (netif_running(netdev)) 3203 if (netif_running(netdev))
3204 ixgbevf_reinit_locked(adapter); 3204 ixgbevf_reinit_locked(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 74be7411242a..ec89b86f7ca4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -404,11 +404,17 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
404 else 404 else
405 *link_up = false; 405 *link_up = false;
406 406
407 if ((links_reg & IXGBE_LINKS_SPEED_82599) == 407 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
408 IXGBE_LINKS_SPEED_10G_82599) 408 case IXGBE_LINKS_SPEED_10G_82599:
409 *speed = IXGBE_LINK_SPEED_10GB_FULL; 409 *speed = IXGBE_LINK_SPEED_10GB_FULL;
410 else 410 break;
411 case IXGBE_LINKS_SPEED_1G_82599:
411 *speed = IXGBE_LINK_SPEED_1GB_FULL; 412 *speed = IXGBE_LINK_SPEED_1GB_FULL;
413 break;
414 case IXGBE_LINKS_SPEED_100_82599:
415 *speed = IXGBE_LINK_SPEED_100_FULL;
416 break;
417 }
412 418
413 return 0; 419 return 0;
414} 420}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5e1ca0f05090..c8950da60e6b 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1665,6 +1665,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1665 .get_strings = mv643xx_eth_get_strings, 1665 .get_strings = mv643xx_eth_get_strings,
1666 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1666 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
1667 .get_sset_count = mv643xx_eth_get_sset_count, 1667 .get_sset_count = mv643xx_eth_get_sset_count,
1668 .get_ts_info = ethtool_op_get_ts_info,
1668}; 1669};
1669 1670
1670 1671
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index efec6b60b327..1db023b075a1 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1456,6 +1456,7 @@ static const struct ethtool_ops pxa168_ethtool_ops = {
1456 .set_settings = pxa168_set_settings, 1456 .set_settings = pxa168_set_settings,
1457 .get_drvinfo = pxa168_get_drvinfo, 1457 .get_drvinfo = pxa168_get_drvinfo,
1458 .get_link = ethtool_op_get_link, 1458 .get_link = ethtool_op_get_link,
1459 .get_ts_info = ethtool_op_get_ts_info,
1459}; 1460};
1460 1461
1461static const struct net_device_ops pxa168_eth_netdev_ops = { 1462static const struct net_device_ops pxa168_eth_netdev_ops = {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index c9b504e2dfc3..cace36f2ab92 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2494,8 +2494,13 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
2494 skb_copy_from_linear_data(re->skb, skb->data, length); 2494 skb_copy_from_linear_data(re->skb, skb->data, length);
2495 skb->ip_summed = re->skb->ip_summed; 2495 skb->ip_summed = re->skb->ip_summed;
2496 skb->csum = re->skb->csum; 2496 skb->csum = re->skb->csum;
2497 skb->rxhash = re->skb->rxhash;
2498 skb->vlan_tci = re->skb->vlan_tci;
2499
2497 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, 2500 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
2498 length, PCI_DMA_FROMDEVICE); 2501 length, PCI_DMA_FROMDEVICE);
2502 re->skb->vlan_tci = 0;
2503 re->skb->rxhash = 0;
2499 re->skb->ip_summed = CHECKSUM_NONE; 2504 re->skb->ip_summed = CHECKSUM_NONE;
2500 skb_put(skb, length); 2505 skb_put(skb, length);
2501 } 2506 }
@@ -2580,9 +2585,6 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2580 struct sk_buff *skb = NULL; 2585 struct sk_buff *skb = NULL;
2581 u16 count = (status & GMR_FS_LEN) >> 16; 2586 u16 count = (status & GMR_FS_LEN) >> 16;
2582 2587
2583 if (status & GMR_FS_VLAN)
2584 count -= VLAN_HLEN; /* Account for vlan tag */
2585
2586 netif_printk(sky2, rx_status, KERN_DEBUG, dev, 2588 netif_printk(sky2, rx_status, KERN_DEBUG, dev,
2587 "rx slot %u status 0x%x len %d\n", 2589 "rx slot %u status 0x%x len %d\n",
2588 sky2->rx_next, status, length); 2590 sky2->rx_next, status, length);
@@ -2590,6 +2592,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2590 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; 2592 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2591 prefetch(sky2->rx_ring + sky2->rx_next); 2593 prefetch(sky2->rx_ring + sky2->rx_next);
2592 2594
2595 if (vlan_tx_tag_present(re->skb))
2596 count -= VLAN_HLEN; /* Account for vlan tag */
2597
2593 /* This chip has hardware problems that generates bogus status. 2598 /* This chip has hardware problems that generates bogus status.
2594 * So do only marginal checking and expect higher level protocols 2599 * So do only marginal checking and expect higher level protocols
2595 * to handle crap frames. 2600 * to handle crap frames.
@@ -2647,11 +2652,8 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2647} 2652}
2648 2653
2649static inline void sky2_skb_rx(const struct sky2_port *sky2, 2654static inline void sky2_skb_rx(const struct sky2_port *sky2,
2650 u32 status, struct sk_buff *skb) 2655 struct sk_buff *skb)
2651{ 2656{
2652 if (status & GMR_FS_VLAN)
2653 __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
2654
2655 if (skb->ip_summed == CHECKSUM_NONE) 2657 if (skb->ip_summed == CHECKSUM_NONE)
2656 netif_receive_skb(skb); 2658 netif_receive_skb(skb);
2657 else 2659 else
@@ -2705,6 +2707,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
2705 } 2707 }
2706} 2708}
2707 2709
2710static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
2711{
2712 struct sk_buff *skb;
2713
2714 skb = sky2->rx_ring[sky2->rx_next].skb;
2715 __vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
2716}
2717
2708static void sky2_rx_hash(struct sky2_port *sky2, u32 status) 2718static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
2709{ 2719{
2710 struct sk_buff *skb; 2720 struct sk_buff *skb;
@@ -2763,8 +2773,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2763 } 2773 }
2764 2774
2765 skb->protocol = eth_type_trans(skb, dev); 2775 skb->protocol = eth_type_trans(skb, dev);
2766 2776 sky2_skb_rx(sky2, skb);
2767 sky2_skb_rx(sky2, status, skb);
2768 2777
2769 /* Stop after net poll weight */ 2778 /* Stop after net poll weight */
2770 if (++work_done >= to_do) 2779 if (++work_done >= to_do)
@@ -2772,11 +2781,11 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2772 break; 2781 break;
2773 2782
2774 case OP_RXVLAN: 2783 case OP_RXVLAN:
2775 sky2->rx_tag = length; 2784 sky2_rx_tag(sky2, length);
2776 break; 2785 break;
2777 2786
2778 case OP_RXCHKSVLAN: 2787 case OP_RXCHKSVLAN:
2779 sky2->rx_tag = length; 2788 sky2_rx_tag(sky2, length);
2780 /* fall through */ 2789 /* fall through */
2781 case OP_RXCHKS: 2790 case OP_RXCHKS:
2782 if (likely(dev->features & NETIF_F_RXCSUM)) 2791 if (likely(dev->features & NETIF_F_RXCSUM))
@@ -4816,14 +4825,14 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
4816 4825
4817 init_waitqueue_head(&hw->msi_wait); 4826 init_waitqueue_head(&hw->msi_wait);
4818 4827
4819 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4820
4821 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); 4828 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
4822 if (err) { 4829 if (err) {
4823 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 4830 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4824 return err; 4831 return err;
4825 } 4832 }
4826 4833
4834 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4835
4827 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); 4836 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
4828 sky2_read8(hw, B0_CTST); 4837 sky2_read8(hw, B0_CTST);
4829 4838
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ff6f58bf822a..3c896ce80b71 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2241,7 +2241,6 @@ struct sky2_port {
2241 u16 rx_pending; 2241 u16 rx_pending;
2242 u16 rx_data_size; 2242 u16 rx_data_size;
2243 u16 rx_nfrags; 2243 u16 rx_nfrags;
2244 u16 rx_tag;
2245 2244
2246 struct { 2245 struct {
2247 unsigned long last; 2246 unsigned long last;
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 1bb93531f1ba..5f027f95cc84 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -11,6 +11,18 @@ config MLX4_EN
11 This driver supports Mellanox Technologies ConnectX Ethernet 11 This driver supports Mellanox Technologies ConnectX Ethernet
12 devices. 12 devices.
13 13
14config MLX4_EN_DCB
15 bool "Data Center Bridging (DCB) Support"
16 default y
17 depends on MLX4_EN && DCB
18 ---help---
19 Say Y here if you want to use Data Center Bridging (DCB) in the
20 driver.
21 If set to N, will not be able to configure QoS and ratelimit attributes.
22 This flag is depended on the kernel's DCB support.
23
24 If unsure, set to Y
25
14config MLX4_CORE 26config MLX4_CORE
15 tristate 27 tristate
16 depends on PCI 28 depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 4a40ab967eeb..293127d28b33 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ 8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o en_selftest.o 9 en_resources.o en_netdev.o en_selftest.o
10mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 773c70ea3f62..1bcead1fa2f6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1254,7 +1254,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1254 struct mlx4_priv *priv = mlx4_priv(dev); 1254 struct mlx4_priv *priv = mlx4_priv(dev);
1255 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 1255 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1256 u32 reply; 1256 u32 reply;
1257 u32 slave_status = 0;
1258 u8 is_going_down = 0; 1257 u8 is_going_down = 0;
1259 int i; 1258 int i;
1260 1259
@@ -1274,10 +1273,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1274 } 1273 }
1275 /*check if we are in the middle of FLR process, 1274 /*check if we are in the middle of FLR process,
1276 if so return "retry" status to the slave*/ 1275 if so return "retry" status to the slave*/
1277 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { 1276 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
1278 slave_status = MLX4_DELAY_RESET_SLAVE;
1279 goto inform_slave_state; 1277 goto inform_slave_state;
1280 }
1281 1278
1282 /* write the version in the event field */ 1279 /* write the version in the event field */
1283 reply |= mlx4_comm_get_version(); 1280 reply |= mlx4_comm_get_version();
@@ -1557,7 +1554,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1557 return 0; 1554 return 0;
1558 1555
1559err_resource: 1556err_resource:
1560 mlx4_free_resource_tracker(dev); 1557 mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
1561err_thread: 1558err_thread:
1562 flush_workqueue(priv->mfunc.master.comm_wq); 1559 flush_workqueue(priv->mfunc.master.comm_wq);
1563 destroy_workqueue(priv->mfunc.master.comm_wq); 1560 destroy_workqueue(priv->mfunc.master.comm_wq);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 00b81272e314..908a460d8db6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -124,11 +124,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
124 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; 124 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
125 cq->mcq.event = mlx4_en_cq_event; 125 cq->mcq.event = mlx4_en_cq_event;
126 126
127 if (cq->is_tx) { 127 if (!cq->is_tx) {
128 init_timer(&cq->timer);
129 cq->timer.function = mlx4_en_poll_tx_cq;
130 cq->timer.data = (unsigned long) cq;
131 } else {
132 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); 128 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
133 napi_enable(&cq->napi); 129 napi_enable(&cq->napi);
134 } 130 }
@@ -151,16 +147,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
151 147
152void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 148void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
153{ 149{
154 struct mlx4_en_dev *mdev = priv->mdev; 150 if (!cq->is_tx) {
155
156 if (cq->is_tx)
157 del_timer(&cq->timer);
158 else {
159 napi_disable(&cq->napi); 151 napi_disable(&cq->napi);
160 netif_napi_del(&cq->napi); 152 netif_napi_del(&cq->napi);
161 } 153 }
162 154
163 mlx4_cq_free(mdev->dev, &cq->mcq); 155 mlx4_cq_free(priv->mdev->dev, &cq->mcq);
164} 156}
165 157
166/* Set rx cq moderation parameters */ 158/* Set rx cq moderation parameters */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
new file mode 100644
index 000000000000..5d36795877cb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -0,0 +1,255 @@
1/*
2 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/dcbnl.h>
35#include <linux/math64.h>
36
37#include "mlx4_en.h"
38
39static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
40 struct ieee_ets *ets)
41{
42 struct mlx4_en_priv *priv = netdev_priv(dev);
43 struct ieee_ets *my_ets = &priv->ets;
44
45 /* No IEEE PFC settings available */
46 if (!my_ets)
47 return -EINVAL;
48
49 ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
50 ets->cbs = my_ets->cbs;
51 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
52 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
53 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
54
55 return 0;
56}
57
58static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
59{
60 int i;
61 int total_ets_bw = 0;
62 int has_ets_tc = 0;
63
64 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
65 if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
66 en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
67 i, ets->prio_tc[i]);
68 return -EINVAL;
69 }
70
71 switch (ets->tc_tsa[i]) {
72 case IEEE_8021QAZ_TSA_STRICT:
73 break;
74 case IEEE_8021QAZ_TSA_ETS:
75 has_ets_tc = 1;
76 total_ets_bw += ets->tc_tx_bw[i];
77 break;
78 default:
79 en_err(priv, "TC[%d]: Not supported TSA: %d\n",
80 i, ets->tc_tsa[i]);
81 return -ENOTSUPP;
82 }
83 }
84
85 if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
86 en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
87 total_ets_bw);
88 return -EINVAL;
89 }
90
91 return 0;
92}
93
94static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
95 struct ieee_ets *ets, u16 *ratelimit)
96{
97 struct mlx4_en_dev *mdev = priv->mdev;
98 int num_strict = 0;
99 int i;
100 __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
101 __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
102
103 ets = ets ?: &priv->ets;
104 ratelimit = ratelimit ?: priv->maxrate;
105
106 /* higher TC means higher priority => lower pg */
107 for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
108 switch (ets->tc_tsa[i]) {
109 case IEEE_8021QAZ_TSA_STRICT:
110 pg[i] = num_strict++;
111 tc_tx_bw[i] = MLX4_EN_BW_MAX;
112 break;
113 case IEEE_8021QAZ_TSA_ETS:
114 pg[i] = MLX4_EN_TC_ETS;
115 tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
116 break;
117 }
118 }
119
120 return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
121 ratelimit);
122}
123
124static int
125mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
126{
127 struct mlx4_en_priv *priv = netdev_priv(dev);
128 struct mlx4_en_dev *mdev = priv->mdev;
129 int err;
130
131 err = mlx4_en_ets_validate(priv, ets);
132 if (err)
133 return err;
134
135 err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
136 if (err)
137 return err;
138
139 err = mlx4_en_config_port_scheduler(priv, ets, NULL);
140 if (err)
141 return err;
142
143 memcpy(&priv->ets, ets, sizeof(priv->ets));
144
145 return 0;
146}
147
148static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
149 struct ieee_pfc *pfc)
150{
151 struct mlx4_en_priv *priv = netdev_priv(dev);
152
153 pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
154 pfc->pfc_en = priv->prof->tx_ppp;
155
156 return 0;
157}
158
159static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
160 struct ieee_pfc *pfc)
161{
162 struct mlx4_en_priv *priv = netdev_priv(dev);
163 struct mlx4_en_dev *mdev = priv->mdev;
164 int err;
165
166 en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
167 pfc->pfc_cap,
168 pfc->pfc_en,
169 pfc->mbc,
170 pfc->delay);
171
172 priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
173 priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
174
175 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
176 priv->rx_skb_size + ETH_FCS_LEN,
177 priv->prof->tx_pause,
178 priv->prof->tx_ppp,
179 priv->prof->rx_pause,
180 priv->prof->rx_ppp);
181 if (err)
182 en_err(priv, "Failed setting pause params\n");
183
184 return err;
185}
186
187static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
188{
189 return DCB_CAP_DCBX_VER_IEEE;
190}
191
192static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
193{
194 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
195 (mode & DCB_CAP_DCBX_VER_CEE) ||
196 !(mode & DCB_CAP_DCBX_VER_IEEE) ||
197 !(mode & DCB_CAP_DCBX_HOST))
198 return 1;
199
200 return 0;
201}
202
203#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
204static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
205 struct ieee_maxrate *maxrate)
206{
207 struct mlx4_en_priv *priv = netdev_priv(dev);
208 int i;
209
210 if (!priv->maxrate)
211 return -EINVAL;
212
213 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
214 maxrate->tc_maxrate[i] =
215 priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
216
217 return 0;
218}
219
220static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
221 struct ieee_maxrate *maxrate)
222{
223 struct mlx4_en_priv *priv = netdev_priv(dev);
224 u16 tmp[IEEE_8021QAZ_MAX_TCS];
225 int i, err;
226
227 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
228 /* Convert from Kbps into HW units, rounding result up.
229 * Setting to 0, means unlimited BW.
230 */
231 tmp[i] = div_u64(maxrate->tc_maxrate[i] +
232 MLX4_RATELIMIT_UNITS_IN_KB - 1,
233 MLX4_RATELIMIT_UNITS_IN_KB);
234 }
235
236 err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
237 if (err)
238 return err;
239
240 memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
241
242 return 0;
243}
244
245const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
246 .ieee_getets = mlx4_en_dcbnl_ieee_getets,
247 .ieee_setets = mlx4_en_dcbnl_ieee_setets,
248 .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
249 .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
250 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
251 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
252
253 .getdcbx = mlx4_en_dcbnl_getdcbx,
254 .setdcbx = mlx4_en_dcbnl_setdcbx,
255};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 70346fd7f9c4..72901ce2b088 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -83,7 +83,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
83#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS) 83#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
84 84
85static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= { 85static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
86 "Interupt Test", 86 "Interrupt Test",
87 "Link Test", 87 "Link Test",
88 "Speed Test", 88 "Speed Test",
89 "Register Test", 89 "Register Test",
@@ -359,8 +359,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
359{ 359{
360 struct mlx4_en_priv *priv = netdev_priv(dev); 360 struct mlx4_en_priv *priv = netdev_priv(dev);
361 361
362 coal->tx_coalesce_usecs = 0; 362 coal->tx_coalesce_usecs = priv->tx_usecs;
363 coal->tx_max_coalesced_frames = 0; 363 coal->tx_max_coalesced_frames = priv->tx_frames;
364 coal->rx_coalesce_usecs = priv->rx_usecs; 364 coal->rx_coalesce_usecs = priv->rx_usecs;
365 coal->rx_max_coalesced_frames = priv->rx_frames; 365 coal->rx_max_coalesced_frames = priv->rx_frames;
366 366
@@ -388,6 +388,21 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
388 MLX4_EN_RX_COAL_TIME : 388 MLX4_EN_RX_COAL_TIME :
389 coal->rx_coalesce_usecs; 389 coal->rx_coalesce_usecs;
390 390
391 /* Setting TX coalescing parameters */
392 if (coal->tx_coalesce_usecs != priv->tx_usecs ||
393 coal->tx_max_coalesced_frames != priv->tx_frames) {
394 priv->tx_usecs = coal->tx_coalesce_usecs;
395 priv->tx_frames = coal->tx_max_coalesced_frames;
396 for (i = 0; i < priv->tx_ring_num; i++) {
397 priv->tx_cq[i].moder_cnt = priv->tx_frames;
398 priv->tx_cq[i].moder_time = priv->tx_usecs;
399 if (mlx4_en_set_cq_moder(priv, &priv->tx_cq[i])) {
400 en_warn(priv, "Failed changing moderation "
401 "for TX cq %d\n", i);
402 }
403 }
404 }
405
391 /* Set adaptive coalescing params */ 406 /* Set adaptive coalescing params */
392 priv->pkt_rate_low = coal->pkt_rate_low; 407 priv->pkt_rate_low = coal->pkt_rate_low;
393 priv->rx_usecs_low = coal->rx_coalesce_usecs_low; 408 priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 2097a7d3c5b8..988b2424e1c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -101,6 +101,8 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
101 int i; 101 int i;
102 102
103 params->udp_rss = udp_rss; 103 params->udp_rss = udp_rss;
104 params->num_tx_rings_p_up = min_t(int, num_online_cpus(),
105 MLX4_EN_MAX_TX_RING_P_UP);
104 if (params->udp_rss && !(mdev->dev->caps.flags 106 if (params->udp_rss && !(mdev->dev->caps.flags
105 & MLX4_DEV_CAP_FLAG_UDP_RSS)) { 107 & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
106 mlx4_warn(mdev, "UDP RSS is not supported on this device.\n"); 108 mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
@@ -113,8 +115,8 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
113 params->prof[i].tx_ppp = pfctx; 115 params->prof[i].tx_ppp = pfctx;
114 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; 116 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
115 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; 117 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
116 params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS + 118 params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
117 (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS; 119 MLX4_EN_NUM_UP;
118 params->prof[i].rss_rings = 0; 120 params->prof[i].rss_rings = 0;
119 } 121 }
120 122
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 31b455a49273..926d8aac941c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,6 +45,27 @@
45#include "mlx4_en.h" 45#include "mlx4_en.h"
46#include "en_port.h" 46#include "en_port.h"
47 47
48static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
49{
50 struct mlx4_en_priv *priv = netdev_priv(dev);
51 int i;
52 unsigned int q, offset = 0;
53
54 if (up && up != MLX4_EN_NUM_UP)
55 return -EINVAL;
56
57 netdev_set_num_tc(dev, up);
58
59 /* Partition Tx queues evenly amongst UP's */
60 q = priv->tx_ring_num / up;
61 for (i = 0; i < up; i++) {
62 netdev_set_tc_queue(dev, i, q, offset);
63 offset += q;
64 }
65
66 return 0;
67}
68
48static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 69static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
49{ 70{
50 struct mlx4_en_priv *priv = netdev_priv(dev); 71 struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -421,6 +442,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
421 */ 442 */
422 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 443 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
423 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 444 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
445 priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
446 priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
424 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - " 447 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
425 "rx_frames:%d rx_usecs:%d\n", 448 "rx_frames:%d rx_usecs:%d\n",
426 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 449 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
@@ -437,8 +460,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
437 460
438 for (i = 0; i < priv->tx_ring_num; i++) { 461 for (i = 0; i < priv->tx_ring_num; i++) {
439 cq = &priv->tx_cq[i]; 462 cq = &priv->tx_cq[i];
440 cq->moder_cnt = MLX4_EN_TX_COAL_PKTS; 463 cq->moder_cnt = priv->tx_frames;
441 cq->moder_time = MLX4_EN_TX_COAL_TIME; 464 cq->moder_time = priv->tx_usecs;
442 } 465 }
443 466
444 /* Reset auto-moderation params */ 467 /* Reset auto-moderation params */
@@ -650,12 +673,18 @@ int mlx4_en_start_port(struct net_device *dev)
650 673
651 /* Configure ring */ 674 /* Configure ring */
652 tx_ring = &priv->tx_ring[i]; 675 tx_ring = &priv->tx_ring[i];
653 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn); 676 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
677 i / priv->mdev->profile.num_tx_rings_p_up);
654 if (err) { 678 if (err) {
655 en_err(priv, "Failed allocating Tx ring\n"); 679 en_err(priv, "Failed allocating Tx ring\n");
656 mlx4_en_deactivate_cq(priv, cq); 680 mlx4_en_deactivate_cq(priv, cq);
657 goto tx_err; 681 goto tx_err;
658 } 682 }
683 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
684
685 /* Arm CQ for TX completions */
686 mlx4_en_arm_cq(priv, cq);
687
659 /* Set initial ownership of all Tx TXBBs to SW (1) */ 688 /* Set initial ownership of all Tx TXBBs to SW (1) */
660 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 689 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
661 *((u32 *) (tx_ring->buf + j)) = 0xffffffff; 690 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
@@ -797,12 +826,15 @@ static void mlx4_en_restart(struct work_struct *work)
797 watchdog_task); 826 watchdog_task);
798 struct mlx4_en_dev *mdev = priv->mdev; 827 struct mlx4_en_dev *mdev = priv->mdev;
799 struct net_device *dev = priv->dev; 828 struct net_device *dev = priv->dev;
829 int i;
800 830
801 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 831 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
802 832
803 mutex_lock(&mdev->state_lock); 833 mutex_lock(&mdev->state_lock);
804 if (priv->port_up) { 834 if (priv->port_up) {
805 mlx4_en_stop_port(dev); 835 mlx4_en_stop_port(dev);
836 for (i = 0; i < priv->tx_ring_num; i++)
837 netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
806 if (mlx4_en_start_port(dev)) 838 if (mlx4_en_start_port(dev))
807 en_err(priv, "Failed restarting port %d\n", priv->port); 839 en_err(priv, "Failed restarting port %d\n", priv->port);
808 } 840 }
@@ -966,6 +998,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
966 mutex_unlock(&mdev->state_lock); 998 mutex_unlock(&mdev->state_lock);
967 999
968 mlx4_en_free_resources(priv); 1000 mlx4_en_free_resources(priv);
1001
1002 kfree(priv->tx_ring);
1003 kfree(priv->tx_cq);
1004
969 free_netdev(dev); 1005 free_netdev(dev);
970} 1006}
971 1007
@@ -1036,6 +1072,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
1036 .ndo_poll_controller = mlx4_en_netpoll, 1072 .ndo_poll_controller = mlx4_en_netpoll,
1037#endif 1073#endif
1038 .ndo_set_features = mlx4_en_set_features, 1074 .ndo_set_features = mlx4_en_set_features,
1075 .ndo_setup_tc = mlx4_en_setup_tc,
1039}; 1076};
1040 1077
1041int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 1078int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1070,6 +1107,18 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1070 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | 1107 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
1071 MLX4_WQE_CTRL_SOLICITED); 1108 MLX4_WQE_CTRL_SOLICITED);
1072 priv->tx_ring_num = prof->tx_ring_num; 1109 priv->tx_ring_num = prof->tx_ring_num;
1110 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) *
1111 priv->tx_ring_num, GFP_KERNEL);
1112 if (!priv->tx_ring) {
1113 err = -ENOMEM;
1114 goto out;
1115 }
1116 priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * priv->tx_ring_num,
1117 GFP_KERNEL);
1118 if (!priv->tx_cq) {
1119 err = -ENOMEM;
1120 goto out;
1121 }
1073 priv->rx_ring_num = prof->rx_ring_num; 1122 priv->rx_ring_num = prof->rx_ring_num;
1074 priv->mac_index = -1; 1123 priv->mac_index = -1;
1075 priv->msg_enable = MLX4_EN_MSG_LEVEL; 1124 priv->msg_enable = MLX4_EN_MSG_LEVEL;
@@ -1079,6 +1128,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1079 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 1128 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
1080 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 1129 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
1081 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 1130 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
1131#ifdef CONFIG_MLX4_EN_DCB
1132 if (!mlx4_is_slave(priv->mdev->dev))
1133 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
1134#endif
1082 1135
1083 /* Query for default mac and max mtu */ 1136 /* Query for default mac and max mtu */
1084 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 1137 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 6934fd7e66ed..745090b49d9e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,6 +39,8 @@
39#define SET_PORT_PROMISC_SHIFT 31 39#define SET_PORT_PROMISC_SHIFT 31
40#define SET_PORT_MC_PROMISC_SHIFT 30 40#define SET_PORT_MC_PROMISC_SHIFT 30
41 41
42#define MLX4_EN_NUM_TC 8
43
42#define VLAN_FLTR_SIZE 128 44#define VLAN_FLTR_SIZE 128
43struct mlx4_set_vlan_fltr_mbox { 45struct mlx4_set_vlan_fltr_mbox {
44 __be32 entry[VLAN_FLTR_SIZE]; 46 __be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index bcbc54c16947..10c24c784b70 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -39,7 +39,7 @@
39 39
40void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, 40void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
41 int is_tx, int rss, int qpn, int cqn, 41 int is_tx, int rss, int qpn, int cqn,
42 struct mlx4_qp_context *context) 42 int user_prio, struct mlx4_qp_context *context)
43{ 43{
44 struct mlx4_en_dev *mdev = priv->mdev; 44 struct mlx4_en_dev *mdev = priv->mdev;
45 45
@@ -57,6 +57,10 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
57 context->local_qpn = cpu_to_be32(qpn); 57 context->local_qpn = cpu_to_be32(qpn);
58 context->pri_path.ackto = 1 & 0x07; 58 context->pri_path.ackto = 1 & 0x07;
59 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; 59 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
60 if (user_prio >= 0) {
61 context->pri_path.sched_queue |= user_prio << 3;
62 context->pri_path.feup = 1 << 6;
63 }
60 context->pri_path.counter_index = 0xff; 64 context->pri_path.counter_index = 0xff;
61 context->cqn_send = cpu_to_be32(cqn); 65 context->cqn_send = cpu_to_be32(cqn);
62 context->cqn_recv = cpu_to_be32(cqn); 66 context->cqn_recv = cpu_to_be32(cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9adbd53da525..d49a7ac3187d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -823,7 +823,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
823 823
824 memset(context, 0, sizeof *context); 824 memset(context, 0, sizeof *context);
825 mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, 825 mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
826 qpn, ring->cqn, context); 826 qpn, ring->cqn, -1, context);
827 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); 827 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
828 828
829 /* Cancel FCS removal if FW allows */ 829 /* Cancel FCS removal if FW allows */
@@ -890,7 +890,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
890 } 890 }
891 rss_map->indir_qp.event = mlx4_en_sqp_event; 891 rss_map->indir_qp.event = mlx4_en_sqp_event;
892 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, 892 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
893 priv->rx_ring[0].cqn, &context); 893 priv->rx_ring[0].cqn, -1, &context);
894 894
895 if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) 895 if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
896 rss_rings = priv->rx_ring_num; 896 rss_rings = priv->rx_ring_num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 17968244c399..019d856b1334 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -67,8 +67,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
67 67
68 inline_thold = min(inline_thold, MAX_INLINE); 68 inline_thold = min(inline_thold, MAX_INLINE);
69 69
70 spin_lock_init(&ring->comp_lock);
71
72 tmp = size * sizeof(struct mlx4_en_tx_info); 70 tmp = size * sizeof(struct mlx4_en_tx_info);
73 ring->tx_info = vmalloc(tmp); 71 ring->tx_info = vmalloc(tmp);
74 if (!ring->tx_info) 72 if (!ring->tx_info)
@@ -156,7 +154,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
156 154
157int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 155int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
158 struct mlx4_en_tx_ring *ring, 156 struct mlx4_en_tx_ring *ring,
159 int cq) 157 int cq, int user_prio)
160{ 158{
161 struct mlx4_en_dev *mdev = priv->mdev; 159 struct mlx4_en_dev *mdev = priv->mdev;
162 int err; 160 int err;
@@ -174,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
174 ring->doorbell_qpn = ring->qp.qpn << 8; 172 ring->doorbell_qpn = ring->qp.qpn << 8;
175 173
176 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 174 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
177 ring->cqn, &ring->context); 175 ring->cqn, user_prio, &ring->context);
178 if (ring->bf_enabled) 176 if (ring->bf_enabled)
179 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); 177 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
180 178
@@ -317,6 +315,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
317 int size = cq->size; 315 int size = cq->size;
318 u32 size_mask = ring->size_mask; 316 u32 size_mask = ring->size_mask;
319 struct mlx4_cqe *buf = cq->buf; 317 struct mlx4_cqe *buf = cq->buf;
318 u32 packets = 0;
319 u32 bytes = 0;
320 320
321 if (!priv->port_up) 321 if (!priv->port_up)
322 return; 322 return;
@@ -345,6 +345,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
345 priv, ring, ring_index, 345 priv, ring, ring_index,
346 !!((ring->cons + txbbs_skipped) & 346 !!((ring->cons + txbbs_skipped) &
347 ring->size)); 347 ring->size));
348 packets++;
349 bytes += ring->tx_info[ring_index].nr_bytes;
348 } while (ring_index != new_index); 350 } while (ring_index != new_index);
349 351
350 ++cons_index; 352 ++cons_index;
@@ -361,13 +363,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
361 mlx4_cq_set_ci(mcq); 363 mlx4_cq_set_ci(mcq);
362 wmb(); 364 wmb();
363 ring->cons += txbbs_skipped; 365 ring->cons += txbbs_skipped;
366 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
364 367
365 /* Wakeup Tx queue if this ring stopped it */ 368 /* Wakeup Tx queue if this ring stopped it */
366 if (unlikely(ring->blocked)) { 369 if (unlikely(ring->blocked)) {
367 if ((u32) (ring->prod - ring->cons) <= 370 if ((u32) (ring->prod - ring->cons) <=
368 ring->size - HEADROOM - MAX_DESC_TXBBS) { 371 ring->size - HEADROOM - MAX_DESC_TXBBS) {
369 ring->blocked = 0; 372 ring->blocked = 0;
370 netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring)); 373 netif_tx_wake_queue(ring->tx_queue);
371 priv->port_stats.wake_queue++; 374 priv->port_stats.wake_queue++;
372 } 375 }
373 } 376 }
@@ -377,41 +380,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
377{ 380{
378 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); 381 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
379 struct mlx4_en_priv *priv = netdev_priv(cq->dev); 382 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
380 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
381 383
382 if (!spin_trylock(&ring->comp_lock))
383 return;
384 mlx4_en_process_tx_cq(cq->dev, cq); 384 mlx4_en_process_tx_cq(cq->dev, cq);
385 mod_timer(&cq->timer, jiffies + 1); 385 mlx4_en_arm_cq(priv, cq);
386 spin_unlock(&ring->comp_lock);
387} 386}
388 387
389 388
390void mlx4_en_poll_tx_cq(unsigned long data)
391{
392 struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
393 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
394 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
395 u32 inflight;
396
397 INC_PERF_COUNTER(priv->pstats.tx_poll);
398
399 if (!spin_trylock_irq(&ring->comp_lock)) {
400 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
401 return;
402 }
403 mlx4_en_process_tx_cq(cq->dev, cq);
404 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
405
406 /* If there are still packets in flight and the timer has not already
407 * been scheduled by the Tx routine then schedule it here to guarantee
408 * completion processing of these packets */
409 if (inflight && priv->port_up)
410 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
411
412 spin_unlock_irq(&ring->comp_lock);
413}
414
415static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, 389static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
416 struct mlx4_en_tx_ring *ring, 390 struct mlx4_en_tx_ring *ring,
417 u32 index, 391 u32 index,
@@ -440,25 +414,6 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
440 return ring->buf + index * TXBB_SIZE; 414 return ring->buf + index * TXBB_SIZE;
441} 415}
442 416
443static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
444{
445 struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
446 struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
447 unsigned long flags;
448
449 /* If we don't have a pending timer, set one up to catch our recent
450 post in case the interface becomes idle */
451 if (!timer_pending(&cq->timer))
452 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
453
454 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
455 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
456 if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
457 mlx4_en_process_tx_cq(priv->dev, cq);
458 spin_unlock_irqrestore(&ring->comp_lock, flags);
459 }
460}
461
462static int is_inline(struct sk_buff *skb, void **pfrag) 417static int is_inline(struct sk_buff *skb, void **pfrag)
463{ 418{
464 void *ptr; 419 void *ptr;
@@ -571,17 +526,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
571u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) 526u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
572{ 527{
573 struct mlx4_en_priv *priv = netdev_priv(dev); 528 struct mlx4_en_priv *priv = netdev_priv(dev);
574 u16 vlan_tag = 0; 529 u16 rings_p_up = priv->mdev->profile.num_tx_rings_p_up;
530 u8 up = 0;
575 531
576 /* If we support per priority flow control and the packet contains 532 if (dev->num_tc)
577 * a vlan tag, send the packet to the TX ring assigned to that priority 533 return skb_tx_hash(dev, skb);
578 */
579 if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
580 vlan_tag = vlan_tx_tag_get(skb);
581 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
582 }
583 534
584 return skb_tx_hash(dev, skb); 535 if (vlan_tx_tag_present(skb))
536 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
537
538 return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up;
585} 539}
586 540
587static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) 541static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
@@ -594,7 +548,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
594 struct mlx4_en_priv *priv = netdev_priv(dev); 548 struct mlx4_en_priv *priv = netdev_priv(dev);
595 struct mlx4_en_dev *mdev = priv->mdev; 549 struct mlx4_en_dev *mdev = priv->mdev;
596 struct mlx4_en_tx_ring *ring; 550 struct mlx4_en_tx_ring *ring;
597 struct mlx4_en_cq *cq;
598 struct mlx4_en_tx_desc *tx_desc; 551 struct mlx4_en_tx_desc *tx_desc;
599 struct mlx4_wqe_data_seg *data; 552 struct mlx4_wqe_data_seg *data;
600 struct skb_frag_struct *frag; 553 struct skb_frag_struct *frag;
@@ -638,13 +591,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
638 if (unlikely(((int)(ring->prod - ring->cons)) > 591 if (unlikely(((int)(ring->prod - ring->cons)) >
639 ring->size - HEADROOM - MAX_DESC_TXBBS)) { 592 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
640 /* every full Tx ring stops queue */ 593 /* every full Tx ring stops queue */
641 netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind)); 594 netif_tx_stop_queue(ring->tx_queue);
642 ring->blocked = 1; 595 ring->blocked = 1;
643 priv->port_stats.queue_stopped++; 596 priv->port_stats.queue_stopped++;
644 597
645 /* Use interrupts to find out when queue opened */
646 cq = &priv->tx_cq[tx_ind];
647 mlx4_en_arm_cq(priv, cq);
648 return NETDEV_TX_BUSY; 598 return NETDEV_TX_BUSY;
649 } 599 }
650 600
@@ -707,7 +657,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
707 priv->port_stats.tso_packets++; 657 priv->port_stats.tso_packets++;
708 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + 658 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
709 !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); 659 !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
710 ring->bytes += skb->len + (i - 1) * lso_header_size; 660 tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
711 ring->packets += i; 661 ring->packets += i;
712 } else { 662 } else {
713 /* Normal (Non LSO) packet */ 663 /* Normal (Non LSO) packet */
@@ -715,10 +665,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
715 ((ring->prod & ring->size) ? 665 ((ring->prod & ring->size) ?
716 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); 666 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
717 data = &tx_desc->data; 667 data = &tx_desc->data;
718 ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); 668 tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
719 ring->packets++; 669 ring->packets++;
720 670
721 } 671 }
672 ring->bytes += tx_info->nr_bytes;
673 netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
722 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); 674 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
723 675
724 676
@@ -792,9 +744,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
792 iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); 744 iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
793 } 745 }
794 746
795 /* Poll CQ here */
796 mlx4_en_xmit_poll(priv, tx_ind);
797
798 return NETDEV_TX_OK; 747 return NETDEV_TX_OK;
799 748
800tx_drop: 749tx_drop:
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 2a02ba522e60..24429a99190d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1164,9 +1164,8 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
1164 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1164 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1165 if (err) 1165 if (err)
1166 return err; 1166 return err;
1167 priv->mfunc.master.slave_state[slave].init_port_mask |=
1168 (1 << port);
1169 } 1167 }
1168 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1170 ++priv->mfunc.master.init_port_ref[port]; 1169 ++priv->mfunc.master.init_port_ref[port];
1171 return 0; 1170 return 0;
1172} 1171}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 8bb05b46db86..984ace44104f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1306,7 +1306,7 @@ static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
1306 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 1306 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
1307} 1307}
1308 1308
1309int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 1309int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
1310{ 1310{
1311 struct mlx4_priv *priv = mlx4_priv(dev); 1311 struct mlx4_priv *priv = mlx4_priv(dev);
1312 1312
@@ -1319,13 +1319,44 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
1319 1319
1320 return 0; 1320 return 0;
1321} 1321}
1322
1323int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
1324{
1325 u64 out_param;
1326 int err;
1327
1328 if (mlx4_is_mfunc(dev)) {
1329 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
1330 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
1331 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1332 if (!err)
1333 *idx = get_param_l(&out_param);
1334
1335 return err;
1336 }
1337 return __mlx4_counter_alloc(dev, idx);
1338}
1322EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 1339EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
1323 1340
1324void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1341void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1325{ 1342{
1326 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx); 1343 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
1327 return; 1344 return;
1328} 1345}
1346
1347void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1348{
1349 u64 in_param;
1350
1351 if (mlx4_is_mfunc(dev)) {
1352 set_param_l(&in_param, idx);
1353 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
1354 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
1355 MLX4_CMD_WRAPPED);
1356 return;
1357 }
1358 __mlx4_counter_free(dev, idx);
1359}
1329EXPORT_SYMBOL_GPL(mlx4_counter_free); 1360EXPORT_SYMBOL_GPL(mlx4_counter_free);
1330 1361
1331static int mlx4_setup_hca(struct mlx4_dev *dev) 1362static int mlx4_setup_hca(struct mlx4_dev *dev)
@@ -1865,7 +1896,6 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1865 mlx4_err(dev, "Failed to enable sriov," 1896 mlx4_err(dev, "Failed to enable sriov,"
1866 "continuing without sriov enabled" 1897 "continuing without sriov enabled"
1867 " (err = %d).\n", err); 1898 " (err = %d).\n", err);
1868 num_vfs = 0;
1869 err = 0; 1899 err = 0;
1870 } else { 1900 } else {
1871 mlx4_warn(dev, "Running in master mode\n"); 1901 mlx4_warn(dev, "Running in master mode\n");
@@ -2022,7 +2052,7 @@ err_cmd:
2022 mlx4_cmd_cleanup(dev); 2052 mlx4_cmd_cleanup(dev);
2023 2053
2024err_sriov: 2054err_sriov:
2025 if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) 2055 if (dev->flags & MLX4_FLAG_SRIOV)
2026 pci_disable_sriov(pdev); 2056 pci_disable_sriov(pdev);
2027 2057
2028err_rel_own: 2058err_rel_own:
@@ -2070,6 +2100,10 @@ static void mlx4_remove_one(struct pci_dev *pdev)
2070 mlx4_CLOSE_PORT(dev, p); 2100 mlx4_CLOSE_PORT(dev, p);
2071 } 2101 }
2072 2102
2103 if (mlx4_is_master(dev))
2104 mlx4_free_resource_tracker(dev,
2105 RES_TR_FREE_SLAVES_ONLY);
2106
2073 mlx4_cleanup_counters_table(dev); 2107 mlx4_cleanup_counters_table(dev);
2074 mlx4_cleanup_mcg_table(dev); 2108 mlx4_cleanup_mcg_table(dev);
2075 mlx4_cleanup_qp_table(dev); 2109 mlx4_cleanup_qp_table(dev);
@@ -2082,7 +2116,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
2082 mlx4_cleanup_pd_table(dev); 2116 mlx4_cleanup_pd_table(dev);
2083 2117
2084 if (mlx4_is_master(dev)) 2118 if (mlx4_is_master(dev))
2085 mlx4_free_resource_tracker(dev); 2119 mlx4_free_resource_tracker(dev,
2120 RES_TR_FREE_STRUCTS_ONLY);
2086 2121
2087 iounmap(priv->kar); 2122 iounmap(priv->kar);
2088 mlx4_uar_free(dev, &priv->driver_uar); 2123 mlx4_uar_free(dev, &priv->driver_uar);
@@ -2099,7 +2134,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
2099 2134
2100 if (dev->flags & MLX4_FLAG_MSI_X) 2135 if (dev->flags & MLX4_FLAG_MSI_X)
2101 pci_disable_msix(pdev); 2136 pci_disable_msix(pdev);
2102 if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) { 2137 if (dev->flags & MLX4_FLAG_SRIOV) {
2103 mlx4_warn(dev, "Disabling sriov\n"); 2138 mlx4_warn(dev, "Disabling sriov\n");
2104 pci_disable_sriov(pdev); 2139 pci_disable_sriov(pdev);
2105 } 2140 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 4799e824052f..f4a8f98e402a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -357,7 +357,6 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
357 u32 prot; 357 u32 prot;
358 int i; 358 int i;
359 bool found; 359 bool found;
360 int last_index;
361 int err; 360 int err;
362 struct mlx4_priv *priv = mlx4_priv(dev); 361 struct mlx4_priv *priv = mlx4_priv(dev);
363 362
@@ -419,7 +418,6 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
419 if (err) 418 if (err)
420 goto out_mailbox; 419 goto out_mailbox;
421 } 420 }
422 last_index = entry->index;
423 } 421 }
424 422
425 /* add the new qpn to list of promisc qps */ 423 /* add the new qpn to list of promisc qps */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 2a0ff2cc7182..86b6e5a2fabf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -53,6 +53,26 @@
53#define DRV_VERSION "1.1" 53#define DRV_VERSION "1.1"
54#define DRV_RELDATE "Dec, 2011" 54#define DRV_RELDATE "Dec, 2011"
55 55
56#define MLX4_NUM_UP 8
57#define MLX4_NUM_TC 8
58#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
59#define MLX4_RATELIMIT_DEFAULT 0xffff
60
61struct mlx4_set_port_prio2tc_context {
62 u8 prio2tc[4];
63};
64
65struct mlx4_port_scheduler_tc_cfg_be {
66 __be16 pg;
67 __be16 bw_precentage;
68 __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
69 __be16 max_bw_value;
70};
71
72struct mlx4_set_port_scheduler_context {
73 struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
74};
75
56enum { 76enum {
57 MLX4_HCR_BASE = 0x80680, 77 MLX4_HCR_BASE = 0x80680,
58 MLX4_HCR_SIZE = 0x0001c, 78 MLX4_HCR_SIZE = 0x0001c,
@@ -126,6 +146,11 @@ enum mlx4_alloc_mode {
126 RES_OP_MAP_ICM, 146 RES_OP_MAP_ICM,
127}; 147};
128 148
149enum mlx4_res_tracker_free_type {
150 RES_TR_FREE_ALL,
151 RES_TR_FREE_SLAVES_ONLY,
152 RES_TR_FREE_STRUCTS_ONLY,
153};
129 154
130/* 155/*
131 *Virtual HCR structures. 156 *Virtual HCR structures.
@@ -851,6 +876,10 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
851int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); 876int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
852int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 877int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
853 int start_index, int npages, u64 *page_list); 878 int start_index, int npages, u64 *page_list);
879int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
880void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
881int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
882void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
854 883
855void mlx4_start_catas_poll(struct mlx4_dev *dev); 884void mlx4_start_catas_poll(struct mlx4_dev *dev);
856void mlx4_stop_catas_poll(struct mlx4_dev *dev); 885void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -1007,7 +1036,8 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
1007void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); 1036void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
1008int mlx4_init_resource_tracker(struct mlx4_dev *dev); 1037int mlx4_init_resource_tracker(struct mlx4_dev *dev);
1009 1038
1010void mlx4_free_resource_tracker(struct mlx4_dev *dev); 1039void mlx4_free_resource_tracker(struct mlx4_dev *dev,
1040 enum mlx4_res_tracker_free_type type);
1011 1041
1012int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, 1042int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
1013 struct mlx4_vhcr *vhcr, 1043 struct mlx4_vhcr *vhcr,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d69fee41f24a..6ae350921b1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -40,6 +40,9 @@
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/netdevice.h> 41#include <linux/netdevice.h>
42#include <linux/if_vlan.h> 42#include <linux/if_vlan.h>
43#ifdef CONFIG_MLX4_EN_DCB
44#include <linux/dcbnl.h>
45#endif
43 46
44#include <linux/mlx4/device.h> 47#include <linux/mlx4/device.h>
45#include <linux/mlx4/qp.h> 48#include <linux/mlx4/qp.h>
@@ -108,9 +111,8 @@ enum {
108#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE) 111#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
109 112
110#define MLX4_EN_SMALL_PKT_SIZE 64 113#define MLX4_EN_SMALL_PKT_SIZE 64
111#define MLX4_EN_NUM_TX_RINGS 8 114#define MLX4_EN_MAX_TX_RING_P_UP 32
112#define MLX4_EN_NUM_PPP_RINGS 8 115#define MLX4_EN_NUM_UP 8
113#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
114#define MLX4_EN_DEF_TX_RING_SIZE 512 116#define MLX4_EN_DEF_TX_RING_SIZE 512
115#define MLX4_EN_DEF_RX_RING_SIZE 1024 117#define MLX4_EN_DEF_RX_RING_SIZE 1024
116 118
@@ -118,7 +120,7 @@ enum {
118#define MLX4_EN_RX_COAL_TARGET 44 120#define MLX4_EN_RX_COAL_TARGET 44
119#define MLX4_EN_RX_COAL_TIME 0x10 121#define MLX4_EN_RX_COAL_TIME 0x10
120 122
121#define MLX4_EN_TX_COAL_PKTS 5 123#define MLX4_EN_TX_COAL_PKTS 16
122#define MLX4_EN_TX_COAL_TIME 0x80 124#define MLX4_EN_TX_COAL_TIME 0x80
123 125
124#define MLX4_EN_RX_RATE_LOW 400000 126#define MLX4_EN_RX_RATE_LOW 400000
@@ -196,6 +198,7 @@ enum cq_type {
196struct mlx4_en_tx_info { 198struct mlx4_en_tx_info {
197 struct sk_buff *skb; 199 struct sk_buff *skb;
198 u32 nr_txbb; 200 u32 nr_txbb;
201 u32 nr_bytes;
199 u8 linear; 202 u8 linear;
200 u8 data_offset; 203 u8 data_offset;
201 u8 inl; 204 u8 inl;
@@ -251,9 +254,9 @@ struct mlx4_en_tx_ring {
251 unsigned long bytes; 254 unsigned long bytes;
252 unsigned long packets; 255 unsigned long packets;
253 unsigned long tx_csum; 256 unsigned long tx_csum;
254 spinlock_t comp_lock;
255 struct mlx4_bf bf; 257 struct mlx4_bf bf;
256 bool bf_enabled; 258 bool bf_enabled;
259 struct netdev_queue *tx_queue;
257}; 260};
258 261
259struct mlx4_en_rx_desc { 262struct mlx4_en_rx_desc {
@@ -304,8 +307,6 @@ struct mlx4_en_cq {
304 spinlock_t lock; 307 spinlock_t lock;
305 struct net_device *dev; 308 struct net_device *dev;
306 struct napi_struct napi; 309 struct napi_struct napi;
307 /* Per-core Tx cq processing support */
308 struct timer_list timer;
309 int size; 310 int size;
310 int buf_size; 311 int buf_size;
311 unsigned vector; 312 unsigned vector;
@@ -336,6 +337,7 @@ struct mlx4_en_profile {
336 u32 active_ports; 337 u32 active_ports;
337 u32 small_pkt_int; 338 u32 small_pkt_int;
338 u8 no_reset; 339 u8 no_reset;
340 u8 num_tx_rings_p_up;
339 struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; 341 struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
340}; 342};
341 343
@@ -411,6 +413,15 @@ struct mlx4_en_frag_info {
411 413
412}; 414};
413 415
416#ifdef CONFIG_MLX4_EN_DCB
417/* Minimal TC BW - setting to 0 will block traffic */
418#define MLX4_EN_BW_MIN 1
419#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
420
421#define MLX4_EN_TC_ETS 7
422
423#endif
424
414struct mlx4_en_priv { 425struct mlx4_en_priv {
415 struct mlx4_en_dev *mdev; 426 struct mlx4_en_dev *mdev;
416 struct mlx4_en_port_profile *prof; 427 struct mlx4_en_port_profile *prof;
@@ -465,9 +476,9 @@ struct mlx4_en_priv {
465 u16 num_frags; 476 u16 num_frags;
466 u16 log_rx_info; 477 u16 log_rx_info;
467 478
468 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; 479 struct mlx4_en_tx_ring *tx_ring;
469 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; 480 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
470 struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; 481 struct mlx4_en_cq *tx_cq;
471 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 482 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
472 struct work_struct mcast_task; 483 struct work_struct mcast_task;
473 struct work_struct mac_task; 484 struct work_struct mac_task;
@@ -484,6 +495,11 @@ struct mlx4_en_priv {
484 int vids[128]; 495 int vids[128];
485 bool wol; 496 bool wol;
486 struct device *ddev; 497 struct device *ddev;
498
499#ifdef CONFIG_MLX4_EN_DCB
500 struct ieee_ets ets;
501 u16 maxrate[IEEE_8021QAZ_MAX_TCS];
502#endif
487}; 503};
488 504
489enum mlx4_en_wol { 505enum mlx4_en_wol {
@@ -512,7 +528,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
512int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 528int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
513int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 529int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
514 530
515void mlx4_en_poll_tx_cq(unsigned long data);
516void mlx4_en_tx_irq(struct mlx4_cq *mcq); 531void mlx4_en_tx_irq(struct mlx4_cq *mcq);
517u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); 532u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
518netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 533netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -522,7 +537,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ri
522void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 537void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
523int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 538int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
524 struct mlx4_en_tx_ring *ring, 539 struct mlx4_en_tx_ring *ring,
525 int cq); 540 int cq, int user_prio);
526void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, 541void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
527 struct mlx4_en_tx_ring *ring); 542 struct mlx4_en_tx_ring *ring);
528 543
@@ -540,8 +555,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
540 int budget); 555 int budget);
541int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); 556int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
542void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, 557void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
543 int is_tx, int rss, int qpn, int cqn, 558 int is_tx, int rss, int qpn, int cqn, int user_prio,
544 struct mlx4_qp_context *context); 559 struct mlx4_qp_context *context);
545void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); 560void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
546int mlx4_en_map_buffer(struct mlx4_buf *buf); 561int mlx4_en_map_buffer(struct mlx4_buf *buf);
547void mlx4_en_unmap_buffer(struct mlx4_buf *buf); 562void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
@@ -558,6 +573,10 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
558int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); 573int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
559int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port); 574int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
560 575
576#ifdef CONFIG_MLX4_EN_DCB
577extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
578#endif
579
561#define MLX4_EN_NUM_SELF_TEST 5 580#define MLX4_EN_NUM_SELF_TEST 5
562void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); 581void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
563u64 mlx4_en_mac_to_u64(u8 *addr); 582u64 mlx4_en_mac_to_u64(u8 *addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index fe2ac8449c19..af55b7ce5341 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -788,7 +788,6 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
788 int max_maps, u8 page_shift, struct mlx4_fmr *fmr) 788 int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
789{ 789{
790 struct mlx4_priv *priv = mlx4_priv(dev); 790 struct mlx4_priv *priv = mlx4_priv(dev);
791 u64 mtt_offset;
792 int err = -ENOMEM; 791 int err = -ENOMEM;
793 792
794 if (max_maps > dev->caps.max_fmr_maps) 793 if (max_maps > dev->caps.max_fmr_maps)
@@ -811,8 +810,6 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
811 if (err) 810 if (err)
812 return err; 811 return err;
813 812
814 mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
815
816 fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, 813 fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
817 fmr->mr.mtt.offset, 814 fmr->mr.mtt.offset,
818 &fmr->dma_handle); 815 &fmr->dma_handle);
@@ -895,6 +892,6 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free);
895int mlx4_SYNC_TPT(struct mlx4_dev *dev) 892int mlx4_SYNC_TPT(struct mlx4_dev *dev)
896{ 893{
897 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, 894 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
898 MLX4_CMD_WRAPPED); 895 MLX4_CMD_NATIVE);
899} 896}
900EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); 897EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index db4746d0dca7..1ac88637ad9d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -63,7 +63,7 @@ void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
63} 63}
64EXPORT_SYMBOL_GPL(mlx4_pd_free); 64EXPORT_SYMBOL_GPL(mlx4_pd_free);
65 65
66int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) 66int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
67{ 67{
68 struct mlx4_priv *priv = mlx4_priv(dev); 68 struct mlx4_priv *priv = mlx4_priv(dev);
69 69
@@ -73,12 +73,47 @@ int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
73 73
74 return 0; 74 return 0;
75} 75}
76
77int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
78{
79 u64 out_param;
80 int err;
81
82 if (mlx4_is_mfunc(dev)) {
83 err = mlx4_cmd_imm(dev, 0, &out_param,
84 RES_XRCD, RES_OP_RESERVE,
85 MLX4_CMD_ALLOC_RES,
86 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
87 if (err)
88 return err;
89
90 *xrcdn = get_param_l(&out_param);
91 return 0;
92 }
93 return __mlx4_xrcd_alloc(dev, xrcdn);
94}
76EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc); 95EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
77 96
78void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) 97void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
79{ 98{
80 mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn); 99 mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
81} 100}
101
102void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
103{
104 u64 in_param;
105 int err;
106
107 if (mlx4_is_mfunc(dev)) {
108 set_param_l(&in_param, xrcdn);
109 err = mlx4_cmd(dev, in_param, RES_XRCD,
110 RES_OP_RESERVE, MLX4_CMD_FREE_RES,
111 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
112 if (err)
113 mlx4_warn(dev, "Failed to release xrcdn %d\n", xrcdn);
114 } else
115 __mlx4_xrcd_free(dev, xrcdn);
116}
82EXPORT_SYMBOL_GPL(mlx4_xrcd_free); 117EXPORT_SYMBOL_GPL(mlx4_xrcd_free);
83 118
84int mlx4_init_pd_table(struct mlx4_dev *dev) 119int mlx4_init_pd_table(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 77535ff18f1b..1fe2c7a8b40c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -338,13 +338,12 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
338void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) 338void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
339{ 339{
340 u64 out_param; 340 u64 out_param;
341 int err;
342 341
343 if (mlx4_is_mfunc(dev)) { 342 if (mlx4_is_mfunc(dev)) {
344 set_param_l(&out_param, port); 343 set_param_l(&out_param, port);
345 err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 344 (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
346 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, 345 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
347 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 346 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
348 return; 347 return;
349 } 348 }
350 __mlx4_unregister_mac(dev, port, mac); 349 __mlx4_unregister_mac(dev, port, mac);
@@ -834,6 +833,68 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
834} 833}
835EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); 834EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
836 835
836int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
837{
838 struct mlx4_cmd_mailbox *mailbox;
839 struct mlx4_set_port_prio2tc_context *context;
840 int err;
841 u32 in_mod;
842 int i;
843
844 mailbox = mlx4_alloc_cmd_mailbox(dev);
845 if (IS_ERR(mailbox))
846 return PTR_ERR(mailbox);
847 context = mailbox->buf;
848 memset(context, 0, sizeof *context);
849
850 for (i = 0; i < MLX4_NUM_UP; i += 2)
851 context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
852
853 in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
854 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
855 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
856
857 mlx4_free_cmd_mailbox(dev, mailbox);
858 return err;
859}
860EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
861
862int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
863 u8 *pg, u16 *ratelimit)
864{
865 struct mlx4_cmd_mailbox *mailbox;
866 struct mlx4_set_port_scheduler_context *context;
867 int err;
868 u32 in_mod;
869 int i;
870
871 mailbox = mlx4_alloc_cmd_mailbox(dev);
872 if (IS_ERR(mailbox))
873 return PTR_ERR(mailbox);
874 context = mailbox->buf;
875 memset(context, 0, sizeof *context);
876
877 for (i = 0; i < MLX4_NUM_TC; i++) {
878 struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
879 u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
880 MLX4_RATELIMIT_DEFAULT;
881
882 tc->pg = htons(pg[i]);
883 tc->bw_precentage = htons(tc_tx_bw[i]);
884
885 tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
886 tc->max_bw_value = htons(r);
887 }
888
889 in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
890 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
891 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
892
893 mlx4_free_cmd_mailbox(dev, mailbox);
894 return err;
895}
896EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
897
837int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, 898int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
838 struct mlx4_vhcr *vhcr, 899 struct mlx4_vhcr *vhcr,
839 struct mlx4_cmd_mailbox *inbox, 900 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 8752e6e08169..b45d0e7f6ab0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -89,17 +89,6 @@ enum res_qp_states {
89 RES_QP_HW 89 RES_QP_HW
90}; 90};
91 91
92static inline const char *qp_states_str(enum res_qp_states state)
93{
94 switch (state) {
95 case RES_QP_BUSY: return "RES_QP_BUSY";
96 case RES_QP_RESERVED: return "RES_QP_RESERVED";
97 case RES_QP_MAPPED: return "RES_QP_MAPPED";
98 case RES_QP_HW: return "RES_QP_HW";
99 default: return "Unknown";
100 }
101}
102
103struct res_qp { 92struct res_qp {
104 struct res_common com; 93 struct res_common com;
105 struct res_mtt *mtt; 94 struct res_mtt *mtt;
@@ -173,16 +162,6 @@ enum res_srq_states {
173 RES_SRQ_HW, 162 RES_SRQ_HW,
174}; 163};
175 164
176static inline const char *srq_states_str(enum res_srq_states state)
177{
178 switch (state) {
179 case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
180 case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
181 case RES_SRQ_HW: return "RES_SRQ_HW";
182 default: return "Unknown";
183 }
184}
185
186struct res_srq { 165struct res_srq {
187 struct res_common com; 166 struct res_common com;
188 struct res_mtt *mtt; 167 struct res_mtt *mtt;
@@ -195,20 +174,21 @@ enum res_counter_states {
195 RES_COUNTER_ALLOCATED, 174 RES_COUNTER_ALLOCATED,
196}; 175};
197 176
198static inline const char *counter_states_str(enum res_counter_states state)
199{
200 switch (state) {
201 case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
202 case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
203 default: return "Unknown";
204 }
205}
206
207struct res_counter { 177struct res_counter {
208 struct res_common com; 178 struct res_common com;
209 int port; 179 int port;
210}; 180};
211 181
182enum res_xrcdn_states {
183 RES_XRCD_BUSY = RES_ANY_BUSY,
184 RES_XRCD_ALLOCATED,
185};
186
187struct res_xrcdn {
188 struct res_common com;
189 int port;
190};
191
212/* For Debug uses */ 192/* For Debug uses */
213static const char *ResourceType(enum mlx4_resource rt) 193static const char *ResourceType(enum mlx4_resource rt)
214{ 194{
@@ -221,6 +201,7 @@ static const char *ResourceType(enum mlx4_resource rt)
221 case RES_MAC: return "RES_MAC"; 201 case RES_MAC: return "RES_MAC";
222 case RES_EQ: return "RES_EQ"; 202 case RES_EQ: return "RES_EQ";
223 case RES_COUNTER: return "RES_COUNTER"; 203 case RES_COUNTER: return "RES_COUNTER";
204 case RES_XRCD: return "RES_XRCD";
224 default: return "Unknown resource type !!!"; 205 default: return "Unknown resource type !!!";
225 }; 206 };
226} 207}
@@ -254,16 +235,23 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
254 return 0 ; 235 return 0 ;
255} 236}
256 237
257void mlx4_free_resource_tracker(struct mlx4_dev *dev) 238void mlx4_free_resource_tracker(struct mlx4_dev *dev,
239 enum mlx4_res_tracker_free_type type)
258{ 240{
259 struct mlx4_priv *priv = mlx4_priv(dev); 241 struct mlx4_priv *priv = mlx4_priv(dev);
260 int i; 242 int i;
261 243
262 if (priv->mfunc.master.res_tracker.slave_list) { 244 if (priv->mfunc.master.res_tracker.slave_list) {
263 for (i = 0 ; i < dev->num_slaves; i++) 245 if (type != RES_TR_FREE_STRUCTS_ONLY)
264 mlx4_delete_all_resources_for_slave(dev, i); 246 for (i = 0 ; i < dev->num_slaves; i++)
265 247 if (type == RES_TR_FREE_ALL ||
266 kfree(priv->mfunc.master.res_tracker.slave_list); 248 dev->caps.function != i)
249 mlx4_delete_all_resources_for_slave(dev, i);
250
251 if (type != RES_TR_FREE_SLAVES_ONLY) {
252 kfree(priv->mfunc.master.res_tracker.slave_list);
253 priv->mfunc.master.res_tracker.slave_list = NULL;
254 }
267 } 255 }
268} 256}
269 257
@@ -471,6 +459,20 @@ static struct res_common *alloc_counter_tr(int id)
471 return &ret->com; 459 return &ret->com;
472} 460}
473 461
462static struct res_common *alloc_xrcdn_tr(int id)
463{
464 struct res_xrcdn *ret;
465
466 ret = kzalloc(sizeof *ret, GFP_KERNEL);
467 if (!ret)
468 return NULL;
469
470 ret->com.res_id = id;
471 ret->com.state = RES_XRCD_ALLOCATED;
472
473 return &ret->com;
474}
475
474static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, 476static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
475 int extra) 477 int extra)
476{ 478{
@@ -501,7 +503,9 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
501 case RES_COUNTER: 503 case RES_COUNTER:
502 ret = alloc_counter_tr(id); 504 ret = alloc_counter_tr(id);
503 break; 505 break;
504 506 case RES_XRCD:
507 ret = alloc_xrcdn_tr(id);
508 break;
505 default: 509 default:
506 return NULL; 510 return NULL;
507 } 511 }
@@ -624,6 +628,16 @@ static int remove_counter_ok(struct res_counter *res)
624 return 0; 628 return 0;
625} 629}
626 630
631static int remove_xrcdn_ok(struct res_xrcdn *res)
632{
633 if (res->com.state == RES_XRCD_BUSY)
634 return -EBUSY;
635 else if (res->com.state != RES_XRCD_ALLOCATED)
636 return -EPERM;
637
638 return 0;
639}
640
627static int remove_cq_ok(struct res_cq *res) 641static int remove_cq_ok(struct res_cq *res)
628{ 642{
629 if (res->com.state == RES_CQ_BUSY) 643 if (res->com.state == RES_CQ_BUSY)
@@ -663,6 +677,8 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
663 return remove_eq_ok((struct res_eq *)res); 677 return remove_eq_ok((struct res_eq *)res);
664 case RES_COUNTER: 678 case RES_COUNTER:
665 return remove_counter_ok((struct res_counter *)res); 679 return remove_counter_ok((struct res_counter *)res);
680 case RES_XRCD:
681 return remove_xrcdn_ok((struct res_xrcdn *)res);
666 default: 682 default:
667 return -EINVAL; 683 return -EINVAL;
668 } 684 }
@@ -1269,6 +1285,50 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1269 return 0; 1285 return 0;
1270} 1286}
1271 1287
1288static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1289 u64 in_param, u64 *out_param)
1290{
1291 u32 index;
1292 int err;
1293
1294 if (op != RES_OP_RESERVE)
1295 return -EINVAL;
1296
1297 err = __mlx4_counter_alloc(dev, &index);
1298 if (err)
1299 return err;
1300
1301 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1302 if (err)
1303 __mlx4_counter_free(dev, index);
1304 else
1305 set_param_l(out_param, index);
1306
1307 return err;
1308}
1309
1310static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1311 u64 in_param, u64 *out_param)
1312{
1313 u32 xrcdn;
1314 int err;
1315
1316 if (op != RES_OP_RESERVE)
1317 return -EINVAL;
1318
1319 err = __mlx4_xrcd_alloc(dev, &xrcdn);
1320 if (err)
1321 return err;
1322
1323 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1324 if (err)
1325 __mlx4_xrcd_free(dev, xrcdn);
1326 else
1327 set_param_l(out_param, xrcdn);
1328
1329 return err;
1330}
1331
1272int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, 1332int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1273 struct mlx4_vhcr *vhcr, 1333 struct mlx4_vhcr *vhcr,
1274 struct mlx4_cmd_mailbox *inbox, 1334 struct mlx4_cmd_mailbox *inbox,
@@ -1314,6 +1374,16 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1314 vhcr->in_param, &vhcr->out_param); 1374 vhcr->in_param, &vhcr->out_param);
1315 break; 1375 break;
1316 1376
1377 case RES_COUNTER:
1378 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1379 vhcr->in_param, &vhcr->out_param);
1380 break;
1381
1382 case RES_XRCD:
1383 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1384 vhcr->in_param, &vhcr->out_param);
1385 break;
1386
1317 default: 1387 default:
1318 err = -EINVAL; 1388 err = -EINVAL;
1319 break; 1389 break;
@@ -1496,6 +1566,44 @@ static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1496 return 0; 1566 return 0;
1497} 1567}
1498 1568
1569static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1570 u64 in_param, u64 *out_param)
1571{
1572 int index;
1573 int err;
1574
1575 if (op != RES_OP_RESERVE)
1576 return -EINVAL;
1577
1578 index = get_param_l(&in_param);
1579 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1580 if (err)
1581 return err;
1582
1583 __mlx4_counter_free(dev, index);
1584
1585 return err;
1586}
1587
1588static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1589 u64 in_param, u64 *out_param)
1590{
1591 int xrcdn;
1592 int err;
1593
1594 if (op != RES_OP_RESERVE)
1595 return -EINVAL;
1596
1597 xrcdn = get_param_l(&in_param);
1598 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1599 if (err)
1600 return err;
1601
1602 __mlx4_xrcd_free(dev, xrcdn);
1603
1604 return err;
1605}
1606
1499int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, 1607int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1500 struct mlx4_vhcr *vhcr, 1608 struct mlx4_vhcr *vhcr,
1501 struct mlx4_cmd_mailbox *inbox, 1609 struct mlx4_cmd_mailbox *inbox,
@@ -1541,6 +1649,15 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1541 vhcr->in_param, &vhcr->out_param); 1649 vhcr->in_param, &vhcr->out_param);
1542 break; 1650 break;
1543 1651
1652 case RES_COUNTER:
1653 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1654 vhcr->in_param, &vhcr->out_param);
1655 break;
1656
1657 case RES_XRCD:
1658 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1659 vhcr->in_param, &vhcr->out_param);
1660
1544 default: 1661 default:
1545 break; 1662 break;
1546 } 1663 }
@@ -2536,7 +2653,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2536 struct mlx4_qp qp; /* dummy for calling attach/detach */ 2653 struct mlx4_qp qp; /* dummy for calling attach/detach */
2537 u8 *gid = inbox->buf; 2654 u8 *gid = inbox->buf;
2538 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; 2655 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2539 int err, err1; 2656 int err;
2540 int qpn; 2657 int qpn;
2541 struct res_qp *rqp; 2658 struct res_qp *rqp;
2542 int attach = vhcr->op_modifier; 2659 int attach = vhcr->op_modifier;
@@ -2571,7 +2688,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2571 2688
2572ex_rem: 2689ex_rem:
2573 /* ignore error return below, already in error */ 2690 /* ignore error return below, already in error */
2574 err1 = rem_mcg_res(dev, slave, rqp, gid, prot, type); 2691 (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2575ex_put: 2692ex_put:
2576 put_res(dev, slave, qpn, RES_QP); 2693 put_res(dev, slave, qpn, RES_QP);
2577 2694
@@ -2604,13 +2721,12 @@ static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2604{ 2721{
2605 struct res_gid *rgid; 2722 struct res_gid *rgid;
2606 struct res_gid *tmp; 2723 struct res_gid *tmp;
2607 int err;
2608 struct mlx4_qp qp; /* dummy for calling attach/detach */ 2724 struct mlx4_qp qp; /* dummy for calling attach/detach */
2609 2725
2610 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { 2726 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2611 qp.qpn = rqp->local_qpn; 2727 qp.qpn = rqp->local_qpn;
2612 err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot, 2728 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2613 rgid->steer); 2729 rgid->steer);
2614 list_del(&rgid->list); 2730 list_del(&rgid->list);
2615 kfree(rgid); 2731 kfree(rgid);
2616 } 2732 }
@@ -3036,14 +3152,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3036 MLX4_CMD_HW2SW_EQ, 3152 MLX4_CMD_HW2SW_EQ,
3037 MLX4_CMD_TIME_CLASS_A, 3153 MLX4_CMD_TIME_CLASS_A,
3038 MLX4_CMD_NATIVE); 3154 MLX4_CMD_NATIVE);
3039 mlx4_dbg(dev, "rem_slave_eqs: failed" 3155 if (err)
3040 " to move slave %d eqs %d to" 3156 mlx4_dbg(dev, "rem_slave_eqs: failed"
3041 " SW ownership\n", slave, eqn); 3157 " to move slave %d eqs %d to"
3158 " SW ownership\n", slave, eqn);
3042 mlx4_free_cmd_mailbox(dev, mailbox); 3159 mlx4_free_cmd_mailbox(dev, mailbox);
3043 if (!err) { 3160 atomic_dec(&eq->mtt->ref_count);
3044 atomic_dec(&eq->mtt->ref_count); 3161 state = RES_EQ_RESERVED;
3045 state = RES_EQ_RESERVED;
3046 }
3047 break; 3162 break;
3048 3163
3049 default: 3164 default:
@@ -3056,6 +3171,64 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3056 spin_unlock_irq(mlx4_tlock(dev)); 3171 spin_unlock_irq(mlx4_tlock(dev));
3057} 3172}
3058 3173
3174static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3175{
3176 struct mlx4_priv *priv = mlx4_priv(dev);
3177 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3178 struct list_head *counter_list =
3179 &tracker->slave_list[slave].res_list[RES_COUNTER];
3180 struct res_counter *counter;
3181 struct res_counter *tmp;
3182 int err;
3183 int index;
3184
3185 err = move_all_busy(dev, slave, RES_COUNTER);
3186 if (err)
3187 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3188 "busy for slave %d\n", slave);
3189
3190 spin_lock_irq(mlx4_tlock(dev));
3191 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3192 if (counter->com.owner == slave) {
3193 index = counter->com.res_id;
3194 radix_tree_delete(&tracker->res_tree[RES_COUNTER], index);
3195 list_del(&counter->com.list);
3196 kfree(counter);
3197 __mlx4_counter_free(dev, index);
3198 }
3199 }
3200 spin_unlock_irq(mlx4_tlock(dev));
3201}
3202
3203static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3204{
3205 struct mlx4_priv *priv = mlx4_priv(dev);
3206 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3207 struct list_head *xrcdn_list =
3208 &tracker->slave_list[slave].res_list[RES_XRCD];
3209 struct res_xrcdn *xrcd;
3210 struct res_xrcdn *tmp;
3211 int err;
3212 int xrcdn;
3213
3214 err = move_all_busy(dev, slave, RES_XRCD);
3215 if (err)
3216 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3217 "busy for slave %d\n", slave);
3218
3219 spin_lock_irq(mlx4_tlock(dev));
3220 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3221 if (xrcd->com.owner == slave) {
3222 xrcdn = xrcd->com.res_id;
3223 radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn);
3224 list_del(&xrcd->com.list);
3225 kfree(xrcd);
3226 __mlx4_xrcd_free(dev, xrcdn);
3227 }
3228 }
3229 spin_unlock_irq(mlx4_tlock(dev));
3230}
3231
3059void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) 3232void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3060{ 3233{
3061 struct mlx4_priv *priv = mlx4_priv(dev); 3234 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3069,5 +3242,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3069 rem_slave_mrs(dev, slave); 3242 rem_slave_mrs(dev, slave);
3070 rem_slave_eqs(dev, slave); 3243 rem_slave_eqs(dev, slave);
3071 rem_slave_mtts(dev, slave); 3244 rem_slave_mtts(dev, slave);
3245 rem_slave_counters(dev, slave);
3246 rem_slave_xrcdns(dev, slave);
3072 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 3247 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3073} 3248}
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f84dd2dc82b6..24fb049ac2f2 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1262,7 +1262,7 @@ static struct platform_driver ks8842_platform_driver = {
1262 .owner = THIS_MODULE, 1262 .owner = THIS_MODULE,
1263 }, 1263 },
1264 .probe = ks8842_probe, 1264 .probe = ks8842_probe,
1265 .remove = ks8842_remove, 1265 .remove = __devexit_p(ks8842_remove),
1266}; 1266};
1267 1267
1268module_platform_driver(ks8842_platform_driver); 1268module_platform_driver(ks8842_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index c722aa607d07..5e313e9a252f 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -618,10 +618,8 @@ static void ks8851_irq_work(struct work_struct *work)
618 netif_dbg(ks, intr, ks->netdev, 618 netif_dbg(ks, intr, ks->netdev,
619 "%s: status 0x%04x\n", __func__, status); 619 "%s: status 0x%04x\n", __func__, status);
620 620
621 if (status & IRQ_LCI) { 621 if (status & IRQ_LCI)
622 /* should do something about checking link status */
623 handled |= IRQ_LCI; 622 handled |= IRQ_LCI;
624 }
625 623
626 if (status & IRQ_LDI) { 624 if (status & IRQ_LDI) {
627 u16 pmecr = ks8851_rdreg16(ks, KS_PMECR); 625 u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
@@ -684,6 +682,9 @@ static void ks8851_irq_work(struct work_struct *work)
684 682
685 mutex_unlock(&ks->lock); 683 mutex_unlock(&ks->lock);
686 684
685 if (status & IRQ_LCI)
686 mii_check_link(&ks->mii);
687
687 if (status & IRQ_TXI) 688 if (status & IRQ_TXI)
688 netif_wake_queue(ks->netdev); 689 netif_wake_queue(ks->netdev);
689 690
@@ -889,16 +890,17 @@ static int ks8851_net_stop(struct net_device *dev)
889 netif_stop_queue(dev); 890 netif_stop_queue(dev);
890 891
891 mutex_lock(&ks->lock); 892 mutex_lock(&ks->lock);
893 /* turn off the IRQs and ack any outstanding */
894 ks8851_wrreg16(ks, KS_IER, 0x0000);
895 ks8851_wrreg16(ks, KS_ISR, 0xffff);
896 mutex_unlock(&ks->lock);
892 897
893 /* stop any outstanding work */ 898 /* stop any outstanding work */
894 flush_work(&ks->irq_work); 899 flush_work(&ks->irq_work);
895 flush_work(&ks->tx_work); 900 flush_work(&ks->tx_work);
896 flush_work(&ks->rxctrl_work); 901 flush_work(&ks->rxctrl_work);
897 902
898 /* turn off the IRQs and ack any outstanding */ 903 mutex_lock(&ks->lock);
899 ks8851_wrreg16(ks, KS_IER, 0x0000);
900 ks8851_wrreg16(ks, KS_ISR, 0xffff);
901
902 /* shutdown RX process */ 904 /* shutdown RX process */
903 ks8851_wrreg16(ks, KS_RXCR1, 0x0000); 905 ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
904 906
@@ -907,6 +909,7 @@ static int ks8851_net_stop(struct net_device *dev)
907 909
908 /* set powermode to soft power down to save power */ 910 /* set powermode to soft power down to save power */
909 ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); 911 ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
912 mutex_unlock(&ks->lock);
910 913
911 /* ensure any queued tx buffers are dumped */ 914 /* ensure any queued tx buffers are dumped */
912 while (!skb_queue_empty(&ks->txq)) { 915 while (!skb_queue_empty(&ks->txq)) {
@@ -918,7 +921,6 @@ static int ks8851_net_stop(struct net_device *dev)
918 dev_kfree_skb(txb); 921 dev_kfree_skb(txb);
919 } 922 }
920 923
921 mutex_unlock(&ks->lock);
922 return 0; 924 return 0;
923} 925}
924 926
@@ -1418,6 +1420,7 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1418 struct net_device *ndev; 1420 struct net_device *ndev;
1419 struct ks8851_net *ks; 1421 struct ks8851_net *ks;
1420 int ret; 1422 int ret;
1423 unsigned cider;
1421 1424
1422 ndev = alloc_etherdev(sizeof(struct ks8851_net)); 1425 ndev = alloc_etherdev(sizeof(struct ks8851_net));
1423 if (!ndev) 1426 if (!ndev)
@@ -1484,8 +1487,8 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1484 ks8851_soft_reset(ks, GRR_GSR); 1487 ks8851_soft_reset(ks, GRR_GSR);
1485 1488
1486 /* simple check for a valid chip being connected to the bus */ 1489 /* simple check for a valid chip being connected to the bus */
1487 1490 cider = ks8851_rdreg16(ks, KS_CIDER);
1488 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { 1491 if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
1489 dev_err(&spi->dev, "failed to read device ID\n"); 1492 dev_err(&spi->dev, "failed to read device ID\n");
1490 ret = -ENODEV; 1493 ret = -ENODEV;
1491 goto err_id; 1494 goto err_id;
@@ -1516,15 +1519,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1516 } 1519 }
1517 1520
1518 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", 1521 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
1519 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), 1522 CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq,
1520 ndev->dev_addr, ndev->irq,
1521 ks->rc_ccr & CCR_EEPROM ? "has" : "no"); 1523 ks->rc_ccr & CCR_EEPROM ? "has" : "no");
1522 1524
1523 return 0; 1525 return 0;
1524 1526
1525 1527
1526err_netdev: 1528err_netdev:
1527 free_irq(ndev->irq, ndev); 1529 free_irq(ndev->irq, ks);
1528 1530
1529err_id: 1531err_id:
1530err_irq: 1532err_irq:
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index b8104d9f4081..5ffde23ac8fb 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -40,7 +40,7 @@
40#define DRV_NAME "ks8851_mll" 40#define DRV_NAME "ks8851_mll"
41 41
42static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; 42static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
43#define MAX_RECV_FRAMES 32 43#define MAX_RECV_FRAMES 255
44#define MAX_BUF_SIZE 2048 44#define MAX_BUF_SIZE 2048
45#define TX_BUF_SIZE 2000 45#define TX_BUF_SIZE 2000
46#define RX_BUF_SIZE 2000 46#define RX_BUF_SIZE 2000
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ef723b185d85..eaf9ff0262a9 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5675,7 +5675,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
5675 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); 5675 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
5676 } 5676 }
5677 5677
5678 memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN); 5678 memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
5679 5679
5680 interrupt = hw_block_intr(hw); 5680 interrupt = hw_block_intr(hw);
5681 5681
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 27273ae1a6e6..90153fc983cb 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4033,7 +4033,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4033 4033
4034 netdev->netdev_ops = &myri10ge_netdev_ops; 4034 netdev->netdev_ops = &myri10ge_netdev_ops;
4035 netdev->mtu = myri10ge_initial_mtu; 4035 netdev->mtu = myri10ge_initial_mtu;
4036 netdev->base_addr = mgp->iomem_base;
4037 netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM; 4036 netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
4038 netdev->features = netdev->hw_features; 4037 netdev->features = netdev->hw_features;
4039 4038
@@ -4047,12 +4046,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4047 netdev->vlan_features &= ~NETIF_F_TSO; 4046 netdev->vlan_features &= ~NETIF_F_TSO;
4048 4047
4049 /* make sure we can get an irq, and that MSI can be 4048 /* make sure we can get an irq, and that MSI can be
4050 * setup (if available). Also ensure netdev->irq 4049 * setup (if available). */
4051 * is set to correct value if MSI is enabled */
4052 status = myri10ge_request_irq(mgp); 4050 status = myri10ge_request_irq(mgp);
4053 if (status != 0) 4051 if (status != 0)
4054 goto abort_with_firmware; 4052 goto abort_with_firmware;
4055 netdev->irq = pdev->irq;
4056 myri10ge_free_irq(mgp); 4053 myri10ge_free_irq(mgp);
4057 4054
4058 /* Save configuration space to be restored if the 4055 /* Save configuration space to be restored if the
@@ -4077,7 +4074,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4077 else 4074 else
4078 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 4075 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
4079 mgp->msi_enabled ? "MSI" : "xPIC", 4076 mgp->msi_enabled ? "MSI" : "xPIC",
4080 netdev->irq, mgp->tx_boundary, mgp->fw_name, 4077 pdev->irq, mgp->tx_boundary, mgp->fw_name,
4081 (mgp->wc_enabled ? "Enabled" : "Disabled")); 4078 (mgp->wc_enabled ? "Enabled" : "Disabled"));
4082 4079
4083 board_number++; 4080 board_number++;
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index eb836f770f50..f157334579fd 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -6,9 +6,8 @@ config NET_VENDOR_NATSEMI
6 bool "National Semi-conductor devices" 6 bool "National Semi-conductor devices"
7 default y 7 default y
8 depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \ 8 depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \
9 ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MCA || \ 9 ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MIPS || \
10 MCA_LEGACY || MIPS || PCI || PCMCIA || SUPERH || \ 10 PCI || PCMCIA || SUPERH || XTENSA_PLATFORM_XT2000 || ZORRO
11 XTENSA_PLATFORM_XT2000 || ZORRO
12 ---help--- 11 ---help---
13 If you have a network (Ethernet) card belonging to this class, say Y 12 If you have a network (Ethernet) card belonging to this class, say Y
14 and read the Ethernet-HOWTO, available from 13 and read the Ethernet-HOWTO, available from
@@ -21,21 +20,6 @@ config NET_VENDOR_NATSEMI
21 20
22if NET_VENDOR_NATSEMI 21if NET_VENDOR_NATSEMI
23 22
24config IBMLANA
25 tristate "IBM LAN Adapter/A support"
26 depends on MCA
27 ---help---
28 This is a Micro Channel Ethernet adapter. You need to set
29 CONFIG_MCA to use this driver. It is both available as an in-kernel
30 driver and as a module.
31
32 To compile this driver as a module, choose M here. The only
33 currently supported card is the IBM LAN Adapter/A for Ethernet. It
34 will both support 16K and 32K memory windows, however a 32K window
35 gives a better security against packet losses. Usage of multiple
36 boards with this driver should be possible, but has not been tested
37 up to now due to lack of hardware.
38
39config MACSONIC 23config MACSONIC
40 tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)" 24 tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)"
41 depends on MAC 25 depends on MAC
diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile
index 9aa5dea52b3e..764c532a96d1 100644
--- a/drivers/net/ethernet/natsemi/Makefile
+++ b/drivers/net/ethernet/natsemi/Makefile
@@ -2,7 +2,6 @@
2# Makefile for the National Semi-conductor Sonic devices. 2# Makefile for the National Semi-conductor Sonic devices.
3# 3#
4 4
5obj-$(CONFIG_IBMLANA) += ibmlana.o
6obj-$(CONFIG_MACSONIC) += macsonic.o 5obj-$(CONFIG_MACSONIC) += macsonic.o
7obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o 6obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
8obj-$(CONFIG_NATSEMI) += natsemi.o 7obj-$(CONFIG_NATSEMI) += natsemi.o
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index d38e48d4f430..5b61d12f8b91 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -547,6 +547,7 @@ struct netdev_private {
547 struct sk_buff *tx_skbuff[TX_RING_SIZE]; 547 struct sk_buff *tx_skbuff[TX_RING_SIZE];
548 dma_addr_t tx_dma[TX_RING_SIZE]; 548 dma_addr_t tx_dma[TX_RING_SIZE];
549 struct net_device *dev; 549 struct net_device *dev;
550 void __iomem *ioaddr;
550 struct napi_struct napi; 551 struct napi_struct napi;
551 /* Media monitoring timer */ 552 /* Media monitoring timer */
552 struct timer_list timer; 553 struct timer_list timer;
@@ -699,7 +700,9 @@ static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
699 700
700static inline void __iomem *ns_ioaddr(struct net_device *dev) 701static inline void __iomem *ns_ioaddr(struct net_device *dev)
701{ 702{
702 return (void __iomem *) dev->base_addr; 703 struct netdev_private *np = netdev_priv(dev);
704
705 return np->ioaddr;
703} 706}
704 707
705static inline void natsemi_irq_enable(struct net_device *dev) 708static inline void natsemi_irq_enable(struct net_device *dev)
@@ -863,10 +866,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
863 /* Store MAC Address in perm_addr */ 866 /* Store MAC Address in perm_addr */
864 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); 867 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
865 868
866 dev->base_addr = (unsigned long __force) ioaddr;
867 dev->irq = irq;
868
869 np = netdev_priv(dev); 869 np = netdev_priv(dev);
870 np->ioaddr = ioaddr;
871
870 netif_napi_add(dev, &np->napi, natsemi_poll, 64); 872 netif_napi_add(dev, &np->napi, natsemi_poll, 64);
871 np->dev = dev; 873 np->dev = dev;
872 874
@@ -914,9 +916,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
914 } 916 }
915 917
916 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; 918 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
917 if (dev->mem_start)
918 option = dev->mem_start;
919
920 /* The lower four bits are the media type. */ 919 /* The lower four bits are the media type. */
921 if (option) { 920 if (option) {
922 if (option & 0x200) 921 if (option & 0x200)
@@ -1532,20 +1531,21 @@ static int netdev_open(struct net_device *dev)
1532{ 1531{
1533 struct netdev_private *np = netdev_priv(dev); 1532 struct netdev_private *np = netdev_priv(dev);
1534 void __iomem * ioaddr = ns_ioaddr(dev); 1533 void __iomem * ioaddr = ns_ioaddr(dev);
1534 const int irq = np->pci_dev->irq;
1535 int i; 1535 int i;
1536 1536
1537 /* Reset the chip, just in case. */ 1537 /* Reset the chip, just in case. */
1538 natsemi_reset(dev); 1538 natsemi_reset(dev);
1539 1539
1540 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 1540 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
1541 if (i) return i; 1541 if (i) return i;
1542 1542
1543 if (netif_msg_ifup(np)) 1543 if (netif_msg_ifup(np))
1544 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 1544 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
1545 dev->name, dev->irq); 1545 dev->name, irq);
1546 i = alloc_ring(dev); 1546 i = alloc_ring(dev);
1547 if (i < 0) { 1547 if (i < 0) {
1548 free_irq(dev->irq, dev); 1548 free_irq(irq, dev);
1549 return i; 1549 return i;
1550 } 1550 }
1551 napi_enable(&np->napi); 1551 napi_enable(&np->napi);
@@ -1794,6 +1794,7 @@ static void netdev_timer(unsigned long data)
1794 struct netdev_private *np = netdev_priv(dev); 1794 struct netdev_private *np = netdev_priv(dev);
1795 void __iomem * ioaddr = ns_ioaddr(dev); 1795 void __iomem * ioaddr = ns_ioaddr(dev);
1796 int next_tick = NATSEMI_TIMER_FREQ; 1796 int next_tick = NATSEMI_TIMER_FREQ;
1797 const int irq = np->pci_dev->irq;
1797 1798
1798 if (netif_msg_timer(np)) { 1799 if (netif_msg_timer(np)) {
1799 /* DO NOT read the IntrStatus register, 1800 /* DO NOT read the IntrStatus register,
@@ -1817,14 +1818,14 @@ static void netdev_timer(unsigned long data)
1817 if (netif_msg_drv(np)) 1818 if (netif_msg_drv(np))
1818 printk(KERN_NOTICE "%s: possible phy reset: " 1819 printk(KERN_NOTICE "%s: possible phy reset: "
1819 "re-initializing\n", dev->name); 1820 "re-initializing\n", dev->name);
1820 disable_irq(dev->irq); 1821 disable_irq(irq);
1821 spin_lock_irq(&np->lock); 1822 spin_lock_irq(&np->lock);
1822 natsemi_stop_rxtx(dev); 1823 natsemi_stop_rxtx(dev);
1823 dump_ring(dev); 1824 dump_ring(dev);
1824 reinit_ring(dev); 1825 reinit_ring(dev);
1825 init_registers(dev); 1826 init_registers(dev);
1826 spin_unlock_irq(&np->lock); 1827 spin_unlock_irq(&np->lock);
1827 enable_irq(dev->irq); 1828 enable_irq(irq);
1828 } else { 1829 } else {
1829 /* hurry back */ 1830 /* hurry back */
1830 next_tick = HZ; 1831 next_tick = HZ;
@@ -1841,10 +1842,10 @@ static void netdev_timer(unsigned long data)
1841 spin_unlock_irq(&np->lock); 1842 spin_unlock_irq(&np->lock);
1842 } 1843 }
1843 if (np->oom) { 1844 if (np->oom) {
1844 disable_irq(dev->irq); 1845 disable_irq(irq);
1845 np->oom = 0; 1846 np->oom = 0;
1846 refill_rx(dev); 1847 refill_rx(dev);
1847 enable_irq(dev->irq); 1848 enable_irq(irq);
1848 if (!np->oom) { 1849 if (!np->oom) {
1849 writel(RxOn, ioaddr + ChipCmd); 1850 writel(RxOn, ioaddr + ChipCmd);
1850 } else { 1851 } else {
@@ -1885,8 +1886,9 @@ static void ns_tx_timeout(struct net_device *dev)
1885{ 1886{
1886 struct netdev_private *np = netdev_priv(dev); 1887 struct netdev_private *np = netdev_priv(dev);
1887 void __iomem * ioaddr = ns_ioaddr(dev); 1888 void __iomem * ioaddr = ns_ioaddr(dev);
1889 const int irq = np->pci_dev->irq;
1888 1890
1889 disable_irq(dev->irq); 1891 disable_irq(irq);
1890 spin_lock_irq(&np->lock); 1892 spin_lock_irq(&np->lock);
1891 if (!np->hands_off) { 1893 if (!np->hands_off) {
1892 if (netif_msg_tx_err(np)) 1894 if (netif_msg_tx_err(np))
@@ -1905,7 +1907,7 @@ static void ns_tx_timeout(struct net_device *dev)
1905 dev->name); 1907 dev->name);
1906 } 1908 }
1907 spin_unlock_irq(&np->lock); 1909 spin_unlock_irq(&np->lock);
1908 enable_irq(dev->irq); 1910 enable_irq(irq);
1909 1911
1910 dev->trans_start = jiffies; /* prevent tx timeout */ 1912 dev->trans_start = jiffies; /* prevent tx timeout */
1911 dev->stats.tx_errors++; 1913 dev->stats.tx_errors++;
@@ -2470,9 +2472,12 @@ static struct net_device_stats *get_stats(struct net_device *dev)
2470#ifdef CONFIG_NET_POLL_CONTROLLER 2472#ifdef CONFIG_NET_POLL_CONTROLLER
2471static void natsemi_poll_controller(struct net_device *dev) 2473static void natsemi_poll_controller(struct net_device *dev)
2472{ 2474{
2473 disable_irq(dev->irq); 2475 struct netdev_private *np = netdev_priv(dev);
2474 intr_handler(dev->irq, dev); 2476 const int irq = np->pci_dev->irq;
2475 enable_irq(dev->irq); 2477
2478 disable_irq(irq);
2479 intr_handler(irq, dev);
2480 enable_irq(irq);
2476} 2481}
2477#endif 2482#endif
2478 2483
@@ -2523,8 +2528,9 @@ static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2523 if (netif_running(dev)) { 2528 if (netif_running(dev)) {
2524 struct netdev_private *np = netdev_priv(dev); 2529 struct netdev_private *np = netdev_priv(dev);
2525 void __iomem * ioaddr = ns_ioaddr(dev); 2530 void __iomem * ioaddr = ns_ioaddr(dev);
2531 const int irq = np->pci_dev->irq;
2526 2532
2527 disable_irq(dev->irq); 2533 disable_irq(irq);
2528 spin_lock(&np->lock); 2534 spin_lock(&np->lock);
2529 /* stop engines */ 2535 /* stop engines */
2530 natsemi_stop_rxtx(dev); 2536 natsemi_stop_rxtx(dev);
@@ -2537,7 +2543,7 @@ static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2537 /* restart engines */ 2543 /* restart engines */
2538 writel(RxOn | TxOn, ioaddr + ChipCmd); 2544 writel(RxOn | TxOn, ioaddr + ChipCmd);
2539 spin_unlock(&np->lock); 2545 spin_unlock(&np->lock);
2540 enable_irq(dev->irq); 2546 enable_irq(irq);
2541 } 2547 }
2542 return 0; 2548 return 0;
2543} 2549}
@@ -3135,6 +3141,7 @@ static int netdev_close(struct net_device *dev)
3135{ 3141{
3136 void __iomem * ioaddr = ns_ioaddr(dev); 3142 void __iomem * ioaddr = ns_ioaddr(dev);
3137 struct netdev_private *np = netdev_priv(dev); 3143 struct netdev_private *np = netdev_priv(dev);
3144 const int irq = np->pci_dev->irq;
3138 3145
3139 if (netif_msg_ifdown(np)) 3146 if (netif_msg_ifdown(np))
3140 printk(KERN_DEBUG 3147 printk(KERN_DEBUG
@@ -3156,14 +3163,14 @@ static int netdev_close(struct net_device *dev)
3156 */ 3163 */
3157 3164
3158 del_timer_sync(&np->timer); 3165 del_timer_sync(&np->timer);
3159 disable_irq(dev->irq); 3166 disable_irq(irq);
3160 spin_lock_irq(&np->lock); 3167 spin_lock_irq(&np->lock);
3161 natsemi_irq_disable(dev); 3168 natsemi_irq_disable(dev);
3162 np->hands_off = 1; 3169 np->hands_off = 1;
3163 spin_unlock_irq(&np->lock); 3170 spin_unlock_irq(&np->lock);
3164 enable_irq(dev->irq); 3171 enable_irq(irq);
3165 3172
3166 free_irq(dev->irq, dev); 3173 free_irq(irq, dev);
3167 3174
3168 /* Interrupt disabled, interrupt handler released, 3175 /* Interrupt disabled, interrupt handler released,
3169 * queue stopped, timer deleted, rtnl_lock held 3176 * queue stopped, timer deleted, rtnl_lock held
@@ -3256,9 +3263,11 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
3256 3263
3257 rtnl_lock(); 3264 rtnl_lock();
3258 if (netif_running (dev)) { 3265 if (netif_running (dev)) {
3266 const int irq = np->pci_dev->irq;
3267
3259 del_timer_sync(&np->timer); 3268 del_timer_sync(&np->timer);
3260 3269
3261 disable_irq(dev->irq); 3270 disable_irq(irq);
3262 spin_lock_irq(&np->lock); 3271 spin_lock_irq(&np->lock);
3263 3272
3264 natsemi_irq_disable(dev); 3273 natsemi_irq_disable(dev);
@@ -3267,7 +3276,7 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
3267 netif_stop_queue(dev); 3276 netif_stop_queue(dev);
3268 3277
3269 spin_unlock_irq(&np->lock); 3278 spin_unlock_irq(&np->lock);
3270 enable_irq(dev->irq); 3279 enable_irq(irq);
3271 3280
3272 napi_disable(&np->napi); 3281 napi_disable(&np->napi);
3273 3282
@@ -3307,6 +3316,8 @@ static int natsemi_resume (struct pci_dev *pdev)
3307 if (netif_device_present(dev)) 3316 if (netif_device_present(dev))
3308 goto out; 3317 goto out;
3309 if (netif_running(dev)) { 3318 if (netif_running(dev)) {
3319 const int irq = np->pci_dev->irq;
3320
3310 BUG_ON(!np->hands_off); 3321 BUG_ON(!np->hands_off);
3311 ret = pci_enable_device(pdev); 3322 ret = pci_enable_device(pdev);
3312 if (ret < 0) { 3323 if (ret < 0) {
@@ -3320,13 +3331,13 @@ static int natsemi_resume (struct pci_dev *pdev)
3320 3331
3321 natsemi_reset(dev); 3332 natsemi_reset(dev);
3322 init_ring(dev); 3333 init_ring(dev);
3323 disable_irq(dev->irq); 3334 disable_irq(irq);
3324 spin_lock_irq(&np->lock); 3335 spin_lock_irq(&np->lock);
3325 np->hands_off = 0; 3336 np->hands_off = 0;
3326 init_registers(dev); 3337 init_registers(dev);
3327 netif_device_attach(dev); 3338 netif_device_attach(dev);
3328 spin_unlock_irq(&np->lock); 3339 spin_unlock_irq(&np->lock);
3329 enable_irq(dev->irq); 3340 enable_irq(irq);
3330 3341
3331 mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ)); 3342 mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
3332 } 3343 }
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6338ef8606ae..bb367582c1e8 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2846,6 +2846,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
2846static void s2io_netpoll(struct net_device *dev) 2846static void s2io_netpoll(struct net_device *dev)
2847{ 2847{
2848 struct s2io_nic *nic = netdev_priv(dev); 2848 struct s2io_nic *nic = netdev_priv(dev);
2849 const int irq = nic->pdev->irq;
2849 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2850 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2850 u64 val64 = 0xFFFFFFFFFFFFFFFFULL; 2851 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2851 int i; 2852 int i;
@@ -2855,7 +2856,7 @@ static void s2io_netpoll(struct net_device *dev)
2855 if (pci_channel_offline(nic->pdev)) 2856 if (pci_channel_offline(nic->pdev))
2856 return; 2857 return;
2857 2858
2858 disable_irq(dev->irq); 2859 disable_irq(irq);
2859 2860
2860 writeq(val64, &bar0->rx_traffic_int); 2861 writeq(val64, &bar0->rx_traffic_int);
2861 writeq(val64, &bar0->tx_traffic_int); 2862 writeq(val64, &bar0->tx_traffic_int);
@@ -2884,7 +2885,7 @@ static void s2io_netpoll(struct net_device *dev)
2884 break; 2885 break;
2885 } 2886 }
2886 } 2887 }
2887 enable_irq(dev->irq); 2888 enable_irq(irq);
2888} 2889}
2889#endif 2890#endif
2890 2891
@@ -3897,9 +3898,7 @@ static void remove_msix_isr(struct s2io_nic *sp)
3897 3898
3898static void remove_inta_isr(struct s2io_nic *sp) 3899static void remove_inta_isr(struct s2io_nic *sp)
3899{ 3900{
3900 struct net_device *dev = sp->dev; 3901 free_irq(sp->pdev->irq, sp->dev);
3901
3902 free_irq(sp->pdev->irq, dev);
3903} 3902}
3904 3903
3905/* ********************************************************* * 3904/* ********************************************************* *
@@ -7046,7 +7045,7 @@ static int s2io_add_isr(struct s2io_nic *sp)
7046 } 7045 }
7047 } 7046 }
7048 if (sp->config.intr_type == INTA) { 7047 if (sp->config.intr_type == INTA) {
7049 err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED, 7048 err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
7050 sp->name, dev); 7049 sp->name, dev);
7051 if (err) { 7050 if (err) {
7052 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", 7051 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
@@ -7908,9 +7907,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7908 goto bar1_remap_failed; 7907 goto bar1_remap_failed;
7909 } 7908 }
7910 7909
7911 dev->irq = pdev->irq;
7912 dev->base_addr = (unsigned long)sp->bar0;
7913
7914 /* Initializing the BAR1 address as the start of the FIFO pointer. */ 7910 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7915 for (j = 0; j < MAX_TX_FIFOS; j++) { 7911 for (j = 0; j < MAX_TX_FIFOS; j++) {
7916 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000); 7912 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index ef76725454d2..51387c31914b 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1882,25 +1882,24 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1882 */ 1882 */
1883static void vxge_netpoll(struct net_device *dev) 1883static void vxge_netpoll(struct net_device *dev)
1884{ 1884{
1885 struct __vxge_hw_device *hldev; 1885 struct vxgedev *vdev = netdev_priv(dev);
1886 struct vxgedev *vdev; 1886 struct pci_dev *pdev = vdev->pdev;
1887 1887 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
1888 vdev = netdev_priv(dev); 1888 const int irq = pdev->irq;
1889 hldev = pci_get_drvdata(vdev->pdev);
1890 1889
1891 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1890 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1892 1891
1893 if (pci_channel_offline(vdev->pdev)) 1892 if (pci_channel_offline(pdev))
1894 return; 1893 return;
1895 1894
1896 disable_irq(dev->irq); 1895 disable_irq(irq);
1897 vxge_hw_device_clear_tx_rx(hldev); 1896 vxge_hw_device_clear_tx_rx(hldev);
1898 1897
1899 vxge_hw_device_clear_tx_rx(hldev); 1898 vxge_hw_device_clear_tx_rx(hldev);
1900 VXGE_COMPLETE_ALL_RX(vdev); 1899 VXGE_COMPLETE_ALL_RX(vdev);
1901 VXGE_COMPLETE_ALL_TX(vdev); 1900 VXGE_COMPLETE_ALL_TX(vdev);
1902 1901
1903 enable_irq(dev->irq); 1902 enable_irq(irq);
1904 1903
1905 vxge_debug_entryexit(VXGE_TRACE, 1904 vxge_debug_entryexit(VXGE_TRACE,
1906 "%s:%d Exiting...", __func__, __LINE__); 1905 "%s:%d Exiting...", __func__, __LINE__);
@@ -2860,12 +2859,12 @@ static int vxge_open(struct net_device *dev)
2860 vdev->config.rx_pause_enable); 2859 vdev->config.rx_pause_enable);
2861 2860
2862 if (vdev->vp_reset_timer.function == NULL) 2861 if (vdev->vp_reset_timer.function == NULL)
2863 vxge_os_timer(vdev->vp_reset_timer, 2862 vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev,
2864 vxge_poll_vp_reset, vdev, (HZ/2)); 2863 HZ / 2);
2865 2864
2866 /* There is no need to check for RxD leak and RxD lookup on Titan1A */ 2865 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2867 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL) 2866 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2868 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev, 2867 vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2869 HZ / 2); 2868 HZ / 2);
2870 2869
2871 set_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2870 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -3424,9 +3423,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3424 ndev->features |= ndev->hw_features | 3423 ndev->features |= ndev->hw_features |
3425 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; 3424 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3426 3425
3427 /* Driver entry points */
3428 ndev->irq = vdev->pdev->irq;
3429 ndev->base_addr = (unsigned long) hldev->bar0;
3430 3426
3431 ndev->netdev_ops = &vxge_netdev_ops; 3427 ndev->netdev_ops = &vxge_netdev_ops;
3432 3428
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index f52a42d1dbb7..35f3e7552ec2 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -416,12 +416,15 @@ struct vxge_tx_priv {
416 static int p = val; \ 416 static int p = val; \
417 module_param(p, int, 0) 417 module_param(p, int, 0)
418 418
419#define vxge_os_timer(timer, handle, arg, exp) do { \ 419static inline
420 init_timer(&timer); \ 420void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
421 timer.function = handle; \ 421 struct vxgedev *vdev, unsigned long timeout)
422 timer.data = (unsigned long) arg; \ 422{
423 mod_timer(&timer, (jiffies + exp)); \ 423 init_timer(timer);
424 } while (0); 424 timer->function = func;
425 timer->data = (unsigned long)vdev;
426 mod_timer(timer, jiffies + timeout);
427}
425 428
426void vxge_initialize_ethtool_ops(struct net_device *ndev); 429void vxge_initialize_ethtool_ops(struct net_device *ndev);
427enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); 430enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index aca13046e432..928913c4f3ff 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2279,6 +2279,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2279 2279
2280 netdev_sent_queue(np->dev, skb->len); 2280 netdev_sent_queue(np->dev, skb->len);
2281 2281
2282 skb_tx_timestamp(skb);
2283
2282 np->put_tx.orig = put_tx; 2284 np->put_tx.orig = put_tx;
2283 2285
2284 spin_unlock_irqrestore(&np->lock, flags); 2286 spin_unlock_irqrestore(&np->lock, flags);
@@ -2426,6 +2428,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2426 2428
2427 netdev_sent_queue(np->dev, skb->len); 2429 netdev_sent_queue(np->dev, skb->len);
2428 2430
2431 skb_tx_timestamp(skb);
2432
2429 np->put_tx.ex = put_tx; 2433 np->put_tx.ex = put_tx;
2430 2434
2431 spin_unlock_irqrestore(&np->lock, flags); 2435 spin_unlock_irqrestore(&np->lock, flags);
@@ -3942,13 +3946,11 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3942 ret = pci_enable_msi(np->pci_dev); 3946 ret = pci_enable_msi(np->pci_dev);
3943 if (ret == 0) { 3947 if (ret == 0) {
3944 np->msi_flags |= NV_MSI_ENABLED; 3948 np->msi_flags |= NV_MSI_ENABLED;
3945 dev->irq = np->pci_dev->irq;
3946 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3949 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3947 netdev_info(dev, "request_irq failed %d\n", 3950 netdev_info(dev, "request_irq failed %d\n",
3948 ret); 3951 ret);
3949 pci_disable_msi(np->pci_dev); 3952 pci_disable_msi(np->pci_dev);
3950 np->msi_flags &= ~NV_MSI_ENABLED; 3953 np->msi_flags &= ~NV_MSI_ENABLED;
3951 dev->irq = np->pci_dev->irq;
3952 goto out_err; 3954 goto out_err;
3953 } 3955 }
3954 3956
@@ -5649,9 +5651,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5649 np->base = ioremap(addr, np->register_size); 5651 np->base = ioremap(addr, np->register_size);
5650 if (!np->base) 5652 if (!np->base)
5651 goto out_relreg; 5653 goto out_relreg;
5652 dev->base_addr = (unsigned long)np->base;
5653
5654 dev->irq = pci_dev->irq;
5655 5654
5656 np->rx_ring_size = RX_RING_DEFAULT; 5655 np->rx_ring_size = RX_RING_DEFAULT;
5657 np->tx_ring_size = TX_RING_DEFAULT; 5656 np->tx_ring_size = TX_RING_DEFAULT;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 6dfc26d85e47..d3469d8e3f0d 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -990,10 +990,10 @@ static int __lpc_handle_recv(struct net_device *ndev, int budget)
990 ndev->stats.rx_errors++; 990 ndev->stats.rx_errors++;
991 } else { 991 } else {
992 /* Packet is good */ 992 /* Packet is good */
993 skb = dev_alloc_skb(len + 8); 993 skb = dev_alloc_skb(len);
994 if (!skb) 994 if (!skb) {
995 ndev->stats.rx_dropped++; 995 ndev->stats.rx_dropped++;
996 else { 996 } else {
997 prdbuf = skb_put(skb, len); 997 prdbuf = skb_put(skb, len);
998 998
999 /* Copy packet from buffer */ 999 /* Copy packet from buffer */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index dd14915f54bb..b07311eaa693 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -584,7 +584,6 @@ struct pch_gbe_hw_stats {
584/** 584/**
585 * struct pch_gbe_adapter - board specific private data structure 585 * struct pch_gbe_adapter - board specific private data structure
586 * @stats_lock: Spinlock structure for status 586 * @stats_lock: Spinlock structure for status
587 * @tx_queue_lock: Spinlock structure for transmit
588 * @ethtool_lock: Spinlock structure for ethtool 587 * @ethtool_lock: Spinlock structure for ethtool
589 * @irq_sem: Semaphore for interrupt 588 * @irq_sem: Semaphore for interrupt
590 * @netdev: Pointer of network device structure 589 * @netdev: Pointer of network device structure
@@ -609,7 +608,6 @@ struct pch_gbe_hw_stats {
609 608
610struct pch_gbe_adapter { 609struct pch_gbe_adapter {
611 spinlock_t stats_lock; 610 spinlock_t stats_lock;
612 spinlock_t tx_queue_lock;
613 spinlock_t ethtool_lock; 611 spinlock_t ethtool_lock;
614 atomic_t irq_sem; 612 atomic_t irq_sem;
615 struct net_device *netdev; 613 struct net_device *netdev;
@@ -660,6 +658,7 @@ extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
660extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev); 658extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
661extern u64 pch_rx_snap_read(struct pci_dev *pdev); 659extern u64 pch_rx_snap_read(struct pci_dev *pdev);
662extern u64 pch_tx_snap_read(struct pci_dev *pdev); 660extern u64 pch_tx_snap_read(struct pci_dev *pdev);
661extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
663#endif 662#endif
664 663
665/* pch_gbe_param.c */ 664/* pch_gbe_param.c */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 8035e5ff6e06..3787c64ee71c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -79,7 +79,6 @@ const char pch_driver_version[] = DRV_VERSION;
79#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888 79#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
80#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF 80#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
81 81
82#define PCH_GBE_ETH_ALEN 6
83 82
84/* This defines the bits that are set in the Interrupt Mask 83/* This defines the bits that are set in the Interrupt Mask
85 * Set/Read Register. Each bit is documented below: 84 * Set/Read Register. Each bit is documented below:
@@ -101,18 +100,19 @@ const char pch_driver_version[] = DRV_VERSION;
101 100
102#ifdef CONFIG_PCH_PTP 101#ifdef CONFIG_PCH_PTP
103/* Macros for ieee1588 */ 102/* Macros for ieee1588 */
104#define TICKS_NS_SHIFT 5
105
106/* 0x40 Time Synchronization Channel Control Register Bits */ 103/* 0x40 Time Synchronization Channel Control Register Bits */
107#define MASTER_MODE (1<<0) 104#define MASTER_MODE (1<<0)
108#define SLAVE_MODE (0<<0) 105#define SLAVE_MODE (0)
109#define V2_MODE (1<<31) 106#define V2_MODE (1<<31)
110#define CAP_MODE0 (0<<16) 107#define CAP_MODE0 (0)
111#define CAP_MODE2 (1<<17) 108#define CAP_MODE2 (1<<17)
112 109
113/* 0x44 Time Synchronization Channel Event Register Bits */ 110/* 0x44 Time Synchronization Channel Event Register Bits */
114#define TX_SNAPSHOT_LOCKED (1<<0) 111#define TX_SNAPSHOT_LOCKED (1<<0)
115#define RX_SNAPSHOT_LOCKED (1<<1) 112#define RX_SNAPSHOT_LOCKED (1<<1)
113
114#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
115#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
116#endif 116#endif
117 117
118static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 118static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
@@ -120,6 +120,7 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
120static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); 120static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
121static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, 121static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
122 int data); 122 int data);
123static void pch_gbe_set_multi(struct net_device *netdev);
123 124
124#ifdef CONFIG_PCH_PTP 125#ifdef CONFIG_PCH_PTP
125static struct sock_filter ptp_filter[] = { 126static struct sock_filter ptp_filter[] = {
@@ -133,10 +134,8 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
133 u16 *hi, *id; 134 u16 *hi, *id;
134 u32 lo; 135 u32 lo;
135 136
136 if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) && 137 if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
137 (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
138 return 0; 138 return 0;
139 }
140 139
141 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 140 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
142 141
@@ -153,8 +152,8 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
153 seqid == *id); 152 seqid == *id);
154} 153}
155 154
156static void pch_rx_timestamp( 155static void
157 struct pch_gbe_adapter *adapter, struct sk_buff *skb) 156pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
158{ 157{
159 struct skb_shared_hwtstamps *shhwtstamps; 158 struct skb_shared_hwtstamps *shhwtstamps;
160 struct pci_dev *pdev; 159 struct pci_dev *pdev;
@@ -183,7 +182,6 @@ static void pch_rx_timestamp(
183 goto out; 182 goto out;
184 183
185 ns = pch_rx_snap_read(pdev); 184 ns = pch_rx_snap_read(pdev);
186 ns <<= TICKS_NS_SHIFT;
187 185
188 shhwtstamps = skb_hwtstamps(skb); 186 shhwtstamps = skb_hwtstamps(skb);
189 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 187 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
@@ -192,8 +190,8 @@ out:
192 pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED); 190 pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
193} 191}
194 192
195static void pch_tx_timestamp( 193static void
196 struct pch_gbe_adapter *adapter, struct sk_buff *skb) 194pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
197{ 195{
198 struct skb_shared_hwtstamps shhwtstamps; 196 struct skb_shared_hwtstamps shhwtstamps;
199 struct pci_dev *pdev; 197 struct pci_dev *pdev;
@@ -202,17 +200,16 @@ static void pch_tx_timestamp(
202 u32 cnt, val; 200 u32 cnt, val;
203 201
204 shtx = skb_shinfo(skb); 202 shtx = skb_shinfo(skb);
205 if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)) 203 if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
206 shtx->tx_flags |= SKBTX_IN_PROGRESS;
207 else
208 return; 204 return;
209 205
206 shtx->tx_flags |= SKBTX_IN_PROGRESS;
207
210 /* Get ieee1588's dev information */ 208 /* Get ieee1588's dev information */
211 pdev = adapter->ptp_pdev; 209 pdev = adapter->ptp_pdev;
212 210
213 /* 211 /*
214 * This really stinks, but we have to poll for the Tx time stamp. 212 * This really stinks, but we have to poll for the Tx time stamp.
215 * Usually, the time stamp is ready after 4 to 6 microseconds.
216 */ 213 */
217 for (cnt = 0; cnt < 100; cnt++) { 214 for (cnt = 0; cnt < 100; cnt++) {
218 val = pch_ch_event_read(pdev); 215 val = pch_ch_event_read(pdev);
@@ -226,7 +223,6 @@ static void pch_tx_timestamp(
226 } 223 }
227 224
228 ns = pch_tx_snap_read(pdev); 225 ns = pch_tx_snap_read(pdev);
229 ns <<= TICKS_NS_SHIFT;
230 226
231 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 227 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
232 shhwtstamps.hwtstamp = ns_to_ktime(ns); 228 shhwtstamps.hwtstamp = ns_to_ktime(ns);
@@ -240,6 +236,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
240 struct hwtstamp_config cfg; 236 struct hwtstamp_config cfg;
241 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 237 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
242 struct pci_dev *pdev; 238 struct pci_dev *pdev;
239 u8 station[20];
243 240
244 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 241 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
245 return -EFAULT; 242 return -EFAULT;
@@ -267,15 +264,23 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
267 break; 264 break;
268 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 265 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
269 adapter->hwts_rx_en = 0; 266 adapter->hwts_rx_en = 0;
270 pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0)); 267 pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
271 break; 268 break;
272 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 269 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
273 adapter->hwts_rx_en = 1; 270 adapter->hwts_rx_en = 1;
274 pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0)); 271 pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
272 break;
273 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
274 adapter->hwts_rx_en = 1;
275 pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
276 strcpy(station, PTP_L4_MULTICAST_SA);
277 pch_set_station_address(station, pdev);
275 break; 278 break;
276 case HWTSTAMP_FILTER_PTP_V2_EVENT: 279 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
277 adapter->hwts_rx_en = 1; 280 adapter->hwts_rx_en = 1;
278 pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2)); 281 pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
282 strcpy(station, PTP_L2_MULTICAST_SA);
283 pch_set_station_address(station, pdev);
279 break; 284 break;
280 default: 285 default:
281 return -ERANGE; 286 return -ERANGE;
@@ -399,18 +404,18 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
399 iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE); 404 iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
400#endif 405#endif
401 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); 406 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
402 /* Setup the receive address */ 407 /* Setup the receive addresses */
403 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 408 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
404 return; 409 return;
405} 410}
406 411
407static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) 412static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
408{ 413{
409 /* Read the MAC address. and store to the private data */ 414 /* Read the MAC addresses. and store to the private data */
410 pch_gbe_mac_read_mac_addr(hw); 415 pch_gbe_mac_read_mac_addr(hw);
411 iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); 416 iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
412 pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); 417 pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
413 /* Setup the MAC address */ 418 /* Setup the MAC addresses */
414 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 419 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
415 return; 420 return;
416} 421}
@@ -460,7 +465,7 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
460 if (mc_addr_count) { 465 if (mc_addr_count) {
461 pch_gbe_mac_mar_set(hw, mc_addr_list, i); 466 pch_gbe_mac_mar_set(hw, mc_addr_list, i);
462 mc_addr_count--; 467 mc_addr_count--;
463 mc_addr_list += PCH_GBE_ETH_ALEN; 468 mc_addr_list += ETH_ALEN;
464 } else { 469 } else {
465 /* Clear MAC address mask */ 470 /* Clear MAC address mask */
466 adrmask = ioread32(&hw->reg->ADDR_MASK); 471 adrmask = ioread32(&hw->reg->ADDR_MASK);
@@ -640,14 +645,11 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
640 */ 645 */
641static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter) 646static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
642{ 647{
643 int size; 648 adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
644
645 size = (int)sizeof(struct pch_gbe_tx_ring);
646 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
647 if (!adapter->tx_ring) 649 if (!adapter->tx_ring)
648 return -ENOMEM; 650 return -ENOMEM;
649 size = (int)sizeof(struct pch_gbe_rx_ring); 651
650 adapter->rx_ring = kzalloc(size, GFP_KERNEL); 652 adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
651 if (!adapter->rx_ring) { 653 if (!adapter->rx_ring) {
652 kfree(adapter->tx_ring); 654 kfree(adapter->tx_ring);
653 return -ENOMEM; 655 return -ENOMEM;
@@ -778,6 +780,8 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
778void pch_gbe_reset(struct pch_gbe_adapter *adapter) 780void pch_gbe_reset(struct pch_gbe_adapter *adapter)
779{ 781{
780 pch_gbe_mac_reset_hw(&adapter->hw); 782 pch_gbe_mac_reset_hw(&adapter->hw);
783 /* reprogram multicast address register after reset */
784 pch_gbe_set_multi(adapter->netdev);
781 /* Setup the receive address. */ 785 /* Setup the receive address. */
782 pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); 786 pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
783 if (pch_gbe_hal_init_hw(&adapter->hw)) 787 if (pch_gbe_hal_init_hw(&adapter->hw))
@@ -1162,7 +1166,6 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1162 struct sk_buff *tmp_skb; 1166 struct sk_buff *tmp_skb;
1163 unsigned int frame_ctrl; 1167 unsigned int frame_ctrl;
1164 unsigned int ring_num; 1168 unsigned int ring_num;
1165 unsigned long flags;
1166 1169
1167 /*-- Set frame control --*/ 1170 /*-- Set frame control --*/
1168 frame_ctrl = 0; 1171 frame_ctrl = 0;
@@ -1182,8 +1185,6 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1182 if (skb->protocol == htons(ETH_P_IP)) { 1185 if (skb->protocol == htons(ETH_P_IP)) {
1183 struct iphdr *iph = ip_hdr(skb); 1186 struct iphdr *iph = ip_hdr(skb);
1184 unsigned int offset; 1187 unsigned int offset;
1185 iph->check = 0;
1186 iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
1187 offset = skb_transport_offset(skb); 1188 offset = skb_transport_offset(skb);
1188 if (iph->protocol == IPPROTO_TCP) { 1189 if (iph->protocol == IPPROTO_TCP) {
1189 skb->csum = 0; 1190 skb->csum = 0;
@@ -1211,14 +1212,14 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1211 } 1212 }
1212 } 1213 }
1213 } 1214 }
1214 spin_lock_irqsave(&tx_ring->tx_lock, flags); 1215
1215 ring_num = tx_ring->next_to_use; 1216 ring_num = tx_ring->next_to_use;
1216 if (unlikely((ring_num + 1) == tx_ring->count)) 1217 if (unlikely((ring_num + 1) == tx_ring->count))
1217 tx_ring->next_to_use = 0; 1218 tx_ring->next_to_use = 0;
1218 else 1219 else
1219 tx_ring->next_to_use = ring_num + 1; 1220 tx_ring->next_to_use = ring_num + 1;
1220 1221
1221 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 1222
1222 buffer_info = &tx_ring->buffer_info[ring_num]; 1223 buffer_info = &tx_ring->buffer_info[ring_num];
1223 tmp_skb = buffer_info->skb; 1224 tmp_skb = buffer_info->skb;
1224 1225
@@ -1342,6 +1343,8 @@ static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
1342 /* Stop Receive */ 1343 /* Stop Receive */
1343 pch_gbe_mac_reset_rx(hw); 1344 pch_gbe_mac_reset_rx(hw);
1344 } 1345 }
1346 /* reprogram multicast address register after reset */
1347 pch_gbe_set_multi(adapter->netdev);
1345} 1348}
1346 1349
1347static void pch_gbe_start_receive(struct pch_gbe_hw *hw) 1350static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
@@ -1518,7 +1521,7 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1518 &rx_ring->rx_buff_pool_logic, 1521 &rx_ring->rx_buff_pool_logic,
1519 GFP_KERNEL); 1522 GFP_KERNEL);
1520 if (!rx_ring->rx_buff_pool) { 1523 if (!rx_ring->rx_buff_pool) {
1521 pr_err("Unable to allocate memory for the receive poll buffer\n"); 1524 pr_err("Unable to allocate memory for the receive pool buffer\n");
1522 return -ENOMEM; 1525 return -ENOMEM;
1523 } 1526 }
1524 memset(rx_ring->rx_buff_pool, 0, size); 1527 memset(rx_ring->rx_buff_pool, 0, size);
@@ -1637,15 +1640,17 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1637 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", 1640 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1638 cleaned_count); 1641 cleaned_count);
1639 /* Recover from running out of Tx resources in xmit_frame */ 1642 /* Recover from running out of Tx resources in xmit_frame */
1643 spin_lock(&tx_ring->tx_lock);
1640 if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) { 1644 if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
1641 netif_wake_queue(adapter->netdev); 1645 netif_wake_queue(adapter->netdev);
1642 adapter->stats.tx_restart_count++; 1646 adapter->stats.tx_restart_count++;
1643 pr_debug("Tx wake queue\n"); 1647 pr_debug("Tx wake queue\n");
1644 } 1648 }
1645 spin_lock(&adapter->tx_queue_lock); 1649
1646 tx_ring->next_to_clean = i; 1650 tx_ring->next_to_clean = i;
1647 spin_unlock(&adapter->tx_queue_lock); 1651
1648 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); 1652 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1653 spin_unlock(&tx_ring->tx_lock);
1649 return cleaned; 1654 return cleaned;
1650} 1655}
1651 1656
@@ -1924,7 +1929,6 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1924} 1929}
1925 1930
1926 1931
1927static void pch_gbe_set_multi(struct net_device *netdev);
1928/** 1932/**
1929 * pch_gbe_up - Up GbE network device 1933 * pch_gbe_up - Up GbE network device
1930 * @adapter: Board private structure 1934 * @adapter: Board private structure
@@ -2037,7 +2041,6 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
2037 return -ENOMEM; 2041 return -ENOMEM;
2038 } 2042 }
2039 spin_lock_init(&adapter->hw.miim_lock); 2043 spin_lock_init(&adapter->hw.miim_lock);
2040 spin_lock_init(&adapter->tx_queue_lock);
2041 spin_lock_init(&adapter->stats_lock); 2044 spin_lock_init(&adapter->stats_lock);
2042 spin_lock_init(&adapter->ethtool_lock); 2045 spin_lock_init(&adapter->ethtool_lock);
2043 atomic_set(&adapter->irq_sem, 0); 2046 atomic_set(&adapter->irq_sem, 0);
@@ -2142,10 +2145,10 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2142 tx_ring->next_to_use, tx_ring->next_to_clean); 2145 tx_ring->next_to_use, tx_ring->next_to_clean);
2143 return NETDEV_TX_BUSY; 2146 return NETDEV_TX_BUSY;
2144 } 2147 }
2145 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2146 2148
2147 /* CRC,ITAG no support */ 2149 /* CRC,ITAG no support */
2148 pch_gbe_tx_queue(adapter, tx_ring, skb); 2150 pch_gbe_tx_queue(adapter, tx_ring, skb);
2151 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2149 return NETDEV_TX_OK; 2152 return NETDEV_TX_OK;
2150} 2153}
2151 2154
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 0d29f5f4b8e4..c2367158350e 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -683,8 +683,6 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
683 } 683 }
684 684
685 hmp->base = ioaddr; 685 hmp->base = ioaddr;
686 dev->base_addr = (unsigned long)ioaddr;
687 dev->irq = irq;
688 pci_set_drvdata(pdev, dev); 686 pci_set_drvdata(pdev, dev);
689 687
690 hmp->chip_id = chip_id; 688 hmp->chip_id = chip_id;
@@ -859,14 +857,11 @@ static int hamachi_open(struct net_device *dev)
859 u32 rx_int_var, tx_int_var; 857 u32 rx_int_var, tx_int_var;
860 u16 fifo_info; 858 u16 fifo_info;
861 859
862 i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev); 860 i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED,
861 dev->name, dev);
863 if (i) 862 if (i)
864 return i; 863 return i;
865 864
866 if (hamachi_debug > 1)
867 printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
868 dev->name, dev->irq);
869
870 hamachi_init_ring(dev); 865 hamachi_init_ring(dev);
871 866
872#if ADDRLEN == 64 867#if ADDRLEN == 64
@@ -1705,7 +1700,7 @@ static int hamachi_close(struct net_device *dev)
1705 } 1700 }
1706#endif /* __i386__ debugging only */ 1701#endif /* __i386__ debugging only */
1707 1702
1708 free_irq(dev->irq, dev); 1703 free_irq(hmp->pci_dev->irq, dev);
1709 1704
1710 del_timer_sync(&hmp->timer); 1705 del_timer_sync(&hmp->timer);
1711 1706
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 7757b80ef924..04e622fd468d 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -427,9 +427,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
427 /* Reset the chip. */ 427 /* Reset the chip. */
428 iowrite32(0x80000000, ioaddr + DMACtrl); 428 iowrite32(0x80000000, ioaddr + DMACtrl);
429 429
430 dev->base_addr = (unsigned long)ioaddr;
431 dev->irq = irq;
432
433 pci_set_drvdata(pdev, dev); 430 pci_set_drvdata(pdev, dev);
434 spin_lock_init(&np->lock); 431 spin_lock_init(&np->lock);
435 432
@@ -569,25 +566,20 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value
569static int yellowfin_open(struct net_device *dev) 566static int yellowfin_open(struct net_device *dev)
570{ 567{
571 struct yellowfin_private *yp = netdev_priv(dev); 568 struct yellowfin_private *yp = netdev_priv(dev);
569 const int irq = yp->pci_dev->irq;
572 void __iomem *ioaddr = yp->base; 570 void __iomem *ioaddr = yp->base;
573 int i, ret; 571 int i, rc;
574 572
575 /* Reset the chip. */ 573 /* Reset the chip. */
576 iowrite32(0x80000000, ioaddr + DMACtrl); 574 iowrite32(0x80000000, ioaddr + DMACtrl);
577 575
578 ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev); 576 rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
579 if (ret) 577 if (rc)
580 return ret; 578 return rc;
581
582 if (yellowfin_debug > 1)
583 netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
584 __func__, dev->irq);
585 579
586 ret = yellowfin_init_ring(dev); 580 rc = yellowfin_init_ring(dev);
587 if (ret) { 581 if (rc < 0)
588 free_irq(dev->irq, dev); 582 goto err_free_irq;
589 return ret;
590 }
591 583
592 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); 584 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
593 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); 585 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -647,8 +639,12 @@ static int yellowfin_open(struct net_device *dev)
647 yp->timer.data = (unsigned long)dev; 639 yp->timer.data = (unsigned long)dev;
648 yp->timer.function = yellowfin_timer; /* timer handler */ 640 yp->timer.function = yellowfin_timer; /* timer handler */
649 add_timer(&yp->timer); 641 add_timer(&yp->timer);
642out:
643 return rc;
650 644
651 return 0; 645err_free_irq:
646 free_irq(irq, dev);
647 goto out;
652} 648}
653 649
654static void yellowfin_timer(unsigned long data) 650static void yellowfin_timer(unsigned long data)
@@ -1251,7 +1247,7 @@ static int yellowfin_close(struct net_device *dev)
1251 } 1247 }
1252#endif /* __i386__ debugging only */ 1248#endif /* __i386__ debugging only */
1253 1249
1254 free_irq(dev->irq, dev); 1250 free_irq(yp->pci_dev->irq, dev);
1255 1251
1256 /* Free all the skbuffs in the Rx queue. */ 1252 /* Free all the skbuffs in the Rx queue. */
1257 for (i = 0; i < RX_RING_SIZE; i++) { 1253 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index ddc95b0ac78d..e559dfa06d6a 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -623,7 +623,7 @@ static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
623 mac->rx = NULL; 623 mac->rx = NULL;
624} 624}
625 625
626static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, 626static void pasemi_mac_replenish_rx_ring(struct net_device *dev,
627 const int limit) 627 const int limit)
628{ 628{
629 const struct pasemi_mac *mac = netdev_priv(dev); 629 const struct pasemi_mac *mac = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index b5de8a7b90f1..37ccbe54e62d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 78 56#define _NETXEN_NIC_LINUX_SUBVERSION 79
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.78" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.79"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
@@ -419,6 +419,8 @@ struct rcv_desc {
419 (((sts_data) >> 52) & 0x1) 419 (((sts_data) >> 52) & 0x1)
420#define netxen_get_lro_sts_seq_number(sts_data) \ 420#define netxen_get_lro_sts_seq_number(sts_data) \
421 ((sts_data) & 0x0FFFFFFFF) 421 ((sts_data) & 0x0FFFFFFFF)
422#define netxen_get_lro_sts_mss(sts_data1) \
423 ((sts_data1 >> 32) & 0x0FFFF)
422 424
423 425
424struct status_desc { 426struct status_desc {
@@ -794,6 +796,7 @@ struct netxen_cmd_args {
794#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7) 796#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7)
795#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8) 797#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8)
796#define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10) 798#define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10)
799#define NX_CAP0_HW_LRO_MSS NX_CAP_BIT(0, 21)
797 800
798/* 801/*
799 * Context state 802 * Context state
@@ -1073,6 +1076,8 @@ typedef struct {
1073#define NX_FW_CAPABILITY_FVLANTX (1 << 9) 1076#define NX_FW_CAPABILITY_FVLANTX (1 << 9)
1074#define NX_FW_CAPABILITY_HW_LRO (1 << 10) 1077#define NX_FW_CAPABILITY_HW_LRO (1 << 10)
1075#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11) 1078#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11)
1079#define NX_FW_CAPABILITY_MORE_CAPS (1 << 31)
1080#define NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG (1 << 2)
1076 1081
1077/* module types */ 1082/* module types */
1078#define LINKEVENT_MODULE_NOT_PRESENT 1 1083#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -1155,6 +1160,7 @@ typedef struct {
1155#define NETXEN_NIC_BRIDGE_ENABLED 0X10 1160#define NETXEN_NIC_BRIDGE_ENABLED 0X10
1156#define NETXEN_NIC_DIAG_ENABLED 0x20 1161#define NETXEN_NIC_DIAG_ENABLED 0x20
1157#define NETXEN_FW_RESET_OWNER 0x40 1162#define NETXEN_FW_RESET_OWNER 0x40
1163#define NETXEN_FW_MSS_CAP 0x80
1158#define NETXEN_IS_MSI_FAMILY(adapter) \ 1164#define NETXEN_IS_MSI_FAMILY(adapter) \
1159 ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) 1165 ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
1160 1166
@@ -1201,6 +1207,9 @@ typedef struct {
1201#define NX_FORCE_FW_RESET 0xdeaddead 1207#define NX_FORCE_FW_RESET 0xdeaddead
1202 1208
1203 1209
1210/* Fw dump levels */
1211static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
1212
1204/* Flash read/write address */ 1213/* Flash read/write address */
1205#define NX_FW_DUMP_REG1 0x00130060 1214#define NX_FW_DUMP_REG1 0x00130060
1206#define NX_FW_DUMP_REG2 0x001e0000 1215#define NX_FW_DUMP_REG2 0x001e0000
@@ -1814,6 +1823,13 @@ struct netxen_brdinfo {
1814 char short_name[NETXEN_MAX_SHORT_NAME]; 1823 char short_name[NETXEN_MAX_SHORT_NAME];
1815}; 1824};
1816 1825
1826struct netxen_dimm_cfg {
1827 u8 presence;
1828 u8 mem_type;
1829 u8 dimm_type;
1830 u32 size;
1831};
1832
1817static const struct netxen_brdinfo netxen_boards[] = { 1833static const struct netxen_brdinfo netxen_boards[] = {
1818 {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"}, 1834 {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"},
1819 {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"}, 1835 {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"},
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index f3c0057a802b..7f556a84925d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -229,7 +229,7 @@ netxen_setup_minidump(struct netxen_adapter *adapter)
229 adapter->mdump.md_template; 229 adapter->mdump.md_template;
230 adapter->mdump.md_capture_buff = NULL; 230 adapter->mdump.md_capture_buff = NULL;
231 adapter->mdump.fw_supports_md = 1; 231 adapter->mdump.fw_supports_md = 1;
232 adapter->mdump.md_enabled = 1; 232 adapter->mdump.md_enabled = 0;
233 233
234 return err; 234 return err;
235 235
@@ -328,6 +328,9 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
328 cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN); 328 cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
329 cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS); 329 cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
330 330
331 if (adapter->flags & NETXEN_FW_MSS_CAP)
332 cap |= NX_CAP0_HW_LRO_MSS;
333
331 prq->capabilities[0] = cpu_to_le32(cap); 334 prq->capabilities[0] = cpu_to_le32(cap);
332 prq->host_int_crb_mode = 335 prq->host_int_crb_mode =
333 cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); 336 cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 8c39299331a2..39730403782f 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -834,7 +834,7 @@ netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
834static int 834static int
835netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val) 835netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
836{ 836{
837 int ret = 0; 837 int i;
838 struct netxen_adapter *adapter = netdev_priv(netdev); 838 struct netxen_adapter *adapter = netdev_priv(netdev);
839 struct netxen_minidump *mdump = &adapter->mdump; 839 struct netxen_minidump *mdump = &adapter->mdump;
840 840
@@ -844,7 +844,7 @@ netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
844 mdump->md_enabled = 1; 844 mdump->md_enabled = 1;
845 if (adapter->fw_mdump_rdy) { 845 if (adapter->fw_mdump_rdy) {
846 netdev_info(netdev, "Previous dump not cleared, not forcing dump\n"); 846 netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
847 return ret; 847 return 0;
848 } 848 }
849 netdev_info(netdev, "Forcing a fw dump\n"); 849 netdev_info(netdev, "Forcing a fw dump\n");
850 nx_dev_request_reset(adapter); 850 nx_dev_request_reset(adapter);
@@ -867,19 +867,21 @@ netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
867 adapter->flags &= ~NETXEN_FW_RESET_OWNER; 867 adapter->flags &= ~NETXEN_FW_RESET_OWNER;
868 break; 868 break;
869 default: 869 default:
870 if (val->flag <= NX_DUMP_MASK_MAX && 870 for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
871 val->flag >= NX_DUMP_MASK_MIN) { 871 if (val->flag == FW_DUMP_LEVELS[i]) {
872 mdump->md_capture_mask = val->flag & 0xff; 872 mdump->md_capture_mask = val->flag;
873 netdev_info(netdev, "Driver mask changed to: 0x%x\n", 873 netdev_info(netdev,
874 "Driver mask changed to: 0x%x\n",
874 mdump->md_capture_mask); 875 mdump->md_capture_mask);
875 break; 876 return 0;
877 }
876 } 878 }
877 netdev_info(netdev, 879 netdev_info(netdev,
878 "Invalid dump level: 0x%x\n", val->flag); 880 "Invalid dump level: 0x%x\n", val->flag);
879 return -EINVAL; 881 return -EINVAL;
880 } 882 }
881 883
882 return ret; 884 return 0;
883} 885}
884 886
885static int 887static int
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index b1a897cd9a8d..28e076960bcb 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -776,6 +776,7 @@ enum {
776#define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8)) 776#define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8))
777 777
778#define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128)) 778#define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128))
779#define CRB_FW_CAPABILITIES_2 (NETXEN_CAM_RAM(0x12c))
779#define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0)) 780#define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0))
780 781
781/* 782/*
@@ -955,6 +956,31 @@ enum {
955#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138)) 956#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138))
956#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140)) 957#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140))
957 958
959/* MiniDIMM related macros */
960#define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258))
961#define NETXEN_DIMM_PRESENT 0x1
962#define NETXEN_DIMM_MEMTYPE_DDR2_SDRAM 0x2
963#define NETXEN_DIMM_SIZE 0x4
964#define NETXEN_DIMM_MEMTYPE(VAL) ((VAL >> 3) & 0xf)
965#define NETXEN_DIMM_NUMROWS(VAL) ((VAL >> 7) & 0xf)
966#define NETXEN_DIMM_NUMCOLS(VAL) ((VAL >> 11) & 0xf)
967#define NETXEN_DIMM_NUMRANKS(VAL) ((VAL >> 15) & 0x3)
968#define NETXEN_DIMM_DATAWIDTH(VAL) ((VAL >> 18) & 0x3)
969#define NETXEN_DIMM_NUMBANKS(VAL) ((VAL >> 21) & 0xf)
970#define NETXEN_DIMM_TYPE(VAL) ((VAL >> 25) & 0x3f)
971#define NETXEN_DIMM_VALID_FLAG 0x80000000
972
973#define NETXEN_DIMM_MEM_DDR2_SDRAM 0x8
974
975#define NETXEN_DIMM_STD_MEM_SIZE 512
976
977#define NETXEN_DIMM_TYPE_RDIMM 0x1
978#define NETXEN_DIMM_TYPE_UDIMM 0x2
979#define NETXEN_DIMM_TYPE_SO_DIMM 0x4
980#define NETXEN_DIMM_TYPE_Micro_DIMM 0x8
981#define NETXEN_DIMM_TYPE_Mini_RDIMM 0x10
982#define NETXEN_DIMM_TYPE_Mini_UDIMM 0x20
983
958/* Device State */ 984/* Device State */
959#define NX_DEV_COLD 1 985#define NX_DEV_COLD 1
960#define NX_DEV_INITALIZING 2 986#define NX_DEV_INITALIZING 2
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 718b27440351..0d725dc91bcb 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1131,7 +1131,6 @@ netxen_validate_firmware(struct netxen_adapter *adapter)
1131 _build(file_fw_ver)); 1131 _build(file_fw_ver));
1132 return -EINVAL; 1132 return -EINVAL;
1133 } 1133 }
1134
1135 val = nx_get_bios_version(adapter); 1134 val = nx_get_bios_version(adapter);
1136 netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); 1135 netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
1137 if ((__force u32)val != bios) { 1136 if ((__force u32)val != bios) {
@@ -1661,6 +1660,9 @@ netxen_process_lro(struct netxen_adapter *adapter,
1661 1660
1662 length = skb->len; 1661 length = skb->len;
1663 1662
1663 if (adapter->flags & NETXEN_FW_MSS_CAP)
1664 skb_shinfo(skb)->gso_size = netxen_get_lro_sts_mss(sts_data1);
1665
1664 netif_receive_skb(skb); 1666 netif_receive_skb(skb);
1665 1667
1666 adapter->stats.lro_pkts++; 1668 adapter->stats.lro_pkts++;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 65a718f9ccd3..342b3a79bd0f 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1184,6 +1184,7 @@ netxen_nic_attach(struct netxen_adapter *adapter)
1184 int err, ring; 1184 int err, ring;
1185 struct nx_host_rds_ring *rds_ring; 1185 struct nx_host_rds_ring *rds_ring;
1186 struct nx_host_tx_ring *tx_ring; 1186 struct nx_host_tx_ring *tx_ring;
1187 u32 capab2;
1187 1188
1188 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) 1189 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
1189 return 0; 1190 return 0;
@@ -1192,6 +1193,13 @@ netxen_nic_attach(struct netxen_adapter *adapter)
1192 if (err) 1193 if (err)
1193 return err; 1194 return err;
1194 1195
1196 adapter->flags &= ~NETXEN_FW_MSS_CAP;
1197 if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) {
1198 capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2);
1199 if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
1200 adapter->flags |= NETXEN_FW_MSS_CAP;
1201 }
1202
1195 err = netxen_napi_add(adapter, netdev); 1203 err = netxen_napi_add(adapter, netdev);
1196 if (err) 1204 if (err)
1197 return err; 1205 return err;
@@ -1810,7 +1818,6 @@ netxen_tso_check(struct net_device *netdev,
1810 flags = FLAGS_VLAN_TAGGED; 1818 flags = FLAGS_VLAN_TAGGED;
1811 1819
1812 } else if (vlan_tx_tag_present(skb)) { 1820 } else if (vlan_tx_tag_present(skb)) {
1813
1814 flags = FLAGS_VLAN_OOB; 1821 flags = FLAGS_VLAN_OOB;
1815 vid = vlan_tx_tag_get(skb); 1822 vid = vlan_tx_tag_get(skb);
1816 netxen_set_tx_vlan_tci(first_desc, vid); 1823 netxen_set_tx_vlan_tci(first_desc, vid);
@@ -2926,6 +2933,134 @@ static struct bin_attribute bin_attr_mem = {
2926 .write = netxen_sysfs_write_mem, 2933 .write = netxen_sysfs_write_mem,
2927}; 2934};
2928 2935
2936static ssize_t
2937netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
2938 struct bin_attribute *attr,
2939 char *buf, loff_t offset, size_t size)
2940{
2941 struct device *dev = container_of(kobj, struct device, kobj);
2942 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2943 struct net_device *netdev = adapter->netdev;
2944 struct netxen_dimm_cfg dimm;
2945 u8 dw, rows, cols, banks, ranks;
2946 u32 val;
2947
2948 if (size != sizeof(struct netxen_dimm_cfg)) {
2949 netdev_err(netdev, "Invalid size\n");
2950 return -1;
2951 }
2952
2953 memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
2954 val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY);
2955
2956 /* Checks if DIMM info is valid. */
2957 if (val & NETXEN_DIMM_VALID_FLAG) {
2958 netdev_err(netdev, "Invalid DIMM flag\n");
2959 dimm.presence = 0xff;
2960 goto out;
2961 }
2962
2963 rows = NETXEN_DIMM_NUMROWS(val);
2964 cols = NETXEN_DIMM_NUMCOLS(val);
2965 ranks = NETXEN_DIMM_NUMRANKS(val);
2966 banks = NETXEN_DIMM_NUMBANKS(val);
2967 dw = NETXEN_DIMM_DATAWIDTH(val);
2968
2969 dimm.presence = (val & NETXEN_DIMM_PRESENT);
2970
2971 /* Checks if DIMM info is present. */
2972 if (!dimm.presence) {
2973 netdev_err(netdev, "DIMM not present\n");
2974 goto out;
2975 }
2976
2977 dimm.dimm_type = NETXEN_DIMM_TYPE(val);
2978
2979 switch (dimm.dimm_type) {
2980 case NETXEN_DIMM_TYPE_RDIMM:
2981 case NETXEN_DIMM_TYPE_UDIMM:
2982 case NETXEN_DIMM_TYPE_SO_DIMM:
2983 case NETXEN_DIMM_TYPE_Micro_DIMM:
2984 case NETXEN_DIMM_TYPE_Mini_RDIMM:
2985 case NETXEN_DIMM_TYPE_Mini_UDIMM:
2986 break;
2987 default:
2988 netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type);
2989 goto out;
2990 }
2991
2992 if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM)
2993 dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM;
2994 else
2995 dimm.mem_type = NETXEN_DIMM_MEMTYPE(val);
2996
2997 if (val & NETXEN_DIMM_SIZE) {
2998 dimm.size = NETXEN_DIMM_STD_MEM_SIZE;
2999 goto out;
3000 }
3001
3002 if (!rows) {
3003 netdev_err(netdev, "Invalid no of rows %x\n", rows);
3004 goto out;
3005 }
3006
3007 if (!cols) {
3008 netdev_err(netdev, "Invalid no of columns %x\n", cols);
3009 goto out;
3010 }
3011
3012 if (!banks) {
3013 netdev_err(netdev, "Invalid no of banks %x\n", banks);
3014 goto out;
3015 }
3016
3017 ranks += 1;
3018
3019 switch (dw) {
3020 case 0x0:
3021 dw = 32;
3022 break;
3023 case 0x1:
3024 dw = 33;
3025 break;
3026 case 0x2:
3027 dw = 36;
3028 break;
3029 case 0x3:
3030 dw = 64;
3031 break;
3032 case 0x4:
3033 dw = 72;
3034 break;
3035 case 0x5:
3036 dw = 80;
3037 break;
3038 case 0x6:
3039 dw = 128;
3040 break;
3041 case 0x7:
3042 dw = 144;
3043 break;
3044 default:
3045 netdev_err(netdev, "Invalid data-width %x\n", dw);
3046 goto out;
3047 }
3048
3049 dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8;
3050 /* Size returned in MB. */
3051 dimm.size = (dimm.size) / 0x100000;
3052out:
3053 memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg));
3054 return sizeof(struct netxen_dimm_cfg);
3055
3056}
3057
3058static struct bin_attribute bin_attr_dimm = {
3059 .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
3060 .size = 0,
3061 .read = netxen_sysfs_read_dimm,
3062};
3063
2929 3064
2930static void 3065static void
2931netxen_create_sysfs_entries(struct netxen_adapter *adapter) 3066netxen_create_sysfs_entries(struct netxen_adapter *adapter)
@@ -2963,6 +3098,8 @@ netxen_create_diag_entries(struct netxen_adapter *adapter)
2963 dev_info(dev, "failed to create crb sysfs entry\n"); 3098 dev_info(dev, "failed to create crb sysfs entry\n");
2964 if (device_create_bin_file(dev, &bin_attr_mem)) 3099 if (device_create_bin_file(dev, &bin_attr_mem))
2965 dev_info(dev, "failed to create mem sysfs entry\n"); 3100 dev_info(dev, "failed to create mem sysfs entry\n");
3101 if (device_create_bin_file(dev, &bin_attr_dimm))
3102 dev_info(dev, "failed to create dimm sysfs entry\n");
2966} 3103}
2967 3104
2968 3105
@@ -2975,6 +3112,7 @@ netxen_remove_diag_entries(struct netxen_adapter *adapter)
2975 device_remove_file(dev, &dev_attr_diag_mode); 3112 device_remove_file(dev, &dev_attr_diag_mode);
2976 device_remove_bin_file(dev, &bin_attr_crb); 3113 device_remove_bin_file(dev, &bin_attr_crb);
2977 device_remove_bin_file(dev, &bin_attr_mem); 3114 device_remove_bin_file(dev, &bin_attr_mem);
3115 device_remove_bin_file(dev, &bin_attr_dimm);
2978} 3116}
2979 3117
2980#ifdef CONFIG_INET 3118#ifdef CONFIG_INET
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 385a4d5c7c25..8680a5dae4a2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
36 36
37#define _QLCNIC_LINUX_MAJOR 5 37#define _QLCNIC_LINUX_MAJOR 5
38#define _QLCNIC_LINUX_MINOR 0 38#define _QLCNIC_LINUX_MINOR 0
39#define _QLCNIC_LINUX_SUBVERSION 27 39#define _QLCNIC_LINUX_SUBVERSION 28
40#define QLCNIC_LINUX_VERSIONID "5.0.27" 40#define QLCNIC_LINUX_VERSIONID "5.0.28"
41#define QLCNIC_DRV_IDC_VER 0x01 41#define QLCNIC_DRV_IDC_VER 0x01
42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -607,6 +607,7 @@ struct qlcnic_recv_context {
607#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E 607#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
608#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f 608#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
609#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030 609#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
610#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
610 611
611#define QLCNIC_RCODE_SUCCESS 0 612#define QLCNIC_RCODE_SUCCESS 0
612#define QLCNIC_RCODE_NOT_SUPPORTED 9 613#define QLCNIC_RCODE_NOT_SUPPORTED 9
@@ -1180,18 +1181,62 @@ struct qlcnic_esw_func_cfg {
1180#define QLCNIC_STATS_ESWITCH 2 1181#define QLCNIC_STATS_ESWITCH 2
1181#define QLCNIC_QUERY_RX_COUNTER 0 1182#define QLCNIC_QUERY_RX_COUNTER 0
1182#define QLCNIC_QUERY_TX_COUNTER 1 1183#define QLCNIC_QUERY_TX_COUNTER 1
1183#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL 1184#define QLCNIC_STATS_NOT_AVAIL 0xffffffffffffffffULL
1185#define QLCNIC_FILL_STATS(VAL1) \
1186 (((VAL1) == QLCNIC_STATS_NOT_AVAIL) ? 0 : VAL1)
1187#define QLCNIC_MAC_STATS 1
1188#define QLCNIC_ESW_STATS 2
1184 1189
1185#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\ 1190#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
1186do { \ 1191do { \
1187 if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \ 1192 if (((VAL1) == QLCNIC_STATS_NOT_AVAIL) && \
1188 ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ 1193 ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
1189 (VAL1) = (VAL2); \ 1194 (VAL1) = (VAL2); \
1190 else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \ 1195 else if (((VAL1) != QLCNIC_STATS_NOT_AVAIL) && \
1191 ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ 1196 ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
1192 (VAL1) += (VAL2); \ 1197 (VAL1) += (VAL2); \
1193} while (0) 1198} while (0)
1194 1199
1200struct qlcnic_mac_statistics{
1201 __le64 mac_tx_frames;
1202 __le64 mac_tx_bytes;
1203 __le64 mac_tx_mcast_pkts;
1204 __le64 mac_tx_bcast_pkts;
1205 __le64 mac_tx_pause_cnt;
1206 __le64 mac_tx_ctrl_pkt;
1207 __le64 mac_tx_lt_64b_pkts;
1208 __le64 mac_tx_lt_127b_pkts;
1209 __le64 mac_tx_lt_255b_pkts;
1210 __le64 mac_tx_lt_511b_pkts;
1211 __le64 mac_tx_lt_1023b_pkts;
1212 __le64 mac_tx_lt_1518b_pkts;
1213 __le64 mac_tx_gt_1518b_pkts;
1214 __le64 rsvd1[3];
1215
1216 __le64 mac_rx_frames;
1217 __le64 mac_rx_bytes;
1218 __le64 mac_rx_mcast_pkts;
1219 __le64 mac_rx_bcast_pkts;
1220 __le64 mac_rx_pause_cnt;
1221 __le64 mac_rx_ctrl_pkt;
1222 __le64 mac_rx_lt_64b_pkts;
1223 __le64 mac_rx_lt_127b_pkts;
1224 __le64 mac_rx_lt_255b_pkts;
1225 __le64 mac_rx_lt_511b_pkts;
1226 __le64 mac_rx_lt_1023b_pkts;
1227 __le64 mac_rx_lt_1518b_pkts;
1228 __le64 mac_rx_gt_1518b_pkts;
1229 __le64 rsvd2[3];
1230
1231 __le64 mac_rx_length_error;
1232 __le64 mac_rx_length_small;
1233 __le64 mac_rx_length_large;
1234 __le64 mac_rx_jabber;
1235 __le64 mac_rx_dropped;
1236 __le64 mac_rx_crc_error;
1237 __le64 mac_align_error;
1238} __packed;
1239
1195struct __qlcnic_esw_statistics { 1240struct __qlcnic_esw_statistics {
1196 __le16 context_id; 1241 __le16 context_id;
1197 __le16 version; 1242 __le16 version;
@@ -1352,6 +1397,8 @@ enum op_codes {
1352#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed 1397#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
1353#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed 1398#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
1354#define QLCNIC_FORCE_FW_RESET 0xdeaddead 1399#define QLCNIC_FORCE_FW_RESET 0xdeaddead
1400#define QLCNIC_SET_QUIESCENT 0xadd00010
1401#define QLCNIC_RESET_QUIESCENT 0xadd00020
1355 1402
1356struct qlcnic_dump_operations { 1403struct qlcnic_dump_operations {
1357 enum op_codes opcode; 1404 enum op_codes opcode;
@@ -1510,6 +1557,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
1510int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8, 1557int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
1511 struct __qlcnic_esw_statistics *); 1558 struct __qlcnic_esw_statistics *);
1512int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8); 1559int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
1560int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
1513extern int qlcnic_config_tso; 1561extern int qlcnic_config_tso;
1514 1562
1515/* 1563/*
@@ -1559,6 +1607,7 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1559} 1607}
1560 1608
1561extern const struct ethtool_ops qlcnic_ethtool_ops; 1609extern const struct ethtool_ops qlcnic_ethtool_ops;
1610extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
1562 1611
1563struct qlcnic_nic_template { 1612struct qlcnic_nic_template {
1564 int (*config_bridged_mode) (struct qlcnic_adapter *, u32); 1613 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 569a837d2ac4..8db85244e8ad 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -905,6 +905,65 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
905 return err; 905 return err;
906} 906}
907 907
908/* This routine will retrieve the MAC statistics from firmware */
909int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
910 struct qlcnic_mac_statistics *mac_stats)
911{
912 struct qlcnic_mac_statistics *stats;
913 struct qlcnic_cmd_args cmd;
914 size_t stats_size = sizeof(struct qlcnic_mac_statistics);
915 dma_addr_t stats_dma_t;
916 void *stats_addr;
917 int err;
918
919 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
920 &stats_dma_t, GFP_KERNEL);
921 if (!stats_addr) {
922 dev_err(&adapter->pdev->dev,
923 "%s: Unable to allocate memory.\n", __func__);
924 return -ENOMEM;
925 }
926 memset(stats_addr, 0, stats_size);
927 memset(&cmd, 0, sizeof(cmd));
928 cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS;
929 cmd.req.arg1 = stats_size << 16;
930 cmd.req.arg2 = MSD(stats_dma_t);
931 cmd.req.arg3 = LSD(stats_dma_t);
932
933 qlcnic_issue_cmd(adapter, &cmd);
934 err = cmd.rsp.cmd;
935
936 if (!err) {
937 stats = stats_addr;
938 mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
939 mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
940 mac_stats->mac_tx_mcast_pkts =
941 le64_to_cpu(stats->mac_tx_mcast_pkts);
942 mac_stats->mac_tx_bcast_pkts =
943 le64_to_cpu(stats->mac_tx_bcast_pkts);
944 mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
945 mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
946 mac_stats->mac_rx_mcast_pkts =
947 le64_to_cpu(stats->mac_rx_mcast_pkts);
948 mac_stats->mac_rx_length_error =
949 le64_to_cpu(stats->mac_rx_length_error);
950 mac_stats->mac_rx_length_small =
951 le64_to_cpu(stats->mac_rx_length_small);
952 mac_stats->mac_rx_length_large =
953 le64_to_cpu(stats->mac_rx_length_large);
954 mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
955 mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
956 mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
957 } else {
958 dev_info(&adapter->pdev->dev,
959 "%s: Get mac stats failed =%d.\n", __func__, err);
960 }
961
962 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
963 stats_dma_t);
964 return err;
965}
966
908int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, 967int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
909 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { 968 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
910 969
@@ -920,13 +979,13 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
920 return -EIO; 979 return -EIO;
921 980
922 memset(esw_stats, 0, sizeof(u64)); 981 memset(esw_stats, 0, sizeof(u64));
923 esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; 982 esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
924 esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; 983 esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
925 esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; 984 esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
926 esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL; 985 esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
927 esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL; 986 esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
928 esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL; 987 esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
929 esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL; 988 esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
930 esw_stats->context_id = eswitch; 989 esw_stats->context_id = eswitch;
931 990
932 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 991 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 89ddf7f7d7df..9e9e78a5c4d7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -78,8 +78,46 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
78 "tx numbytes", 78 "tx numbytes",
79}; 79};
80 80
81#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) 81static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
82 "mac_tx_frames",
83 "mac_tx_bytes",
84 "mac_tx_mcast_pkts",
85 "mac_tx_bcast_pkts",
86 "mac_tx_pause_cnt",
87 "mac_tx_ctrl_pkt",
88 "mac_tx_lt_64b_pkts",
89 "mac_tx_lt_127b_pkts",
90 "mac_tx_lt_255b_pkts",
91 "mac_tx_lt_511b_pkts",
92 "mac_tx_lt_1023b_pkts",
93 "mac_tx_lt_1518b_pkts",
94 "mac_tx_gt_1518b_pkts",
95 "mac_rx_frames",
96 "mac_rx_bytes",
97 "mac_rx_mcast_pkts",
98 "mac_rx_bcast_pkts",
99 "mac_rx_pause_cnt",
100 "mac_rx_ctrl_pkt",
101 "mac_rx_lt_64b_pkts",
102 "mac_rx_lt_127b_pkts",
103 "mac_rx_lt_255b_pkts",
104 "mac_rx_lt_511b_pkts",
105 "mac_rx_lt_1023b_pkts",
106 "mac_rx_lt_1518b_pkts",
107 "mac_rx_gt_1518b_pkts",
108 "mac_rx_length_error",
109 "mac_rx_length_small",
110 "mac_rx_length_large",
111 "mac_rx_jabber",
112 "mac_rx_dropped",
113 "mac_rx_crc_error",
114 "mac_align_error",
115};
116
117#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
118#define QLCNIC_MAC_STATS_LEN ARRAY_SIZE(qlcnic_mac_stats_strings)
82#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats) 119#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats)
120#define QLCNIC_TOTAL_STATS_LEN QLCNIC_STATS_LEN + QLCNIC_MAC_STATS_LEN
83 121
84static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { 122static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
85 "Register_Test_on_offline", 123 "Register_Test_on_offline",
@@ -644,8 +682,8 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
644 return QLCNIC_TEST_LEN; 682 return QLCNIC_TEST_LEN;
645 case ETH_SS_STATS: 683 case ETH_SS_STATS:
646 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) 684 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
647 return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN; 685 return QLCNIC_TOTAL_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
648 return QLCNIC_STATS_LEN; 686 return QLCNIC_TOTAL_STATS_LEN;
649 default: 687 default:
650 return -EOPNOTSUPP; 688 return -EOPNOTSUPP;
651 } 689 }
@@ -851,7 +889,7 @@ static void
851qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) 889qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
852{ 890{
853 struct qlcnic_adapter *adapter = netdev_priv(dev); 891 struct qlcnic_adapter *adapter = netdev_priv(dev);
854 int index, i; 892 int index, i, j;
855 893
856 switch (stringset) { 894 switch (stringset) {
857 case ETH_SS_TEST: 895 case ETH_SS_TEST:
@@ -864,6 +902,11 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
864 qlcnic_gstrings_stats[index].stat_string, 902 qlcnic_gstrings_stats[index].stat_string,
865 ETH_GSTRING_LEN); 903 ETH_GSTRING_LEN);
866 } 904 }
905 for (j = 0; j < QLCNIC_MAC_STATS_LEN; index++, j++) {
906 memcpy(data + index * ETH_GSTRING_LEN,
907 qlcnic_mac_stats_strings[j],
908 ETH_GSTRING_LEN);
909 }
867 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 910 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
868 return; 911 return;
869 for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) { 912 for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
@@ -874,22 +917,64 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
874 } 917 }
875} 918}
876 919
877#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \
878 (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 0 : VAL1)
879
880static void 920static void
881qlcnic_fill_device_stats(int *index, u64 *data, 921qlcnic_fill_stats(int *index, u64 *data, void *stats, int type)
882 struct __qlcnic_esw_statistics *stats)
883{ 922{
884 int ind = *index; 923 int ind = *index;
885 924
886 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames); 925 if (type == QLCNIC_MAC_STATS) {
887 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames); 926 struct qlcnic_mac_statistics *mac_stats =
888 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames); 927 (struct qlcnic_mac_statistics *)stats;
889 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames); 928 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
890 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors); 929 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
891 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames); 930 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
892 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes); 931 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
932 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
933 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
934 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
935 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
936 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
937 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
938 data[ind++] =
939 QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
940 data[ind++] =
941 QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
942 data[ind++] =
943 QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
944 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
945 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
946 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
947 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
948 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
949 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
950 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
951 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
952 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
953 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
954 data[ind++] =
955 QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
956 data[ind++] =
957 QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
958 data[ind++] =
959 QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
960 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
961 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
962 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
963 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
964 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
965 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
966 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
967 } else if (type == QLCNIC_ESW_STATS) {
968 struct __qlcnic_esw_statistics *esw_stats =
969 (struct __qlcnic_esw_statistics *)stats;
970 data[ind++] = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
971 data[ind++] = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
972 data[ind++] = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
973 data[ind++] = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
974 data[ind++] = QLCNIC_FILL_STATS(esw_stats->errors);
975 data[ind++] = QLCNIC_FILL_STATS(esw_stats->local_frames);
976 data[ind++] = QLCNIC_FILL_STATS(esw_stats->numbytes);
977 }
893 978
894 *index = ind; 979 *index = ind;
895} 980}
@@ -900,6 +985,7 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
900{ 985{
901 struct qlcnic_adapter *adapter = netdev_priv(dev); 986 struct qlcnic_adapter *adapter = netdev_priv(dev);
902 struct qlcnic_esw_statistics port_stats; 987 struct qlcnic_esw_statistics port_stats;
988 struct qlcnic_mac_statistics mac_stats;
903 int index, ret; 989 int index, ret;
904 990
905 for (index = 0; index < QLCNIC_STATS_LEN; index++) { 991 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
@@ -911,6 +997,11 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
911 sizeof(u64)) ? *(u64 *)p:(*(u32 *)p); 997 sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
912 } 998 }
913 999
1000 /* Retrieve MAC statistics from firmware */
1001 memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
1002 qlcnic_get_mac_stats(adapter, &mac_stats);
1003 qlcnic_fill_stats(&index, data, &mac_stats, QLCNIC_MAC_STATS);
1004
914 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 1005 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
915 return; 1006 return;
916 1007
@@ -920,14 +1011,14 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
920 if (ret) 1011 if (ret)
921 return; 1012 return;
922 1013
923 qlcnic_fill_device_stats(&index, data, &port_stats.rx); 1014 qlcnic_fill_stats(&index, data, &port_stats.rx, QLCNIC_ESW_STATS);
924 1015
925 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, 1016 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
926 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); 1017 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
927 if (ret) 1018 if (ret)
928 return; 1019 return;
929 1020
930 qlcnic_fill_device_stats(&index, data, &port_stats.tx); 1021 qlcnic_fill_stats(&index, data, &port_stats.tx, QLCNIC_ESW_STATS);
931} 1022}
932 1023
933static int qlcnic_set_led(struct net_device *dev, 1024static int qlcnic_set_led(struct net_device *dev,
@@ -1132,11 +1223,21 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
1132 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1223 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1133 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 1224 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1134 1225
1226 if (!fw_dump->tmpl_hdr) {
1227 netdev_err(adapter->netdev, "FW Dump not supported\n");
1228 return -ENOTSUPP;
1229 }
1230
1135 if (fw_dump->clr) 1231 if (fw_dump->clr)
1136 dump->len = fw_dump->tmpl_hdr->size + fw_dump->size; 1232 dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
1137 else 1233 else
1138 dump->len = 0; 1234 dump->len = 0;
1139 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; 1235
1236 if (!fw_dump->enable)
1237 dump->flag = ETH_FW_DUMP_DISABLE;
1238 else
1239 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
1240
1140 dump->version = adapter->fw_version; 1241 dump->version = adapter->fw_version;
1141 return 0; 1242 return 0;
1142} 1243}
@@ -1150,6 +1251,11 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1150 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1251 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1151 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 1252 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1152 1253
1254 if (!fw_dump->tmpl_hdr) {
1255 netdev_err(netdev, "FW Dump not supported\n");
1256 return -ENOTSUPP;
1257 }
1258
1153 if (!fw_dump->clr) { 1259 if (!fw_dump->clr) {
1154 netdev_info(netdev, "Dump not available\n"); 1260 netdev_info(netdev, "Dump not available\n");
1155 return -EINVAL; 1261 return -EINVAL;
@@ -1177,55 +1283,74 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1177static int 1283static int
1178qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) 1284qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1179{ 1285{
1180 int ret = 0; 1286 int i;
1181 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1287 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1182 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 1288 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1289 u32 state;
1183 1290
1184 switch (val->flag) { 1291 switch (val->flag) {
1185 case QLCNIC_FORCE_FW_DUMP_KEY: 1292 case QLCNIC_FORCE_FW_DUMP_KEY:
1293 if (!fw_dump->tmpl_hdr) {
1294 netdev_err(netdev, "FW dump not supported\n");
1295 return -ENOTSUPP;
1296 }
1186 if (!fw_dump->enable) { 1297 if (!fw_dump->enable) {
1187 netdev_info(netdev, "FW dump not enabled\n"); 1298 netdev_info(netdev, "FW dump not enabled\n");
1188 return ret; 1299 return 0;
1189 } 1300 }
1190 if (fw_dump->clr) { 1301 if (fw_dump->clr) {
1191 netdev_info(netdev, 1302 netdev_info(netdev,
1192 "Previous dump not cleared, not forcing dump\n"); 1303 "Previous dump not cleared, not forcing dump\n");
1193 return ret; 1304 return 0;
1194 } 1305 }
1195 netdev_info(netdev, "Forcing a FW dump\n"); 1306 netdev_info(netdev, "Forcing a FW dump\n");
1196 qlcnic_dev_request_reset(adapter); 1307 qlcnic_dev_request_reset(adapter);
1197 break; 1308 break;
1198 case QLCNIC_DISABLE_FW_DUMP: 1309 case QLCNIC_DISABLE_FW_DUMP:
1199 if (fw_dump->enable) { 1310 if (fw_dump->enable && fw_dump->tmpl_hdr) {
1200 netdev_info(netdev, "Disabling FW dump\n"); 1311 netdev_info(netdev, "Disabling FW dump\n");
1201 fw_dump->enable = 0; 1312 fw_dump->enable = 0;
1202 } 1313 }
1203 break; 1314 return 0;
1204 case QLCNIC_ENABLE_FW_DUMP: 1315 case QLCNIC_ENABLE_FW_DUMP:
1205 if (!fw_dump->enable && fw_dump->tmpl_hdr) { 1316 if (!fw_dump->tmpl_hdr) {
1317 netdev_err(netdev, "FW dump not supported\n");
1318 return -ENOTSUPP;
1319 }
1320 if (!fw_dump->enable) {
1206 netdev_info(netdev, "Enabling FW dump\n"); 1321 netdev_info(netdev, "Enabling FW dump\n");
1207 fw_dump->enable = 1; 1322 fw_dump->enable = 1;
1208 } 1323 }
1209 break; 1324 return 0;
1210 case QLCNIC_FORCE_FW_RESET: 1325 case QLCNIC_FORCE_FW_RESET:
1211 netdev_info(netdev, "Forcing a FW reset\n"); 1326 netdev_info(netdev, "Forcing a FW reset\n");
1212 qlcnic_dev_request_reset(adapter); 1327 qlcnic_dev_request_reset(adapter);
1213 adapter->flags &= ~QLCNIC_FW_RESET_OWNER; 1328 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
1214 break; 1329 return 0;
1330 case QLCNIC_SET_QUIESCENT:
1331 case QLCNIC_RESET_QUIESCENT:
1332 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
1333 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
1334 netdev_info(netdev, "Device in FAILED state\n");
1335 return 0;
1215 default: 1336 default:
1216 if (val->flag > QLCNIC_DUMP_MASK_MAX || 1337 if (!fw_dump->tmpl_hdr) {
1217 val->flag < QLCNIC_DUMP_MASK_MIN) { 1338 netdev_err(netdev, "FW dump not supported\n");
1218 netdev_info(netdev, 1339 return -ENOTSUPP;
1219 "Invalid dump level: 0x%x\n", val->flag); 1340 }
1220 ret = -EINVAL; 1341 for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
1221 goto out; 1342 if (val->flag == FW_DUMP_LEVELS[i]) {
1343 fw_dump->tmpl_hdr->drv_cap_mask =
1344 val->flag;
1345 netdev_info(netdev, "Driver mask changed to: 0x%x\n",
1346 fw_dump->tmpl_hdr->drv_cap_mask);
1347 return 0;
1348 }
1222 } 1349 }
1223 fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff; 1350 netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag);
1224 netdev_info(netdev, "Driver mask changed to: 0x%x\n", 1351 return -EINVAL;
1225 fw_dump->tmpl_hdr->drv_cap_mask);
1226 } 1352 }
1227out: 1353 return 0;
1228 return ret;
1229} 1354}
1230 1355
1231const struct ethtool_ops qlcnic_ethtool_ops = { 1356const struct ethtool_ops qlcnic_ethtool_ops = {
@@ -1258,3 +1383,10 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
1258 .get_dump_data = qlcnic_get_dump_data, 1383 .get_dump_data = qlcnic_get_dump_data,
1259 .set_dump = qlcnic_set_dump, 1384 .set_dump = qlcnic_set_dump,
1260}; 1385};
1386
1387const struct ethtool_ops qlcnic_ethtool_failed_ops = {
1388 .get_settings = qlcnic_get_settings,
1389 .get_drvinfo = qlcnic_get_drvinfo,
1390 .set_msglevel = qlcnic_set_msglevel,
1391 .get_msglevel = qlcnic_get_msglevel,
1392};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index a52819303d1b..6ced3195aad3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -704,6 +704,8 @@ enum {
704#define QLCNIC_DEV_FAILED 0x6 704#define QLCNIC_DEV_FAILED 0x6
705#define QLCNIC_DEV_QUISCENT 0x7 705#define QLCNIC_DEV_QUISCENT 0x7
706 706
707#define QLCNIC_DEV_BADBAD 0xbad0bad0
708
707#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */ 709#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */
708#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ 710#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
709#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ 711#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
@@ -776,6 +778,10 @@ struct qlcnic_legacy_intr_set {
776#define FLASH_ROM_WINDOW 0x42110030 778#define FLASH_ROM_WINDOW 0x42110030
777#define FLASH_ROM_DATA 0x42150000 779#define FLASH_ROM_DATA 0x42150000
778 780
781
782static const u32 FW_DUMP_LEVELS[] = {
783 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
784
779static const u32 MIU_TEST_READ_DATA[] = { 785static const u32 MIU_TEST_READ_DATA[] = {
780 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, }; 786 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
781 787
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 75c32e875fef..46e77a2c5121 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -338,6 +338,10 @@ static const struct net_device_ops qlcnic_netdev_ops = {
338#endif 338#endif
339}; 339};
340 340
341static const struct net_device_ops qlcnic_netdev_failed_ops = {
342 .ndo_open = qlcnic_open,
343};
344
341static struct qlcnic_nic_template qlcnic_ops = { 345static struct qlcnic_nic_template qlcnic_ops = {
342 .config_bridged_mode = qlcnic_config_bridged_mode, 346 .config_bridged_mode = qlcnic_config_bridged_mode,
343 .config_led = qlcnic_config_led, 347 .config_led = qlcnic_config_led,
@@ -1623,8 +1627,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1623 1627
1624 err = adapter->nic_ops->start_firmware(adapter); 1628 err = adapter->nic_ops->start_firmware(adapter);
1625 if (err) { 1629 if (err) {
1626 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 1630 dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
1627 goto err_out_decr_ref; 1631 "\t\tIf reboot doesn't help, try flashing the card\n");
1632 goto err_out_maintenance_mode;
1628 } 1633 }
1629 1634
1630 if (qlcnic_read_mac_addr(adapter)) 1635 if (qlcnic_read_mac_addr(adapter))
@@ -1695,6 +1700,18 @@ err_out_disable_pdev:
1695 pci_set_drvdata(pdev, NULL); 1700 pci_set_drvdata(pdev, NULL);
1696 pci_disable_device(pdev); 1701 pci_disable_device(pdev);
1697 return err; 1702 return err;
1703
1704err_out_maintenance_mode:
1705 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
1706 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
1707 err = register_netdev(netdev);
1708 if (err) {
1709 dev_err(&pdev->dev, "failed to register net device\n");
1710 goto err_out_decr_ref;
1711 }
1712 pci_set_drvdata(pdev, adapter);
1713 qlcnic_create_diag_entries(adapter);
1714 return 0;
1698} 1715}
1699 1716
1700static void __devexit qlcnic_remove(struct pci_dev *pdev) 1717static void __devexit qlcnic_remove(struct pci_dev *pdev)
@@ -1831,8 +1848,14 @@ done:
1831static int qlcnic_open(struct net_device *netdev) 1848static int qlcnic_open(struct net_device *netdev)
1832{ 1849{
1833 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1850 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1851 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
1834 int err; 1852 int err;
1835 1853
1854 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
1855 netdev_err(netdev, "Device in FAILED state\n");
1856 return -EIO;
1857 }
1858
1836 netif_carrier_off(netdev); 1859 netif_carrier_off(netdev);
1837 1860
1838 err = qlcnic_attach(adapter); 1861 err = qlcnic_attach(adapter);
@@ -1942,7 +1965,7 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
1942 __le16 vlan_id = 0; 1965 __le16 vlan_id = 0;
1943 u8 hindex; 1966 u8 hindex;
1944 1967
1945 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr)) 1968 if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
1946 return; 1969 return;
1947 1970
1948 if (adapter->fhash.fnum >= adapter->fhash.fmax) 1971 if (adapter->fhash.fnum >= adapter->fhash.fmax)
@@ -2212,8 +2235,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2212 2235
2213 if (adapter->flags & QLCNIC_MACSPOOF) { 2236 if (adapter->flags & QLCNIC_MACSPOOF) {
2214 phdr = (struct ethhdr *)skb->data; 2237 phdr = (struct ethhdr *)skb->data;
2215 if (compare_ether_addr(phdr->h_source, 2238 if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
2216 adapter->mac_addr))
2217 goto drop_packet; 2239 goto drop_packet;
2218 } 2240 }
2219 2241
@@ -3018,6 +3040,12 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
3018 return; 3040 return;
3019 3041
3020 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 3042 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
3043 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
3044 netdev_err(adapter->netdev,
3045 "Device is in FAILED state, Please Reboot\n");
3046 qlcnic_api_unlock(adapter);
3047 return;
3048 }
3021 3049
3022 if (state == QLCNIC_DEV_READY) { 3050 if (state == QLCNIC_DEV_READY) {
3023 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); 3051 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
@@ -3061,6 +3089,9 @@ qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
3061 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 3089 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3062 msleep(10); 3090 msleep(10);
3063 3091
3092 if (!adapter->fw_work.work.func)
3093 return;
3094
3064 cancel_delayed_work_sync(&adapter->fw_work); 3095 cancel_delayed_work_sync(&adapter->fw_work);
3065} 3096}
3066 3097
@@ -4280,6 +4311,7 @@ static void
4280qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) 4311qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4281{ 4312{
4282 struct device *dev = &adapter->pdev->dev; 4313 struct device *dev = &adapter->pdev->dev;
4314 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
4283 4315
4284 if (device_create_bin_file(dev, &bin_attr_port_stats)) 4316 if (device_create_bin_file(dev, &bin_attr_port_stats))
4285 dev_info(dev, "failed to create port stats sysfs entry"); 4317 dev_info(dev, "failed to create port stats sysfs entry");
@@ -4288,14 +4320,19 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4288 return; 4320 return;
4289 if (device_create_file(dev, &dev_attr_diag_mode)) 4321 if (device_create_file(dev, &dev_attr_diag_mode))
4290 dev_info(dev, "failed to create diag_mode sysfs entry\n"); 4322 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4291 if (device_create_file(dev, &dev_attr_beacon))
4292 dev_info(dev, "failed to create beacon sysfs entry");
4293 if (device_create_bin_file(dev, &bin_attr_crb)) 4323 if (device_create_bin_file(dev, &bin_attr_crb))
4294 dev_info(dev, "failed to create crb sysfs entry\n"); 4324 dev_info(dev, "failed to create crb sysfs entry\n");
4295 if (device_create_bin_file(dev, &bin_attr_mem)) 4325 if (device_create_bin_file(dev, &bin_attr_mem))
4296 dev_info(dev, "failed to create mem sysfs entry\n"); 4326 dev_info(dev, "failed to create mem sysfs entry\n");
4327
4328 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
4329 return;
4330
4297 if (device_create_bin_file(dev, &bin_attr_pci_config)) 4331 if (device_create_bin_file(dev, &bin_attr_pci_config))
4298 dev_info(dev, "failed to create pci config sysfs entry"); 4332 dev_info(dev, "failed to create pci config sysfs entry");
4333 if (device_create_file(dev, &dev_attr_beacon))
4334 dev_info(dev, "failed to create beacon sysfs entry");
4335
4299 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 4336 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4300 return; 4337 return;
4301 if (device_create_bin_file(dev, &bin_attr_esw_config)) 4338 if (device_create_bin_file(dev, &bin_attr_esw_config))
@@ -4314,16 +4351,19 @@ static void
4314qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) 4351qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4315{ 4352{
4316 struct device *dev = &adapter->pdev->dev; 4353 struct device *dev = &adapter->pdev->dev;
4354 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
4317 4355
4318 device_remove_bin_file(dev, &bin_attr_port_stats); 4356 device_remove_bin_file(dev, &bin_attr_port_stats);
4319 4357
4320 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) 4358 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4321 return; 4359 return;
4322 device_remove_file(dev, &dev_attr_diag_mode); 4360 device_remove_file(dev, &dev_attr_diag_mode);
4323 device_remove_file(dev, &dev_attr_beacon);
4324 device_remove_bin_file(dev, &bin_attr_crb); 4361 device_remove_bin_file(dev, &bin_attr_crb);
4325 device_remove_bin_file(dev, &bin_attr_mem); 4362 device_remove_bin_file(dev, &bin_attr_mem);
4363 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
4364 return;
4326 device_remove_bin_file(dev, &bin_attr_pci_config); 4365 device_remove_bin_file(dev, &bin_attr_pci_config);
4366 device_remove_file(dev, &dev_attr_beacon);
4327 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 4367 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4328 return; 4368 return;
4329 device_remove_bin_file(dev, &bin_attr_esw_config); 4369 device_remove_bin_file(dev, &bin_attr_esw_config);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 49343ec21c82..09d8d33171df 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3845,7 +3845,7 @@ static int ql_wol(struct ql_adapter *qdev)
3845 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | 3845 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3846 WAKE_MCAST | WAKE_BCAST)) { 3846 WAKE_MCAST | WAKE_BCAST)) {
3847 netif_err(qdev, ifdown, qdev->ndev, 3847 netif_err(qdev, ifdown, qdev->ndev,
3848 "Unsupported WOL paramter. qdev->wol = 0x%x.\n", 3848 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3849 qdev->wol); 3849 qdev->wol);
3850 return -EINVAL; 3850 return -EINVAL;
3851 } 3851 }
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index b96e1920e045..4de73643fec6 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw> 4 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
5 * Copyright (C) 2007 5 * Copyright (C) 2007
6 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us> 6 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
7 * Florian Fainelli <florian@openwrt.org> 7 * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -74,9 +74,13 @@
74#define MT_ICR 0x0C /* TX interrupt control */ 74#define MT_ICR 0x0C /* TX interrupt control */
75#define MR_ICR 0x10 /* RX interrupt control */ 75#define MR_ICR 0x10 /* RX interrupt control */
76#define MTPR 0x14 /* TX poll command register */ 76#define MTPR 0x14 /* TX poll command register */
77#define TM2TX 0x0001 /* Trigger MAC to transmit */
77#define MR_BSR 0x18 /* RX buffer size */ 78#define MR_BSR 0x18 /* RX buffer size */
78#define MR_DCR 0x1A /* RX descriptor control */ 79#define MR_DCR 0x1A /* RX descriptor control */
79#define MLSR 0x1C /* Last status */ 80#define MLSR 0x1C /* Last status */
81#define TX_FIFO_UNDR 0x0200 /* TX FIFO under-run */
82#define TX_EXCEEDC 0x2000 /* Transmit exceed collision */
83#define TX_LATEC 0x4000 /* Transmit late collision */
80#define MMDIO 0x20 /* MDIO control register */ 84#define MMDIO 0x20 /* MDIO control register */
81#define MDIO_WRITE 0x4000 /* MDIO write */ 85#define MDIO_WRITE 0x4000 /* MDIO write */
82#define MDIO_READ 0x2000 /* MDIO read */ 86#define MDIO_READ 0x2000 /* MDIO read */
@@ -124,6 +128,9 @@
124#define MID_3M 0x82 /* MID3 Medium */ 128#define MID_3M 0x82 /* MID3 Medium */
125#define MID_3H 0x84 /* MID3 High */ 129#define MID_3H 0x84 /* MID3 High */
126#define PHY_CC 0x88 /* PHY status change configuration register */ 130#define PHY_CC 0x88 /* PHY status change configuration register */
131#define SCEN 0x8000 /* PHY status change enable */
132#define PHYAD_SHIFT 8 /* PHY address shift */
133#define TMRDIV_SHIFT 0 /* Timer divider shift */
127#define PHY_ST 0x8A /* PHY status register */ 134#define PHY_ST 0x8A /* PHY status register */
128#define MAC_SM 0xAC /* MAC status machine */ 135#define MAC_SM 0xAC /* MAC status machine */
129#define MAC_SM_RST 0x0002 /* MAC status machine reset */ 136#define MAC_SM_RST 0x0002 /* MAC status machine reset */
@@ -137,6 +144,8 @@
137#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ 144#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
138#define MCAST_MAX 3 /* Max number multicast addresses to filter */ 145#define MCAST_MAX 3 /* Max number multicast addresses to filter */
139 146
147#define MAC_DEF_TIMEOUT 2048 /* Default MAC read/write operation timeout */
148
140/* Descriptor status */ 149/* Descriptor status */
141#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ 150#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
142#define DSC_RX_OK 0x4000 /* RX was successful */ 151#define DSC_RX_OK 0x4000 /* RX was successful */
@@ -187,7 +196,7 @@ struct r6040_private {
187 dma_addr_t rx_ring_dma; 196 dma_addr_t rx_ring_dma;
188 dma_addr_t tx_ring_dma; 197 dma_addr_t tx_ring_dma;
189 u16 tx_free_desc; 198 u16 tx_free_desc;
190 u16 mcr0, mcr1; 199 u16 mcr0;
191 struct net_device *dev; 200 struct net_device *dev;
192 struct mii_bus *mii_bus; 201 struct mii_bus *mii_bus;
193 struct napi_struct napi; 202 struct napi_struct napi;
@@ -204,7 +213,7 @@ static char version[] __devinitdata = DRV_NAME
204/* Read a word data from PHY Chip */ 213/* Read a word data from PHY Chip */
205static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) 214static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
206{ 215{
207 int limit = 2048; 216 int limit = MAC_DEF_TIMEOUT;
208 u16 cmd; 217 u16 cmd;
209 218
210 iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO); 219 iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
@@ -222,7 +231,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
222static void r6040_phy_write(void __iomem *ioaddr, 231static void r6040_phy_write(void __iomem *ioaddr,
223 int phy_addr, int reg, u16 val) 232 int phy_addr, int reg, u16 val)
224{ 233{
225 int limit = 2048; 234 int limit = MAC_DEF_TIMEOUT;
226 u16 cmd; 235 u16 cmd;
227 236
228 iowrite16(val, ioaddr + MMWD); 237 iowrite16(val, ioaddr + MMWD);
@@ -358,27 +367,35 @@ err_exit:
358 return rc; 367 return rc;
359} 368}
360 369
361static void r6040_init_mac_regs(struct net_device *dev) 370static void r6040_reset_mac(struct r6040_private *lp)
362{ 371{
363 struct r6040_private *lp = netdev_priv(dev);
364 void __iomem *ioaddr = lp->base; 372 void __iomem *ioaddr = lp->base;
365 int limit = 2048; 373 int limit = MAC_DEF_TIMEOUT;
366 u16 cmd; 374 u16 cmd;
367 375
368 /* Mask Off Interrupt */
369 iowrite16(MSK_INT, ioaddr + MIER);
370
371 /* Reset RDC MAC */
372 iowrite16(MAC_RST, ioaddr + MCR1); 376 iowrite16(MAC_RST, ioaddr + MCR1);
373 while (limit--) { 377 while (limit--) {
374 cmd = ioread16(ioaddr + MCR1); 378 cmd = ioread16(ioaddr + MCR1);
375 if (cmd & MAC_RST) 379 if (cmd & MAC_RST)
376 break; 380 break;
377 } 381 }
382
378 /* Reset internal state machine */ 383 /* Reset internal state machine */
379 iowrite16(MAC_SM_RST, ioaddr + MAC_SM); 384 iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
380 iowrite16(0, ioaddr + MAC_SM); 385 iowrite16(0, ioaddr + MAC_SM);
381 mdelay(5); 386 mdelay(5);
387}
388
389static void r6040_init_mac_regs(struct net_device *dev)
390{
391 struct r6040_private *lp = netdev_priv(dev);
392 void __iomem *ioaddr = lp->base;
393
394 /* Mask Off Interrupt */
395 iowrite16(MSK_INT, ioaddr + MIER);
396
397 /* Reset RDC MAC */
398 r6040_reset_mac(lp);
382 399
383 /* MAC Bus Control Register */ 400 /* MAC Bus Control Register */
384 iowrite16(MBCR_DEFAULT, ioaddr + MBCR); 401 iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
@@ -407,7 +424,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
407 /* Let TX poll the descriptors 424 /* Let TX poll the descriptors
408 * we may got called by r6040_tx_timeout which has left 425 * we may got called by r6040_tx_timeout which has left
409 * some unsent tx buffers */ 426 * some unsent tx buffers */
410 iowrite16(0x01, ioaddr + MTPR); 427 iowrite16(TM2TX, ioaddr + MTPR);
411} 428}
412 429
413static void r6040_tx_timeout(struct net_device *dev) 430static void r6040_tx_timeout(struct net_device *dev)
@@ -445,18 +462,13 @@ static void r6040_down(struct net_device *dev)
445{ 462{
446 struct r6040_private *lp = netdev_priv(dev); 463 struct r6040_private *lp = netdev_priv(dev);
447 void __iomem *ioaddr = lp->base; 464 void __iomem *ioaddr = lp->base;
448 int limit = 2048;
449 u16 *adrp; 465 u16 *adrp;
450 u16 cmd;
451 466
452 /* Stop MAC */ 467 /* Stop MAC */
453 iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */ 468 iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
454 iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */ 469
455 while (limit--) { 470 /* Reset RDC MAC */
456 cmd = ioread16(ioaddr + MCR1); 471 r6040_reset_mac(lp);
457 if (cmd & MAC_RST)
458 break;
459 }
460 472
461 /* Restore MAC Address to MIDx */ 473 /* Restore MAC Address to MIDx */
462 adrp = (u16 *) dev->dev_addr; 474 adrp = (u16 *) dev->dev_addr;
@@ -599,9 +611,9 @@ static void r6040_tx(struct net_device *dev)
599 /* Check for errors */ 611 /* Check for errors */
600 err = ioread16(ioaddr + MLSR); 612 err = ioread16(ioaddr + MLSR);
601 613
602 if (err & 0x0200) 614 if (err & TX_FIFO_UNDR)
603 dev->stats.rx_fifo_errors++; 615 dev->stats.tx_fifo_errors++;
604 if (err & (0x2000 | 0x4000)) 616 if (err & (TX_EXCEEDC | TX_LATEC))
605 dev->stats.tx_carrier_errors++; 617 dev->stats.tx_carrier_errors++;
606 618
607 if (descptr->status & DSC_OWNER_MAC) 619 if (descptr->status & DSC_OWNER_MAC)
@@ -736,11 +748,7 @@ static void r6040_mac_address(struct net_device *dev)
736 u16 *adrp; 748 u16 *adrp;
737 749
738 /* Reset MAC */ 750 /* Reset MAC */
739 iowrite16(MAC_RST, ioaddr + MCR1); 751 r6040_reset_mac(lp);
740 /* Reset internal state machine */
741 iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
742 iowrite16(0, ioaddr + MAC_SM);
743 mdelay(5);
744 752
745 /* Restore MAC Address */ 753 /* Restore MAC Address */
746 adrp = (u16 *) dev->dev_addr; 754 adrp = (u16 *) dev->dev_addr;
@@ -840,7 +848,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
840 skb_tx_timestamp(skb); 848 skb_tx_timestamp(skb);
841 849
842 /* Trigger the MAC to check the TX descriptor */ 850 /* Trigger the MAC to check the TX descriptor */
843 iowrite16(0x01, ioaddr + MTPR); 851 iowrite16(TM2TX, ioaddr + MTPR);
844 lp->tx_insert_ptr = descptr->vndescp; 852 lp->tx_insert_ptr = descptr->vndescp;
845 853
846 /* If no tx resource, stop */ 854 /* If no tx resource, stop */
@@ -973,6 +981,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
973 .get_settings = netdev_get_settings, 981 .get_settings = netdev_get_settings,
974 .set_settings = netdev_set_settings, 982 .set_settings = netdev_set_settings,
975 .get_link = ethtool_op_get_link, 983 .get_link = ethtool_op_get_link,
984 .get_ts_info = ethtool_op_get_ts_info,
976}; 985};
977 986
978static const struct net_device_ops r6040_netdev_ops = { 987static const struct net_device_ops r6040_netdev_ops = {
@@ -1126,10 +1135,15 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1126 err = -EIO; 1135 err = -EIO;
1127 goto err_out_free_res; 1136 goto err_out_free_res;
1128 } 1137 }
1138
1129 /* If PHY status change register is still set to zero it means the 1139 /* If PHY status change register is still set to zero it means the
1130 * bootloader didn't initialize it */ 1140 * bootloader didn't initialize it, so we set it to:
1141 * - enable phy status change
1142 * - enable all phy addresses
1143 * - set to lowest timer divider */
1131 if (ioread16(ioaddr + PHY_CC) == 0) 1144 if (ioread16(ioaddr + PHY_CC) == 0)
1132 iowrite16(0x9f07, ioaddr + PHY_CC); 1145 iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
1146 7 << TMRDIV_SHIFT, ioaddr + PHY_CC);
1133 1147
1134 /* Init system & device */ 1148 /* Init system & device */
1135 lp->base = ioaddr; 1149 lp->base = ioaddr;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index abc79076f867..5eef290997f9 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -635,9 +635,12 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
635 */ 635 */
636static void cp_poll_controller(struct net_device *dev) 636static void cp_poll_controller(struct net_device *dev)
637{ 637{
638 disable_irq(dev->irq); 638 struct cp_private *cp = netdev_priv(dev);
639 cp_interrupt(dev->irq, dev); 639 const int irq = cp->pdev->irq;
640 enable_irq(dev->irq); 640
641 disable_irq(irq);
642 cp_interrupt(irq, dev);
643 enable_irq(irq);
641} 644}
642#endif 645#endif
643 646
@@ -958,6 +961,11 @@ static inline void cp_start_hw (struct cp_private *cp)
958 cpw8(Cmd, RxOn | TxOn); 961 cpw8(Cmd, RxOn | TxOn);
959} 962}
960 963
964static void cp_enable_irq(struct cp_private *cp)
965{
966 cpw16_f(IntrMask, cp_intr_mask);
967}
968
961static void cp_init_hw (struct cp_private *cp) 969static void cp_init_hw (struct cp_private *cp)
962{ 970{
963 struct net_device *dev = cp->dev; 971 struct net_device *dev = cp->dev;
@@ -997,8 +1005,6 @@ static void cp_init_hw (struct cp_private *cp)
997 1005
998 cpw16(MultiIntr, 0); 1006 cpw16(MultiIntr, 0);
999 1007
1000 cpw16_f(IntrMask, cp_intr_mask);
1001
1002 cpw8_f(Cfg9346, Cfg9346_Lock); 1008 cpw8_f(Cfg9346, Cfg9346_Lock);
1003} 1009}
1004 1010
@@ -1114,6 +1120,7 @@ static void cp_free_rings (struct cp_private *cp)
1114static int cp_open (struct net_device *dev) 1120static int cp_open (struct net_device *dev)
1115{ 1121{
1116 struct cp_private *cp = netdev_priv(dev); 1122 struct cp_private *cp = netdev_priv(dev);
1123 const int irq = cp->pdev->irq;
1117 int rc; 1124 int rc;
1118 1125
1119 netif_dbg(cp, ifup, dev, "enabling interface\n"); 1126 netif_dbg(cp, ifup, dev, "enabling interface\n");
@@ -1126,10 +1133,12 @@ static int cp_open (struct net_device *dev)
1126 1133
1127 cp_init_hw(cp); 1134 cp_init_hw(cp);
1128 1135
1129 rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev); 1136 rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1130 if (rc) 1137 if (rc)
1131 goto err_out_hw; 1138 goto err_out_hw;
1132 1139
1140 cp_enable_irq(cp);
1141
1133 netif_carrier_off(dev); 1142 netif_carrier_off(dev);
1134 mii_check_media(&cp->mii_if, netif_msg_link(cp), true); 1143 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1135 netif_start_queue(dev); 1144 netif_start_queue(dev);
@@ -1161,7 +1170,7 @@ static int cp_close (struct net_device *dev)
1161 1170
1162 spin_unlock_irqrestore(&cp->lock, flags); 1171 spin_unlock_irqrestore(&cp->lock, flags);
1163 1172
1164 free_irq(dev->irq, dev); 1173 free_irq(cp->pdev->irq, dev);
1165 1174
1166 cp_free_rings(cp); 1175 cp_free_rings(cp);
1167 return 0; 1176 return 0;
@@ -1909,7 +1918,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1909 (unsigned long long)pciaddr); 1918 (unsigned long long)pciaddr);
1910 goto err_out_res; 1919 goto err_out_res;
1911 } 1920 }
1912 dev->base_addr = (unsigned long) regs;
1913 cp->regs = regs; 1921 cp->regs = regs;
1914 1922
1915 cp_stop_hw(cp); 1923 cp_stop_hw(cp);
@@ -1937,14 +1945,12 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1937 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 1945 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1938 NETIF_F_HIGHDMA; 1946 NETIF_F_HIGHDMA;
1939 1947
1940 dev->irq = pdev->irq;
1941
1942 rc = register_netdev(dev); 1948 rc = register_netdev(dev);
1943 if (rc) 1949 if (rc)
1944 goto err_out_iomap; 1950 goto err_out_iomap;
1945 1951
1946 netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n", 1952 netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
1947 dev->base_addr, dev->dev_addr, dev->irq); 1953 regs, dev->dev_addr, pdev->irq);
1948 1954
1949 pci_set_drvdata(pdev, dev); 1955 pci_set_drvdata(pdev, dev);
1950 1956
@@ -2031,6 +2037,7 @@ static int cp_resume (struct pci_dev *pdev)
2031 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ 2037 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2032 cp_init_rings_index (cp); 2038 cp_init_rings_index (cp);
2033 cp_init_hw (cp); 2039 cp_init_hw (cp);
2040 cp_enable_irq(cp);
2034 netif_start_queue (dev); 2041 netif_start_queue (dev);
2035 2042
2036 spin_lock_irqsave (&cp->lock, flags); 2043 spin_lock_irqsave (&cp->lock, flags);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index df7fd8d083dc..03df076ed596 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -148,9 +148,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
148 148
149/* Whether to use MMIO or PIO. Default to MMIO. */ 149/* Whether to use MMIO or PIO. Default to MMIO. */
150#ifdef CONFIG_8139TOO_PIO 150#ifdef CONFIG_8139TOO_PIO
151static int use_io = 1; 151static bool use_io = true;
152#else 152#else
153static int use_io = 0; 153static bool use_io = false;
154#endif 154#endif
155 155
156/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 156/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
@@ -620,7 +620,7 @@ MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
620MODULE_LICENSE("GPL"); 620MODULE_LICENSE("GPL");
621MODULE_VERSION(DRV_VERSION); 621MODULE_VERSION(DRV_VERSION);
622 622
623module_param(use_io, int, 0); 623module_param(use_io, bool, 0);
624MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO"); 624MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
625module_param(multicast_filter_limit, int, 0); 625module_param(multicast_filter_limit, int, 0);
626module_param_array(media, int, NULL, 0); 626module_param_array(media, int, NULL, 0);
@@ -750,15 +750,22 @@ static void rtl8139_chip_reset (void __iomem *ioaddr)
750 750
751static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev) 751static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
752{ 752{
753 struct device *d = &pdev->dev;
753 void __iomem *ioaddr; 754 void __iomem *ioaddr;
754 struct net_device *dev; 755 struct net_device *dev;
755 struct rtl8139_private *tp; 756 struct rtl8139_private *tp;
756 u8 tmp8; 757 u8 tmp8;
757 int rc, disable_dev_on_err = 0; 758 int rc, disable_dev_on_err = 0;
758 unsigned int i; 759 unsigned int i, bar;
759 unsigned long pio_start, pio_end, pio_flags, pio_len; 760 unsigned long io_len;
760 unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
761 u32 version; 761 u32 version;
762 static const struct {
763 unsigned long mask;
764 char *type;
765 } res[] = {
766 { IORESOURCE_IO, "PIO" },
767 { IORESOURCE_MEM, "MMIO" }
768 };
762 769
763 assert (pdev != NULL); 770 assert (pdev != NULL);
764 771
@@ -777,78 +784,45 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
777 if (rc) 784 if (rc)
778 goto err_out; 785 goto err_out;
779 786
780 pio_start = pci_resource_start (pdev, 0);
781 pio_end = pci_resource_end (pdev, 0);
782 pio_flags = pci_resource_flags (pdev, 0);
783 pio_len = pci_resource_len (pdev, 0);
784
785 mmio_start = pci_resource_start (pdev, 1);
786 mmio_end = pci_resource_end (pdev, 1);
787 mmio_flags = pci_resource_flags (pdev, 1);
788 mmio_len = pci_resource_len (pdev, 1);
789
790 /* set this immediately, we need to know before
791 * we talk to the chip directly */
792 pr_debug("PIO region size == 0x%02lX\n", pio_len);
793 pr_debug("MMIO region size == 0x%02lX\n", mmio_len);
794
795retry:
796 if (use_io) {
797 /* make sure PCI base addr 0 is PIO */
798 if (!(pio_flags & IORESOURCE_IO)) {
799 dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
800 rc = -ENODEV;
801 goto err_out;
802 }
803 /* check for weird/broken PCI region reporting */
804 if (pio_len < RTL_MIN_IO_SIZE) {
805 dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
806 rc = -ENODEV;
807 goto err_out;
808 }
809 } else {
810 /* make sure PCI base addr 1 is MMIO */
811 if (!(mmio_flags & IORESOURCE_MEM)) {
812 dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
813 rc = -ENODEV;
814 goto err_out;
815 }
816 if (mmio_len < RTL_MIN_IO_SIZE) {
817 dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
818 rc = -ENODEV;
819 goto err_out;
820 }
821 }
822
823 rc = pci_request_regions (pdev, DRV_NAME); 787 rc = pci_request_regions (pdev, DRV_NAME);
824 if (rc) 788 if (rc)
825 goto err_out; 789 goto err_out;
826 disable_dev_on_err = 1; 790 disable_dev_on_err = 1;
827 791
828 /* enable PCI bus-mastering */
829 pci_set_master (pdev); 792 pci_set_master (pdev);
830 793
831 if (use_io) { 794retry:
832 ioaddr = pci_iomap(pdev, 0, 0); 795 /* PIO bar register comes first. */
833 if (!ioaddr) { 796 bar = !use_io;
834 dev_err(&pdev->dev, "cannot map PIO, aborting\n"); 797
835 rc = -EIO; 798 io_len = pci_resource_len(pdev, bar);
836 goto err_out; 799
837 } 800 dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len);
838 dev->base_addr = pio_start; 801
839 tp->regs_len = pio_len; 802 if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) {
840 } else { 803 dev_err(d, "region #%d not a %s resource, aborting\n", bar,
841 /* ioremap MMIO region */ 804 res[bar].type);
842 ioaddr = pci_iomap(pdev, 1, 0); 805 rc = -ENODEV;
843 if (ioaddr == NULL) { 806 goto err_out;
844 dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n"); 807 }
845 pci_release_regions(pdev); 808 if (io_len < RTL_MIN_IO_SIZE) {
846 use_io = 1; 809 dev_err(d, "Invalid PCI %s region size(s), aborting\n",
810 res[bar].type);
811 rc = -ENODEV;
812 goto err_out;
813 }
814
815 ioaddr = pci_iomap(pdev, bar, 0);
816 if (!ioaddr) {
817 dev_err(d, "cannot map %s\n", res[bar].type);
818 if (!use_io) {
819 use_io = true;
847 goto retry; 820 goto retry;
848 } 821 }
849 dev->base_addr = (long) ioaddr; 822 rc = -ENODEV;
850 tp->regs_len = mmio_len; 823 goto err_out;
851 } 824 }
825 tp->regs_len = io_len;
852 tp->mmio_addr = ioaddr; 826 tp->mmio_addr = ioaddr;
853 827
854 /* Bring old chips out of low-power mode. */ 828 /* Bring old chips out of low-power mode. */
@@ -1035,8 +1009,6 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1035 dev->hw_features |= NETIF_F_RXALL; 1009 dev->hw_features |= NETIF_F_RXALL;
1036 dev->hw_features |= NETIF_F_RXFCS; 1010 dev->hw_features |= NETIF_F_RXFCS;
1037 1011
1038 dev->irq = pdev->irq;
1039
1040 /* tp zeroed and aligned in alloc_etherdev */ 1012 /* tp zeroed and aligned in alloc_etherdev */
1041 tp = netdev_priv(dev); 1013 tp = netdev_priv(dev);
1042 1014
@@ -1062,9 +1034,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1062 1034
1063 pci_set_drvdata (pdev, dev); 1035 pci_set_drvdata (pdev, dev);
1064 1036
1065 netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", 1037 netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n",
1066 board_info[ent->driver_data].name, 1038 board_info[ent->driver_data].name,
1067 dev->base_addr, dev->dev_addr, dev->irq); 1039 ioaddr, dev->dev_addr, pdev->irq);
1068 1040
1069 netdev_dbg(dev, "Identified 8139 chip type '%s'\n", 1041 netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
1070 rtl_chip_info[tp->chipset].name); 1042 rtl_chip_info[tp->chipset].name);
@@ -1339,10 +1311,11 @@ static void mdio_write (struct net_device *dev, int phy_id, int location,
1339static int rtl8139_open (struct net_device *dev) 1311static int rtl8139_open (struct net_device *dev)
1340{ 1312{
1341 struct rtl8139_private *tp = netdev_priv(dev); 1313 struct rtl8139_private *tp = netdev_priv(dev);
1342 int retval;
1343 void __iomem *ioaddr = tp->mmio_addr; 1314 void __iomem *ioaddr = tp->mmio_addr;
1315 const int irq = tp->pci_dev->irq;
1316 int retval;
1344 1317
1345 retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); 1318 retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
1346 if (retval) 1319 if (retval)
1347 return retval; 1320 return retval;
1348 1321
@@ -1351,7 +1324,7 @@ static int rtl8139_open (struct net_device *dev)
1351 tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, 1324 tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
1352 &tp->rx_ring_dma, GFP_KERNEL); 1325 &tp->rx_ring_dma, GFP_KERNEL);
1353 if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { 1326 if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
1354 free_irq(dev->irq, dev); 1327 free_irq(irq, dev);
1355 1328
1356 if (tp->tx_bufs) 1329 if (tp->tx_bufs)
1357 dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, 1330 dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
@@ -1377,7 +1350,7 @@ static int rtl8139_open (struct net_device *dev)
1377 "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n", 1350 "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
1378 __func__, 1351 __func__,
1379 (unsigned long long)pci_resource_start (tp->pci_dev, 1), 1352 (unsigned long long)pci_resource_start (tp->pci_dev, 1),
1380 dev->irq, RTL_R8 (MediaStatus), 1353 irq, RTL_R8 (MediaStatus),
1381 tp->mii.full_duplex ? "full" : "half"); 1354 tp->mii.full_duplex ? "full" : "half");
1382 1355
1383 rtl8139_start_thread(tp); 1356 rtl8139_start_thread(tp);
@@ -2240,9 +2213,12 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
2240 */ 2213 */
2241static void rtl8139_poll_controller(struct net_device *dev) 2214static void rtl8139_poll_controller(struct net_device *dev)
2242{ 2215{
2243 disable_irq(dev->irq); 2216 struct rtl8139_private *tp = netdev_priv(dev);
2244 rtl8139_interrupt(dev->irq, dev); 2217 const int irq = tp->pci_dev->irq;
2245 enable_irq(dev->irq); 2218
2219 disable_irq(irq);
2220 rtl8139_interrupt(irq, dev);
2221 enable_irq(irq);
2246} 2222}
2247#endif 2223#endif
2248 2224
@@ -2295,7 +2271,7 @@ static int rtl8139_close (struct net_device *dev)
2295 2271
2296 spin_unlock_irqrestore (&tp->lock, flags); 2272 spin_unlock_irqrestore (&tp->lock, flags);
2297 2273
2298 free_irq (dev->irq, dev); 2274 free_irq(tp->pci_dev->irq, dev);
2299 2275
2300 rtl8139_tx_clear (tp); 2276 rtl8139_tx_clear (tp);
2301 2277
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f54509377efa..4f74b9762c29 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -44,6 +44,8 @@
44#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" 44#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" 45#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" 46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
47 49
48#ifdef RTL8169_DEBUG 50#ifdef RTL8169_DEBUG
49#define assert(expr) \ 51#define assert(expr) \
@@ -61,8 +63,12 @@
61#define R8169_MSG_DEFAULT \ 63#define R8169_MSG_DEFAULT \
62 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) 64 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
63 65
64#define TX_BUFFS_AVAIL(tp) \ 66#define TX_SLOTS_AVAIL(tp) \
65 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) 67 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
68
69/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
70#define TX_FRAGS_READY_FOR(tp,nr_frags) \
71 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
66 72
67/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 73/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
68 The RTL chips use a 64 element hash table based on the Ethernet CRC. */ 74 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
@@ -133,6 +139,8 @@ enum mac_version {
133 RTL_GIGA_MAC_VER_34, 139 RTL_GIGA_MAC_VER_34,
134 RTL_GIGA_MAC_VER_35, 140 RTL_GIGA_MAC_VER_35,
135 RTL_GIGA_MAC_VER_36, 141 RTL_GIGA_MAC_VER_36,
142 RTL_GIGA_MAC_VER_37,
143 RTL_GIGA_MAC_VER_38,
136 RTL_GIGA_MAC_NONE = 0xff, 144 RTL_GIGA_MAC_NONE = 0xff,
137}; 145};
138 146
@@ -245,6 +253,12 @@ static const struct {
245 [RTL_GIGA_MAC_VER_36] = 253 [RTL_GIGA_MAC_VER_36] =
246 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, 254 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
247 JUMBO_9K, false), 255 JUMBO_9K, false),
256 [RTL_GIGA_MAC_VER_37] =
257 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
258 JUMBO_1K, true),
259 [RTL_GIGA_MAC_VER_38] =
260 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
261 JUMBO_9K, false),
248}; 262};
249#undef _R 263#undef _R
250 264
@@ -315,6 +329,8 @@ enum rtl_registers {
315 Config0 = 0x51, 329 Config0 = 0x51,
316 Config1 = 0x52, 330 Config1 = 0x52,
317 Config2 = 0x53, 331 Config2 = 0x53,
332#define PME_SIGNAL (1 << 5) /* 8168c and later */
333
318 Config3 = 0x54, 334 Config3 = 0x54,
319 Config4 = 0x55, 335 Config4 = 0x55,
320 Config5 = 0x56, 336 Config5 = 0x56,
@@ -355,6 +371,9 @@ enum rtl8168_8101_registers {
355#define CSIAR_BYTE_ENABLE 0x0f 371#define CSIAR_BYTE_ENABLE 0x0f
356#define CSIAR_BYTE_ENABLE_SHIFT 12 372#define CSIAR_BYTE_ENABLE_SHIFT 12
357#define CSIAR_ADDR_MASK 0x0fff 373#define CSIAR_ADDR_MASK 0x0fff
374#define CSIAR_FUNC_CARD 0x00000000
375#define CSIAR_FUNC_SDIO 0x00010000
376#define CSIAR_FUNC_NIC 0x00020000
358 PMCH = 0x6f, 377 PMCH = 0x6f,
359 EPHYAR = 0x80, 378 EPHYAR = 0x80,
360#define EPHYAR_FLAG 0x80000000 379#define EPHYAR_FLAG 0x80000000
@@ -716,6 +735,11 @@ struct rtl8169_private {
716 void (*disable)(struct rtl8169_private *); 735 void (*disable)(struct rtl8169_private *);
717 } jumbo_ops; 736 } jumbo_ops;
718 737
738 struct csi_ops {
739 void (*write)(void __iomem *, int, int);
740 u32 (*read)(void __iomem *, int);
741 } csi_ops;
742
719 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); 743 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
720 int (*get_settings)(struct net_device *, struct ethtool_cmd *); 744 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
721 void (*phy_reset_enable)(struct rtl8169_private *tp); 745 void (*phy_reset_enable)(struct rtl8169_private *tp);
@@ -768,6 +792,8 @@ MODULE_FIRMWARE(FIRMWARE_8168E_3);
768MODULE_FIRMWARE(FIRMWARE_8105E_1); 792MODULE_FIRMWARE(FIRMWARE_8105E_1);
769MODULE_FIRMWARE(FIRMWARE_8168F_1); 793MODULE_FIRMWARE(FIRMWARE_8168F_1);
770MODULE_FIRMWARE(FIRMWARE_8168F_2); 794MODULE_FIRMWARE(FIRMWARE_8168F_2);
795MODULE_FIRMWARE(FIRMWARE_8402_1);
796MODULE_FIRMWARE(FIRMWARE_8411_1);
771 797
772static void rtl_lock_work(struct rtl8169_private *tp) 798static void rtl_lock_work(struct rtl8169_private *tp)
773{ 799{
@@ -1078,40 +1104,6 @@ static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
1078 return value; 1104 return value;
1079} 1105}
1080 1106
1081static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
1082{
1083 unsigned int i;
1084
1085 RTL_W32(CSIDR, value);
1086 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
1087 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
1088
1089 for (i = 0; i < 100; i++) {
1090 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
1091 break;
1092 udelay(10);
1093 }
1094}
1095
1096static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
1097{
1098 u32 value = ~0x00;
1099 unsigned int i;
1100
1101 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
1102 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
1103
1104 for (i = 0; i < 100; i++) {
1105 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
1106 value = RTL_R32(CSIDR);
1107 break;
1108 }
1109 udelay(10);
1110 }
1111
1112 return value;
1113}
1114
1115static 1107static
1116void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type) 1108void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
1117{ 1109{
@@ -1281,7 +1273,8 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
1281 if (!netif_running(dev)) 1273 if (!netif_running(dev))
1282 return; 1274 return;
1283 1275
1284 if (tp->mac_version == RTL_GIGA_MAC_VER_34) { 1276 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1277 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1285 if (RTL_R8(PHYstatus) & _1000bpsF) { 1278 if (RTL_R8(PHYstatus) & _1000bpsF) {
1286 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1279 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
1287 0x00000011, ERIAR_EXGMAC); 1280 0x00000011, ERIAR_EXGMAC);
@@ -1316,6 +1309,16 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
1316 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1309 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
1317 0x0000003f, ERIAR_EXGMAC); 1310 0x0000003f, ERIAR_EXGMAC);
1318 } 1311 }
1312 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1313 if (RTL_R8(PHYstatus) & _10bps) {
1314 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
1315 0x4d02, ERIAR_EXGMAC);
1316 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
1317 0x0060, ERIAR_EXGMAC);
1318 } else {
1319 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
1320 0x0000, ERIAR_EXGMAC);
1321 }
1319 } 1322 }
1320} 1323}
1321 1324
@@ -1396,7 +1399,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1396 u16 reg; 1399 u16 reg;
1397 u8 mask; 1400 u8 mask;
1398 } cfg[] = { 1401 } cfg[] = {
1399 { WAKE_ANY, Config1, PMEnable },
1400 { WAKE_PHY, Config3, LinkUp }, 1402 { WAKE_PHY, Config3, LinkUp },
1401 { WAKE_MAGIC, Config3, MagicPacket }, 1403 { WAKE_MAGIC, Config3, MagicPacket },
1402 { WAKE_UCAST, Config5, UWF }, 1404 { WAKE_UCAST, Config5, UWF },
@@ -1404,16 +1406,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1404 { WAKE_MCAST, Config5, MWF }, 1406 { WAKE_MCAST, Config5, MWF },
1405 { WAKE_ANY, Config5, LanWake } 1407 { WAKE_ANY, Config5, LanWake }
1406 }; 1408 };
1409 u8 options;
1407 1410
1408 RTL_W8(Cfg9346, Cfg9346_Unlock); 1411 RTL_W8(Cfg9346, Cfg9346_Unlock);
1409 1412
1410 for (i = 0; i < ARRAY_SIZE(cfg); i++) { 1413 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1411 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; 1414 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1412 if (wolopts & cfg[i].opt) 1415 if (wolopts & cfg[i].opt)
1413 options |= cfg[i].mask; 1416 options |= cfg[i].mask;
1414 RTL_W8(cfg[i].reg, options); 1417 RTL_W8(cfg[i].reg, options);
1415 } 1418 }
1416 1419
1420 switch (tp->mac_version) {
1421 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1422 options = RTL_R8(Config1) & ~PMEnable;
1423 if (wolopts)
1424 options |= PMEnable;
1425 RTL_W8(Config1, options);
1426 break;
1427 default:
1428 options = RTL_R8(Config2) & ~PME_SIGNAL;
1429 if (wolopts)
1430 options |= PME_SIGNAL;
1431 RTL_W8(Config2, options);
1432 break;
1433 }
1434
1417 RTL_W8(Cfg9346, Cfg9346_Lock); 1435 RTL_W8(Cfg9346, Cfg9346_Lock);
1418} 1436}
1419 1437
@@ -1853,6 +1871,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
1853 .get_strings = rtl8169_get_strings, 1871 .get_strings = rtl8169_get_strings,
1854 .get_sset_count = rtl8169_get_sset_count, 1872 .get_sset_count = rtl8169_get_sset_count,
1855 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1873 .get_ethtool_stats = rtl8169_get_ethtool_stats,
1874 .get_ts_info = ethtool_op_get_ts_info,
1856}; 1875};
1857 1876
1858static void rtl8169_get_mac_version(struct rtl8169_private *tp, 1877static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1876,6 +1895,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1876 int mac_version; 1895 int mac_version;
1877 } mac_info[] = { 1896 } mac_info[] = {
1878 /* 8168F family. */ 1897 /* 8168F family. */
1898 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
1879 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, 1899 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
1880 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, 1900 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
1881 1901
@@ -1913,6 +1933,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1913 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 1933 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1914 1934
1915 /* 8101 family. */ 1935 /* 8101 family. */
1936 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
1916 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, 1937 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
1917 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, 1938 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
1918 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, 1939 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
@@ -3013,6 +3034,28 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3013 rtl_writephy(tp, 0x1f, 0x0000); 3034 rtl_writephy(tp, 0x1f, 0x0000);
3014} 3035}
3015 3036
3037static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3038{
3039 /* For 4-corner performance improve */
3040 rtl_writephy(tp, 0x1f, 0x0005);
3041 rtl_writephy(tp, 0x05, 0x8b80);
3042 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3043 rtl_writephy(tp, 0x1f, 0x0000);
3044
3045 /* PHY auto speed down */
3046 rtl_writephy(tp, 0x1f, 0x0007);
3047 rtl_writephy(tp, 0x1e, 0x002d);
3048 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3049 rtl_writephy(tp, 0x1f, 0x0000);
3050 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3051
3052 /* Improve 10M EEE waveform */
3053 rtl_writephy(tp, 0x1f, 0x0005);
3054 rtl_writephy(tp, 0x05, 0x8b86);
3055 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3056 rtl_writephy(tp, 0x1f, 0x0000);
3057}
3058
3016static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) 3059static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3017{ 3060{
3018 static const struct phy_reg phy_reg_init[] = { 3061 static const struct phy_reg phy_reg_init[] = {
@@ -3054,24 +3097,7 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3054 3097
3055 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3098 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3056 3099
3057 /* For 4-corner performance improve */ 3100 rtl8168f_hw_phy_config(tp);
3058 rtl_writephy(tp, 0x1f, 0x0005);
3059 rtl_writephy(tp, 0x05, 0x8b80);
3060 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3061 rtl_writephy(tp, 0x1f, 0x0000);
3062
3063 /* PHY auto speed down */
3064 rtl_writephy(tp, 0x1f, 0x0007);
3065 rtl_writephy(tp, 0x1e, 0x002d);
3066 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3067 rtl_writephy(tp, 0x1f, 0x0000);
3068 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3069
3070 /* Improve 10M EEE waveform */
3071 rtl_writephy(tp, 0x1f, 0x0005);
3072 rtl_writephy(tp, 0x05, 0x8b86);
3073 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3074 rtl_writephy(tp, 0x1f, 0x0000);
3075 3101
3076 /* Improve 2-pair detection performance */ 3102 /* Improve 2-pair detection performance */
3077 rtl_writephy(tp, 0x1f, 0x0005); 3103 rtl_writephy(tp, 0x1f, 0x0005);
@@ -3084,23 +3110,104 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3084{ 3110{
3085 rtl_apply_firmware(tp); 3111 rtl_apply_firmware(tp);
3086 3112
3087 /* For 4-corner performance improve */ 3113 rtl8168f_hw_phy_config(tp);
3114}
3115
3116static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3117{
3118 void __iomem *ioaddr = tp->mmio_addr;
3119 static const struct phy_reg phy_reg_init[] = {
3120 /* Channel estimation fine tune */
3121 { 0x1f, 0x0003 },
3122 { 0x09, 0xa20f },
3123 { 0x1f, 0x0000 },
3124
3125 /* Modify green table for giga & fnet */
3126 { 0x1f, 0x0005 },
3127 { 0x05, 0x8b55 },
3128 { 0x06, 0x0000 },
3129 { 0x05, 0x8b5e },
3130 { 0x06, 0x0000 },
3131 { 0x05, 0x8b67 },
3132 { 0x06, 0x0000 },
3133 { 0x05, 0x8b70 },
3134 { 0x06, 0x0000 },
3135 { 0x1f, 0x0000 },
3136 { 0x1f, 0x0007 },
3137 { 0x1e, 0x0078 },
3138 { 0x17, 0x0000 },
3139 { 0x19, 0x00aa },
3140 { 0x1f, 0x0000 },
3141
3142 /* Modify green table for 10M */
3143 { 0x1f, 0x0005 },
3144 { 0x05, 0x8b79 },
3145 { 0x06, 0xaa00 },
3146 { 0x1f, 0x0000 },
3147
3148 /* Disable hiimpedance detection (RTCT) */
3149 { 0x1f, 0x0003 },
3150 { 0x01, 0x328a },
3151 { 0x1f, 0x0000 }
3152 };
3153
3154
3155 rtl_apply_firmware(tp);
3156
3157 rtl8168f_hw_phy_config(tp);
3158
3159 /* Improve 2-pair detection performance */
3088 rtl_writephy(tp, 0x1f, 0x0005); 3160 rtl_writephy(tp, 0x1f, 0x0005);
3089 rtl_writephy(tp, 0x05, 0x8b80); 3161 rtl_writephy(tp, 0x05, 0x8b85);
3090 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); 3162 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3091 rtl_writephy(tp, 0x1f, 0x0000); 3163 rtl_writephy(tp, 0x1f, 0x0000);
3092 3164
3093 /* PHY auto speed down */ 3165 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3094 rtl_writephy(tp, 0x1f, 0x0007); 3166
3095 rtl_writephy(tp, 0x1e, 0x002d); 3167 /* Modify green table for giga */
3096 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); 3168 rtl_writephy(tp, 0x1f, 0x0005);
3169 rtl_writephy(tp, 0x05, 0x8b54);
3170 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3171 rtl_writephy(tp, 0x05, 0x8b5d);
3172 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3173 rtl_writephy(tp, 0x05, 0x8a7c);
3174 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3175 rtl_writephy(tp, 0x05, 0x8a7f);
3176 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3177 rtl_writephy(tp, 0x05, 0x8a82);
3178 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3179 rtl_writephy(tp, 0x05, 0x8a85);
3180 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3181 rtl_writephy(tp, 0x05, 0x8a88);
3182 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3097 rtl_writephy(tp, 0x1f, 0x0000); 3183 rtl_writephy(tp, 0x1f, 0x0000);
3098 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3099 3184
3100 /* Improve 10M EEE waveform */ 3185 /* uc same-seed solution */
3101 rtl_writephy(tp, 0x1f, 0x0005); 3186 rtl_writephy(tp, 0x1f, 0x0005);
3102 rtl_writephy(tp, 0x05, 0x8b86); 3187 rtl_writephy(tp, 0x05, 0x8b85);
3103 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); 3188 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3189 rtl_writephy(tp, 0x1f, 0x0000);
3190
3191 /* eee setting */
3192 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3193 rtl_writephy(tp, 0x1f, 0x0005);
3194 rtl_writephy(tp, 0x05, 0x8b85);
3195 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3196 rtl_writephy(tp, 0x1f, 0x0004);
3197 rtl_writephy(tp, 0x1f, 0x0007);
3198 rtl_writephy(tp, 0x1e, 0x0020);
3199 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3200 rtl_writephy(tp, 0x1f, 0x0000);
3201 rtl_writephy(tp, 0x0d, 0x0007);
3202 rtl_writephy(tp, 0x0e, 0x003c);
3203 rtl_writephy(tp, 0x0d, 0x4007);
3204 rtl_writephy(tp, 0x0e, 0x0000);
3205 rtl_writephy(tp, 0x0d, 0x0000);
3206
3207 /* Green feature */
3208 rtl_writephy(tp, 0x1f, 0x0003);
3209 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3210 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3104 rtl_writephy(tp, 0x1f, 0x0000); 3211 rtl_writephy(tp, 0x1f, 0x0000);
3105} 3212}
3106 3213
@@ -3147,6 +3254,25 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3147 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3254 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3148} 3255}
3149 3256
3257static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3258{
3259 void __iomem *ioaddr = tp->mmio_addr;
3260
3261 /* Disable ALDPS before setting firmware */
3262 rtl_writephy(tp, 0x1f, 0x0000);
3263 rtl_writephy(tp, 0x18, 0x0310);
3264 msleep(20);
3265
3266 rtl_apply_firmware(tp);
3267
3268 /* EEE setting */
3269 rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3270 rtl_writephy(tp, 0x1f, 0x0004);
3271 rtl_writephy(tp, 0x10, 0x401f);
3272 rtl_writephy(tp, 0x19, 0x7030);
3273 rtl_writephy(tp, 0x1f, 0x0000);
3274}
3275
3150static void rtl_hw_phy_config(struct net_device *dev) 3276static void rtl_hw_phy_config(struct net_device *dev)
3151{ 3277{
3152 struct rtl8169_private *tp = netdev_priv(dev); 3278 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3235,6 +3361,14 @@ static void rtl_hw_phy_config(struct net_device *dev)
3235 rtl8168f_2_hw_phy_config(tp); 3361 rtl8168f_2_hw_phy_config(tp);
3236 break; 3362 break;
3237 3363
3364 case RTL_GIGA_MAC_VER_37:
3365 rtl8402_hw_phy_config(tp);
3366 break;
3367
3368 case RTL_GIGA_MAC_VER_38:
3369 rtl8411_hw_phy_config(tp);
3370 break;
3371
3238 default: 3372 default:
3239 break; 3373 break;
3240 } 3374 }
@@ -3472,6 +3606,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3472 case RTL_GIGA_MAC_VER_32: 3606 case RTL_GIGA_MAC_VER_32:
3473 case RTL_GIGA_MAC_VER_33: 3607 case RTL_GIGA_MAC_VER_33:
3474 case RTL_GIGA_MAC_VER_34: 3608 case RTL_GIGA_MAC_VER_34:
3609 case RTL_GIGA_MAC_VER_37:
3610 case RTL_GIGA_MAC_VER_38:
3475 RTL_W32(RxConfig, RTL_R32(RxConfig) | 3611 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3476 AcceptBroadcast | AcceptMulticast | AcceptMyPhys); 3612 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3477 break; 3613 break;
@@ -3507,15 +3643,45 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
3507 3643
3508static void r810x_pll_power_down(struct rtl8169_private *tp) 3644static void r810x_pll_power_down(struct rtl8169_private *tp)
3509{ 3645{
3646 void __iomem *ioaddr = tp->mmio_addr;
3647
3510 if (rtl_wol_pll_power_down(tp)) 3648 if (rtl_wol_pll_power_down(tp))
3511 return; 3649 return;
3512 3650
3513 r810x_phy_power_down(tp); 3651 r810x_phy_power_down(tp);
3652
3653 switch (tp->mac_version) {
3654 case RTL_GIGA_MAC_VER_07:
3655 case RTL_GIGA_MAC_VER_08:
3656 case RTL_GIGA_MAC_VER_09:
3657 case RTL_GIGA_MAC_VER_10:
3658 case RTL_GIGA_MAC_VER_13:
3659 case RTL_GIGA_MAC_VER_16:
3660 break;
3661 default:
3662 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3663 break;
3664 }
3514} 3665}
3515 3666
3516static void r810x_pll_power_up(struct rtl8169_private *tp) 3667static void r810x_pll_power_up(struct rtl8169_private *tp)
3517{ 3668{
3669 void __iomem *ioaddr = tp->mmio_addr;
3670
3518 r810x_phy_power_up(tp); 3671 r810x_phy_power_up(tp);
3672
3673 switch (tp->mac_version) {
3674 case RTL_GIGA_MAC_VER_07:
3675 case RTL_GIGA_MAC_VER_08:
3676 case RTL_GIGA_MAC_VER_09:
3677 case RTL_GIGA_MAC_VER_10:
3678 case RTL_GIGA_MAC_VER_13:
3679 case RTL_GIGA_MAC_VER_16:
3680 break;
3681 default:
3682 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3683 break;
3684 }
3519} 3685}
3520 3686
3521static void r8168_phy_power_up(struct rtl8169_private *tp) 3687static void r8168_phy_power_up(struct rtl8169_private *tp)
@@ -3619,13 +3785,6 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
3619{ 3785{
3620 void __iomem *ioaddr = tp->mmio_addr; 3786 void __iomem *ioaddr = tp->mmio_addr;
3621 3787
3622 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3623 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3624 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
3625 r8168dp_check_dash(tp)) {
3626 return;
3627 }
3628
3629 switch (tp->mac_version) { 3788 switch (tp->mac_version) {
3630 case RTL_GIGA_MAC_VER_25: 3789 case RTL_GIGA_MAC_VER_25:
3631 case RTL_GIGA_MAC_VER_26: 3790 case RTL_GIGA_MAC_VER_26:
@@ -3670,6 +3829,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3670 case RTL_GIGA_MAC_VER_16: 3829 case RTL_GIGA_MAC_VER_16:
3671 case RTL_GIGA_MAC_VER_29: 3830 case RTL_GIGA_MAC_VER_29:
3672 case RTL_GIGA_MAC_VER_30: 3831 case RTL_GIGA_MAC_VER_30:
3832 case RTL_GIGA_MAC_VER_37:
3673 ops->down = r810x_pll_power_down; 3833 ops->down = r810x_pll_power_down;
3674 ops->up = r810x_pll_power_up; 3834 ops->up = r810x_pll_power_up;
3675 break; 3835 break;
@@ -3694,6 +3854,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3694 case RTL_GIGA_MAC_VER_34: 3854 case RTL_GIGA_MAC_VER_34:
3695 case RTL_GIGA_MAC_VER_35: 3855 case RTL_GIGA_MAC_VER_35:
3696 case RTL_GIGA_MAC_VER_36: 3856 case RTL_GIGA_MAC_VER_36:
3857 case RTL_GIGA_MAC_VER_38:
3697 ops->down = r8168_pll_power_down; 3858 ops->down = r8168_pll_power_down;
3698 ops->up = r8168_pll_power_up; 3859 ops->up = r8168_pll_power_up;
3699 break; 3860 break;
@@ -3979,7 +4140,9 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3979 udelay(20); 4140 udelay(20);
3980 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || 4141 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
3981 tp->mac_version == RTL_GIGA_MAC_VER_35 || 4142 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
3982 tp->mac_version == RTL_GIGA_MAC_VER_36) { 4143 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4144 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4145 tp->mac_version == RTL_GIGA_MAC_VER_38) {
3983 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); 4146 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
3984 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) 4147 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
3985 udelay(100); 4148 udelay(100);
@@ -4185,22 +4348,141 @@ static void rtl_hw_start_8169(struct net_device *dev)
4185 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); 4348 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
4186} 4349}
4187 4350
4188static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits) 4351static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4352{
4353 if (tp->csi_ops.write)
4354 tp->csi_ops.write(tp->mmio_addr, addr, value);
4355}
4356
4357static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4358{
4359 if (tp->csi_ops.read)
4360 return tp->csi_ops.read(tp->mmio_addr, addr);
4361 else
4362 return ~0;
4363}
4364
4365static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4189{ 4366{
4190 u32 csi; 4367 u32 csi;
4191 4368
4192 csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff; 4369 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4193 rtl_csi_write(ioaddr, 0x070c, csi | bits); 4370 rtl_csi_write(tp, 0x070c, csi | bits);
4371}
4372
4373static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4374{
4375 rtl_csi_access_enable(tp, 0x17000000);
4376}
4377
4378static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4379{
4380 rtl_csi_access_enable(tp, 0x27000000);
4381}
4382
4383static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
4384{
4385 unsigned int i;
4386
4387 RTL_W32(CSIDR, value);
4388 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4389 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4390
4391 for (i = 0; i < 100; i++) {
4392 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4393 break;
4394 udelay(10);
4395 }
4396}
4397
4398static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
4399{
4400 u32 value = ~0x00;
4401 unsigned int i;
4402
4403 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4404 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4405
4406 for (i = 0; i < 100; i++) {
4407 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
4408 value = RTL_R32(CSIDR);
4409 break;
4410 }
4411 udelay(10);
4412 }
4413
4414 return value;
4415}
4416
4417static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
4418{
4419 unsigned int i;
4420
4421 RTL_W32(CSIDR, value);
4422 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4423 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4424 CSIAR_FUNC_NIC);
4425
4426 for (i = 0; i < 100; i++) {
4427 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4428 break;
4429 udelay(10);
4430 }
4194} 4431}
4195 4432
4196static void rtl_csi_access_enable_1(void __iomem *ioaddr) 4433static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
4197{ 4434{
4198 rtl_csi_access_enable(ioaddr, 0x17000000); 4435 u32 value = ~0x00;
4436 unsigned int i;
4437
4438 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4439 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4440
4441 for (i = 0; i < 100; i++) {
4442 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
4443 value = RTL_R32(CSIDR);
4444 break;
4445 }
4446 udelay(10);
4447 }
4448
4449 return value;
4199} 4450}
4200 4451
4201static void rtl_csi_access_enable_2(void __iomem *ioaddr) 4452static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
4202{ 4453{
4203 rtl_csi_access_enable(ioaddr, 0x27000000); 4454 struct csi_ops *ops = &tp->csi_ops;
4455
4456 switch (tp->mac_version) {
4457 case RTL_GIGA_MAC_VER_01:
4458 case RTL_GIGA_MAC_VER_02:
4459 case RTL_GIGA_MAC_VER_03:
4460 case RTL_GIGA_MAC_VER_04:
4461 case RTL_GIGA_MAC_VER_05:
4462 case RTL_GIGA_MAC_VER_06:
4463 case RTL_GIGA_MAC_VER_10:
4464 case RTL_GIGA_MAC_VER_11:
4465 case RTL_GIGA_MAC_VER_12:
4466 case RTL_GIGA_MAC_VER_13:
4467 case RTL_GIGA_MAC_VER_14:
4468 case RTL_GIGA_MAC_VER_15:
4469 case RTL_GIGA_MAC_VER_16:
4470 case RTL_GIGA_MAC_VER_17:
4471 ops->write = NULL;
4472 ops->read = NULL;
4473 break;
4474
4475 case RTL_GIGA_MAC_VER_37:
4476 case RTL_GIGA_MAC_VER_38:
4477 ops->write = r8402_csi_write;
4478 ops->read = r8402_csi_read;
4479 break;
4480
4481 default:
4482 ops->write = r8169_csi_write;
4483 ops->read = r8169_csi_read;
4484 break;
4485 }
4204} 4486}
4205 4487
4206struct ephy_info { 4488struct ephy_info {
@@ -4257,8 +4539,11 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
4257 PktCntrDisable | \ 4539 PktCntrDisable | \
4258 Mac_dbgo_sel) 4540 Mac_dbgo_sel)
4259 4541
4260static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev) 4542static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4261{ 4543{
4544 void __iomem *ioaddr = tp->mmio_addr;
4545 struct pci_dev *pdev = tp->pci_dev;
4546
4262 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4547 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4263 4548
4264 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4549 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
@@ -4267,17 +4552,22 @@ static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
4267 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); 4552 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4268} 4553}
4269 4554
4270static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev) 4555static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4271{ 4556{
4272 rtl_hw_start_8168bb(ioaddr, pdev); 4557 void __iomem *ioaddr = tp->mmio_addr;
4558
4559 rtl_hw_start_8168bb(tp);
4273 4560
4274 RTL_W8(MaxTxPacketSize, TxPacketMax); 4561 RTL_W8(MaxTxPacketSize, TxPacketMax);
4275 4562
4276 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); 4563 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
4277} 4564}
4278 4565
4279static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev) 4566static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4280{ 4567{
4568 void __iomem *ioaddr = tp->mmio_addr;
4569 struct pci_dev *pdev = tp->pci_dev;
4570
4281 RTL_W8(Config1, RTL_R8(Config1) | Speed_down); 4571 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4282 4572
4283 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4573 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
@@ -4289,8 +4579,9 @@ static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
4289 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4579 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4290} 4580}
4291 4581
4292static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev) 4582static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4293{ 4583{
4584 void __iomem *ioaddr = tp->mmio_addr;
4294 static const struct ephy_info e_info_8168cp[] = { 4585 static const struct ephy_info e_info_8168cp[] = {
4295 { 0x01, 0, 0x0001 }, 4586 { 0x01, 0, 0x0001 },
4296 { 0x02, 0x0800, 0x1000 }, 4587 { 0x02, 0x0800, 0x1000 },
@@ -4299,16 +4590,19 @@ static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
4299 { 0x07, 0, 0x2000 } 4590 { 0x07, 0, 0x2000 }
4300 }; 4591 };
4301 4592
4302 rtl_csi_access_enable_2(ioaddr); 4593 rtl_csi_access_enable_2(tp);
4303 4594
4304 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); 4595 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4305 4596
4306 __rtl_hw_start_8168cp(ioaddr, pdev); 4597 __rtl_hw_start_8168cp(tp);
4307} 4598}
4308 4599
4309static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev) 4600static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4310{ 4601{
4311 rtl_csi_access_enable_2(ioaddr); 4602 void __iomem *ioaddr = tp->mmio_addr;
4603 struct pci_dev *pdev = tp->pci_dev;
4604
4605 rtl_csi_access_enable_2(tp);
4312 4606
4313 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4607 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4314 4608
@@ -4317,9 +4611,12 @@ static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
4317 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4611 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4318} 4612}
4319 4613
4320static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev) 4614static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4321{ 4615{
4322 rtl_csi_access_enable_2(ioaddr); 4616 void __iomem *ioaddr = tp->mmio_addr;
4617 struct pci_dev *pdev = tp->pci_dev;
4618
4619 rtl_csi_access_enable_2(tp);
4323 4620
4324 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4621 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4325 4622
@@ -4333,52 +4630,57 @@ static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
4333 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4630 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4334} 4631}
4335 4632
4336static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev) 4633static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4337{ 4634{
4635 void __iomem *ioaddr = tp->mmio_addr;
4338 static const struct ephy_info e_info_8168c_1[] = { 4636 static const struct ephy_info e_info_8168c_1[] = {
4339 { 0x02, 0x0800, 0x1000 }, 4637 { 0x02, 0x0800, 0x1000 },
4340 { 0x03, 0, 0x0002 }, 4638 { 0x03, 0, 0x0002 },
4341 { 0x06, 0x0080, 0x0000 } 4639 { 0x06, 0x0080, 0x0000 }
4342 }; 4640 };
4343 4641
4344 rtl_csi_access_enable_2(ioaddr); 4642 rtl_csi_access_enable_2(tp);
4345 4643
4346 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); 4644 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4347 4645
4348 rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); 4646 rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4349 4647
4350 __rtl_hw_start_8168cp(ioaddr, pdev); 4648 __rtl_hw_start_8168cp(tp);
4351} 4649}
4352 4650
4353static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev) 4651static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4354{ 4652{
4653 void __iomem *ioaddr = tp->mmio_addr;
4355 static const struct ephy_info e_info_8168c_2[] = { 4654 static const struct ephy_info e_info_8168c_2[] = {
4356 { 0x01, 0, 0x0001 }, 4655 { 0x01, 0, 0x0001 },
4357 { 0x03, 0x0400, 0x0220 } 4656 { 0x03, 0x0400, 0x0220 }
4358 }; 4657 };
4359 4658
4360 rtl_csi_access_enable_2(ioaddr); 4659 rtl_csi_access_enable_2(tp);
4361 4660
4362 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); 4661 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4363 4662
4364 __rtl_hw_start_8168cp(ioaddr, pdev); 4663 __rtl_hw_start_8168cp(tp);
4365} 4664}
4366 4665
4367static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev) 4666static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4368{ 4667{
4369 rtl_hw_start_8168c_2(ioaddr, pdev); 4668 rtl_hw_start_8168c_2(tp);
4370} 4669}
4371 4670
4372static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev) 4671static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4373{ 4672{
4374 rtl_csi_access_enable_2(ioaddr); 4673 rtl_csi_access_enable_2(tp);
4375 4674
4376 __rtl_hw_start_8168cp(ioaddr, pdev); 4675 __rtl_hw_start_8168cp(tp);
4377} 4676}
4378 4677
4379static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev) 4678static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4380{ 4679{
4381 rtl_csi_access_enable_2(ioaddr); 4680 void __iomem *ioaddr = tp->mmio_addr;
4681 struct pci_dev *pdev = tp->pci_dev;
4682
4683 rtl_csi_access_enable_2(tp);
4382 4684
4383 rtl_disable_clock_request(pdev); 4685 rtl_disable_clock_request(pdev);
4384 4686
@@ -4389,9 +4691,12 @@ static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
4389 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4691 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4390} 4692}
4391 4693
4392static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev) 4694static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4393{ 4695{
4394 rtl_csi_access_enable_1(ioaddr); 4696 void __iomem *ioaddr = tp->mmio_addr;
4697 struct pci_dev *pdev = tp->pci_dev;
4698
4699 rtl_csi_access_enable_1(tp);
4395 4700
4396 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4701 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4397 4702
@@ -4400,8 +4705,10 @@ static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
4400 rtl_disable_clock_request(pdev); 4705 rtl_disable_clock_request(pdev);
4401} 4706}
4402 4707
4403static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) 4708static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4404{ 4709{
4710 void __iomem *ioaddr = tp->mmio_addr;
4711 struct pci_dev *pdev = tp->pci_dev;
4405 static const struct ephy_info e_info_8168d_4[] = { 4712 static const struct ephy_info e_info_8168d_4[] = {
4406 { 0x0b, ~0, 0x48 }, 4713 { 0x0b, ~0, 0x48 },
4407 { 0x19, 0x20, 0x50 }, 4714 { 0x19, 0x20, 0x50 },
@@ -4409,7 +4716,7 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
4409 }; 4716 };
4410 int i; 4717 int i;
4411 4718
4412 rtl_csi_access_enable_1(ioaddr); 4719 rtl_csi_access_enable_1(tp);
4413 4720
4414 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4721 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4415 4722
@@ -4426,8 +4733,10 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
4426 rtl_enable_clock_request(pdev); 4733 rtl_enable_clock_request(pdev);
4427} 4734}
4428 4735
4429static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) 4736static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4430{ 4737{
4738 void __iomem *ioaddr = tp->mmio_addr;
4739 struct pci_dev *pdev = tp->pci_dev;
4431 static const struct ephy_info e_info_8168e_1[] = { 4740 static const struct ephy_info e_info_8168e_1[] = {
4432 { 0x00, 0x0200, 0x0100 }, 4741 { 0x00, 0x0200, 0x0100 },
4433 { 0x00, 0x0000, 0x0004 }, 4742 { 0x00, 0x0000, 0x0004 },
@@ -4444,7 +4753,7 @@ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4444 { 0x0a, 0x0000, 0x0040 } 4753 { 0x0a, 0x0000, 0x0040 }
4445 }; 4754 };
4446 4755
4447 rtl_csi_access_enable_2(ioaddr); 4756 rtl_csi_access_enable_2(tp);
4448 4757
4449 rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); 4758 rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4450 4759
@@ -4461,14 +4770,16 @@ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4461 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 4770 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4462} 4771}
4463 4772
4464static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev) 4773static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4465{ 4774{
4775 void __iomem *ioaddr = tp->mmio_addr;
4776 struct pci_dev *pdev = tp->pci_dev;
4466 static const struct ephy_info e_info_8168e_2[] = { 4777 static const struct ephy_info e_info_8168e_2[] = {
4467 { 0x09, 0x0000, 0x0080 }, 4778 { 0x09, 0x0000, 0x0080 },
4468 { 0x19, 0x0000, 0x0224 } 4779 { 0x19, 0x0000, 0x0224 }
4469 }; 4780 };
4470 4781
4471 rtl_csi_access_enable_1(ioaddr); 4782 rtl_csi_access_enable_1(tp);
4472 4783
4473 rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); 4784 rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
4474 4785
@@ -4499,18 +4810,12 @@ static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
4499 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 4810 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4500} 4811}
4501 4812
4502static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev) 4813static void rtl_hw_start_8168f(struct rtl8169_private *tp)
4503{ 4814{
4504 static const struct ephy_info e_info_8168f_1[] = { 4815 void __iomem *ioaddr = tp->mmio_addr;
4505 { 0x06, 0x00c0, 0x0020 }, 4816 struct pci_dev *pdev = tp->pci_dev;
4506 { 0x08, 0x0001, 0x0002 },
4507 { 0x09, 0x0000, 0x0080 },
4508 { 0x19, 0x0000, 0x0224 }
4509 };
4510
4511 rtl_csi_access_enable_1(ioaddr);
4512 4817
4513 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 4818 rtl_csi_access_enable_2(tp);
4514 4819
4515 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4820 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4516 4821
@@ -4524,8 +4829,6 @@ static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
4524 rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 4829 rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4525 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); 4830 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4526 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); 4831 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
4527 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
4528 ERIAR_EXGMAC);
4529 4832
4530 RTL_W8(MaxTxPacketSize, EarlySize); 4833 RTL_W8(MaxTxPacketSize, EarlySize);
4531 4834
@@ -4533,20 +4836,54 @@ static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
4533 4836
4534 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 4837 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
4535 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 4838 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
4839 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
4840 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
4841 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4842}
4843
4844static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
4845{
4846 void __iomem *ioaddr = tp->mmio_addr;
4847 static const struct ephy_info e_info_8168f_1[] = {
4848 { 0x06, 0x00c0, 0x0020 },
4849 { 0x08, 0x0001, 0x0002 },
4850 { 0x09, 0x0000, 0x0080 },
4851 { 0x19, 0x0000, 0x0224 }
4852 };
4853
4854 rtl_hw_start_8168f(tp);
4855
4856 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4857
4858 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
4859 ERIAR_EXGMAC);
4536 4860
4537 /* Adjust EEE LED frequency */ 4861 /* Adjust EEE LED frequency */
4538 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); 4862 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
4863}
4539 4864
4540 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 4865static void rtl_hw_start_8411(struct rtl8169_private *tp)
4541 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); 4866{
4542 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 4867 void __iomem *ioaddr = tp->mmio_addr;
4868 static const struct ephy_info e_info_8168f_1[] = {
4869 { 0x06, 0x00c0, 0x0020 },
4870 { 0x0f, 0xffff, 0x5200 },
4871 { 0x1e, 0x0000, 0x4000 },
4872 { 0x19, 0x0000, 0x0224 }
4873 };
4874
4875 rtl_hw_start_8168f(tp);
4876
4877 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4878
4879 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000,
4880 ERIAR_EXGMAC);
4543} 4881}
4544 4882
4545static void rtl_hw_start_8168(struct net_device *dev) 4883static void rtl_hw_start_8168(struct net_device *dev)
4546{ 4884{
4547 struct rtl8169_private *tp = netdev_priv(dev); 4885 struct rtl8169_private *tp = netdev_priv(dev);
4548 void __iomem *ioaddr = tp->mmio_addr; 4886 void __iomem *ioaddr = tp->mmio_addr;
4549 struct pci_dev *pdev = tp->pci_dev;
4550 4887
4551 RTL_W8(Cfg9346, Cfg9346_Unlock); 4888 RTL_W8(Cfg9346, Cfg9346_Unlock);
4552 4889
@@ -4577,67 +4914,71 @@ static void rtl_hw_start_8168(struct net_device *dev)
4577 4914
4578 switch (tp->mac_version) { 4915 switch (tp->mac_version) {
4579 case RTL_GIGA_MAC_VER_11: 4916 case RTL_GIGA_MAC_VER_11:
4580 rtl_hw_start_8168bb(ioaddr, pdev); 4917 rtl_hw_start_8168bb(tp);
4581 break; 4918 break;
4582 4919
4583 case RTL_GIGA_MAC_VER_12: 4920 case RTL_GIGA_MAC_VER_12:
4584 case RTL_GIGA_MAC_VER_17: 4921 case RTL_GIGA_MAC_VER_17:
4585 rtl_hw_start_8168bef(ioaddr, pdev); 4922 rtl_hw_start_8168bef(tp);
4586 break; 4923 break;
4587 4924
4588 case RTL_GIGA_MAC_VER_18: 4925 case RTL_GIGA_MAC_VER_18:
4589 rtl_hw_start_8168cp_1(ioaddr, pdev); 4926 rtl_hw_start_8168cp_1(tp);
4590 break; 4927 break;
4591 4928
4592 case RTL_GIGA_MAC_VER_19: 4929 case RTL_GIGA_MAC_VER_19:
4593 rtl_hw_start_8168c_1(ioaddr, pdev); 4930 rtl_hw_start_8168c_1(tp);
4594 break; 4931 break;
4595 4932
4596 case RTL_GIGA_MAC_VER_20: 4933 case RTL_GIGA_MAC_VER_20:
4597 rtl_hw_start_8168c_2(ioaddr, pdev); 4934 rtl_hw_start_8168c_2(tp);
4598 break; 4935 break;
4599 4936
4600 case RTL_GIGA_MAC_VER_21: 4937 case RTL_GIGA_MAC_VER_21:
4601 rtl_hw_start_8168c_3(ioaddr, pdev); 4938 rtl_hw_start_8168c_3(tp);
4602 break; 4939 break;
4603 4940
4604 case RTL_GIGA_MAC_VER_22: 4941 case RTL_GIGA_MAC_VER_22:
4605 rtl_hw_start_8168c_4(ioaddr, pdev); 4942 rtl_hw_start_8168c_4(tp);
4606 break; 4943 break;
4607 4944
4608 case RTL_GIGA_MAC_VER_23: 4945 case RTL_GIGA_MAC_VER_23:
4609 rtl_hw_start_8168cp_2(ioaddr, pdev); 4946 rtl_hw_start_8168cp_2(tp);
4610 break; 4947 break;
4611 4948
4612 case RTL_GIGA_MAC_VER_24: 4949 case RTL_GIGA_MAC_VER_24:
4613 rtl_hw_start_8168cp_3(ioaddr, pdev); 4950 rtl_hw_start_8168cp_3(tp);
4614 break; 4951 break;
4615 4952
4616 case RTL_GIGA_MAC_VER_25: 4953 case RTL_GIGA_MAC_VER_25:
4617 case RTL_GIGA_MAC_VER_26: 4954 case RTL_GIGA_MAC_VER_26:
4618 case RTL_GIGA_MAC_VER_27: 4955 case RTL_GIGA_MAC_VER_27:
4619 rtl_hw_start_8168d(ioaddr, pdev); 4956 rtl_hw_start_8168d(tp);
4620 break; 4957 break;
4621 4958
4622 case RTL_GIGA_MAC_VER_28: 4959 case RTL_GIGA_MAC_VER_28:
4623 rtl_hw_start_8168d_4(ioaddr, pdev); 4960 rtl_hw_start_8168d_4(tp);
4624 break; 4961 break;
4625 4962
4626 case RTL_GIGA_MAC_VER_31: 4963 case RTL_GIGA_MAC_VER_31:
4627 rtl_hw_start_8168dp(ioaddr, pdev); 4964 rtl_hw_start_8168dp(tp);
4628 break; 4965 break;
4629 4966
4630 case RTL_GIGA_MAC_VER_32: 4967 case RTL_GIGA_MAC_VER_32:
4631 case RTL_GIGA_MAC_VER_33: 4968 case RTL_GIGA_MAC_VER_33:
4632 rtl_hw_start_8168e_1(ioaddr, pdev); 4969 rtl_hw_start_8168e_1(tp);
4633 break; 4970 break;
4634 case RTL_GIGA_MAC_VER_34: 4971 case RTL_GIGA_MAC_VER_34:
4635 rtl_hw_start_8168e_2(ioaddr, pdev); 4972 rtl_hw_start_8168e_2(tp);
4636 break; 4973 break;
4637 4974
4638 case RTL_GIGA_MAC_VER_35: 4975 case RTL_GIGA_MAC_VER_35:
4639 case RTL_GIGA_MAC_VER_36: 4976 case RTL_GIGA_MAC_VER_36:
4640 rtl_hw_start_8168f_1(ioaddr, pdev); 4977 rtl_hw_start_8168f_1(tp);
4978 break;
4979
4980 case RTL_GIGA_MAC_VER_38:
4981 rtl_hw_start_8411(tp);
4641 break; 4982 break;
4642 4983
4643 default: 4984 default:
@@ -4664,8 +5005,10 @@ static void rtl_hw_start_8168(struct net_device *dev)
4664 PktCntrDisable | \ 5005 PktCntrDisable | \
4665 Mac_dbgo_sel) 5006 Mac_dbgo_sel)
4666 5007
4667static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) 5008static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
4668{ 5009{
5010 void __iomem *ioaddr = tp->mmio_addr;
5011 struct pci_dev *pdev = tp->pci_dev;
4669 static const struct ephy_info e_info_8102e_1[] = { 5012 static const struct ephy_info e_info_8102e_1[] = {
4670 { 0x01, 0, 0x6e65 }, 5013 { 0x01, 0, 0x6e65 },
4671 { 0x02, 0, 0x091f }, 5014 { 0x02, 0, 0x091f },
@@ -4678,7 +5021,7 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4678 }; 5021 };
4679 u8 cfg1; 5022 u8 cfg1;
4680 5023
4681 rtl_csi_access_enable_2(ioaddr); 5024 rtl_csi_access_enable_2(tp);
4682 5025
4683 RTL_W8(DBG_REG, FIX_NAK_1); 5026 RTL_W8(DBG_REG, FIX_NAK_1);
4684 5027
@@ -4695,9 +5038,12 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4695 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); 5038 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
4696} 5039}
4697 5040
4698static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) 5041static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
4699{ 5042{
4700 rtl_csi_access_enable_2(ioaddr); 5043 void __iomem *ioaddr = tp->mmio_addr;
5044 struct pci_dev *pdev = tp->pci_dev;
5045
5046 rtl_csi_access_enable_2(tp);
4701 5047
4702 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5048 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4703 5049
@@ -4705,15 +5051,16 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
4705 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 5051 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4706} 5052}
4707 5053
4708static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) 5054static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
4709{ 5055{
4710 rtl_hw_start_8102e_2(ioaddr, pdev); 5056 rtl_hw_start_8102e_2(tp);
4711 5057
4712 rtl_ephy_write(ioaddr, 0x03, 0xc2f9); 5058 rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
4713} 5059}
4714 5060
4715static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev) 5061static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
4716{ 5062{
5063 void __iomem *ioaddr = tp->mmio_addr;
4717 static const struct ephy_info e_info_8105e_1[] = { 5064 static const struct ephy_info e_info_8105e_1[] = {
4718 { 0x07, 0, 0x4000 }, 5065 { 0x07, 0, 0x4000 },
4719 { 0x19, 0, 0x0200 }, 5066 { 0x19, 0, 0x0200 },
@@ -4737,12 +5084,44 @@ static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4737 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5084 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
4738} 5085}
4739 5086
4740static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev) 5087static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
4741{ 5088{
4742 rtl_hw_start_8105e_1(ioaddr, pdev); 5089 void __iomem *ioaddr = tp->mmio_addr;
5090
5091 rtl_hw_start_8105e_1(tp);
4743 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000); 5092 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
4744} 5093}
4745 5094
5095static void rtl_hw_start_8402(struct rtl8169_private *tp)
5096{
5097 void __iomem *ioaddr = tp->mmio_addr;
5098 static const struct ephy_info e_info_8402[] = {
5099 { 0x19, 0xffff, 0xff64 },
5100 { 0x1e, 0, 0x4000 }
5101 };
5102
5103 rtl_csi_access_enable_2(tp);
5104
5105 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5106 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5107
5108 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5109 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5110
5111 rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
5112
5113 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5114
5115 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5116 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5117 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5118 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5119 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5120 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5121 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
5122 ERIAR_EXGMAC);
5123}
5124
4746static void rtl_hw_start_8101(struct net_device *dev) 5125static void rtl_hw_start_8101(struct net_device *dev)
4747{ 5126{
4748 struct rtl8169_private *tp = netdev_priv(dev); 5127 struct rtl8169_private *tp = netdev_priv(dev);
@@ -4766,22 +5145,26 @@ static void rtl_hw_start_8101(struct net_device *dev)
4766 5145
4767 switch (tp->mac_version) { 5146 switch (tp->mac_version) {
4768 case RTL_GIGA_MAC_VER_07: 5147 case RTL_GIGA_MAC_VER_07:
4769 rtl_hw_start_8102e_1(ioaddr, pdev); 5148 rtl_hw_start_8102e_1(tp);
4770 break; 5149 break;
4771 5150
4772 case RTL_GIGA_MAC_VER_08: 5151 case RTL_GIGA_MAC_VER_08:
4773 rtl_hw_start_8102e_3(ioaddr, pdev); 5152 rtl_hw_start_8102e_3(tp);
4774 break; 5153 break;
4775 5154
4776 case RTL_GIGA_MAC_VER_09: 5155 case RTL_GIGA_MAC_VER_09:
4777 rtl_hw_start_8102e_2(ioaddr, pdev); 5156 rtl_hw_start_8102e_2(tp);
4778 break; 5157 break;
4779 5158
4780 case RTL_GIGA_MAC_VER_29: 5159 case RTL_GIGA_MAC_VER_29:
4781 rtl_hw_start_8105e_1(ioaddr, pdev); 5160 rtl_hw_start_8105e_1(tp);
4782 break; 5161 break;
4783 case RTL_GIGA_MAC_VER_30: 5162 case RTL_GIGA_MAC_VER_30:
4784 rtl_hw_start_8105e_2(ioaddr, pdev); 5163 rtl_hw_start_8105e_2(tp);
5164 break;
5165
5166 case RTL_GIGA_MAC_VER_37:
5167 rtl_hw_start_8402(tp);
4785 break; 5168 break;
4786 } 5169 }
4787 5170
@@ -5115,7 +5498,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5115 u32 opts[2]; 5498 u32 opts[2];
5116 int frags; 5499 int frags;
5117 5500
5118 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { 5501 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
5119 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 5502 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
5120 goto err_stop_0; 5503 goto err_stop_0;
5121 } 5504 }
@@ -5169,7 +5552,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5169 5552
5170 mmiowb(); 5553 mmiowb();
5171 5554
5172 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { 5555 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5173 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must 5556 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5174 * not miss a ring update when it notices a stopped queue. 5557 * not miss a ring update when it notices a stopped queue.
5175 */ 5558 */
@@ -5183,7 +5566,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5183 * can't. 5566 * can't.
5184 */ 5567 */
5185 smp_mb(); 5568 smp_mb();
5186 if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) 5569 if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
5187 netif_wake_queue(dev); 5570 netif_wake_queue(dev);
5188 } 5571 }
5189 5572
@@ -5306,7 +5689,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
5306 */ 5689 */
5307 smp_mb(); 5690 smp_mb();
5308 if (netif_queue_stopped(dev) && 5691 if (netif_queue_stopped(dev) &&
5309 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { 5692 TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5310 netif_wake_queue(dev); 5693 netif_wake_queue(dev);
5311 } 5694 }
5312 /* 5695 /*
@@ -6178,6 +6561,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6178 rtl_init_mdio_ops(tp); 6561 rtl_init_mdio_ops(tp);
6179 rtl_init_pll_power_ops(tp); 6562 rtl_init_pll_power_ops(tp);
6180 rtl_init_jumbo_ops(tp); 6563 rtl_init_jumbo_ops(tp);
6564 rtl_init_csi_ops(tp);
6181 6565
6182 rtl8169_print_mac_version(tp); 6566 rtl8169_print_mac_version(tp);
6183 6567
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 3fb2355af37e..46df3a04030c 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -4,11 +4,11 @@
4 4
5config SH_ETH 5config SH_ETH
6 tristate "Renesas SuperH Ethernet support" 6 tristate "Renesas SuperH Ethernet support"
7 depends on SUPERH && \ 7 depends on (SUPERH || ARCH_SHMOBILE) && \
8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ 8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ 9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ 10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
11 CPU_SUBTYPE_SH7757) 11 CPU_SUBTYPE_SH7757 || ARCH_R8A7740)
12 select CRC32 12 select CRC32
13 select NET_CORE 13 select NET_CORE
14 select MII 14 select MII
@@ -17,4 +17,5 @@ config SH_ETH
17 ---help--- 17 ---help---
18 Renesas SuperH Ethernet device driver. 18 Renesas SuperH Ethernet device driver.
19 This driver supporting CPUs are: 19 This driver supporting CPUs are:
20 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763 and SH7757. 20 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
21 and R8A7740.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d63e09b29a96..be3c22179161 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -386,6 +386,114 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
386 sh_eth_write(ndev, 0x0, CSMR); 386 sh_eth_write(ndev, 0x0, CSMR);
387} 387}
388 388
389#elif defined(CONFIG_ARCH_R8A7740)
390#define SH_ETH_HAS_TSU 1
391static void sh_eth_chip_reset(struct net_device *ndev)
392{
393 struct sh_eth_private *mdp = netdev_priv(ndev);
394 unsigned long mii;
395
396 /* reset device */
397 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
398 mdelay(1);
399
400 switch (mdp->phy_interface) {
401 case PHY_INTERFACE_MODE_GMII:
402 mii = 2;
403 break;
404 case PHY_INTERFACE_MODE_MII:
405 mii = 1;
406 break;
407 case PHY_INTERFACE_MODE_RMII:
408 default:
409 mii = 0;
410 break;
411 }
412 sh_eth_write(ndev, mii, RMII_MII);
413}
414
415static void sh_eth_reset(struct net_device *ndev)
416{
417 int cnt = 100;
418
419 sh_eth_write(ndev, EDSR_ENALL, EDSR);
420 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
421 while (cnt > 0) {
422 if (!(sh_eth_read(ndev, EDMR) & 0x3))
423 break;
424 mdelay(1);
425 cnt--;
426 }
427 if (cnt == 0)
428 printk(KERN_ERR "Device reset fail\n");
429
430 /* Table Init */
431 sh_eth_write(ndev, 0x0, TDLAR);
432 sh_eth_write(ndev, 0x0, TDFAR);
433 sh_eth_write(ndev, 0x0, TDFXR);
434 sh_eth_write(ndev, 0x0, TDFFR);
435 sh_eth_write(ndev, 0x0, RDLAR);
436 sh_eth_write(ndev, 0x0, RDFAR);
437 sh_eth_write(ndev, 0x0, RDFXR);
438 sh_eth_write(ndev, 0x0, RDFFR);
439}
440
441static void sh_eth_set_duplex(struct net_device *ndev)
442{
443 struct sh_eth_private *mdp = netdev_priv(ndev);
444
445 if (mdp->duplex) /* Full */
446 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
447 else /* Half */
448 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
449}
450
451static void sh_eth_set_rate(struct net_device *ndev)
452{
453 struct sh_eth_private *mdp = netdev_priv(ndev);
454
455 switch (mdp->speed) {
456 case 10: /* 10BASE */
457 sh_eth_write(ndev, GECMR_10, GECMR);
458 break;
459 case 100:/* 100BASE */
460 sh_eth_write(ndev, GECMR_100, GECMR);
461 break;
462 case 1000: /* 1000BASE */
463 sh_eth_write(ndev, GECMR_1000, GECMR);
464 break;
465 default:
466 break;
467 }
468}
469
470/* R8A7740 */
471static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
472 .chip_reset = sh_eth_chip_reset,
473 .set_duplex = sh_eth_set_duplex,
474 .set_rate = sh_eth_set_rate,
475
476 .ecsr_value = ECSR_ICD | ECSR_MPD,
477 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
478 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
479
480 .tx_check = EESR_TC1 | EESR_FTC,
481 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
482 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
483 EESR_ECI,
484 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
485 EESR_TFE,
486
487 .apr = 1,
488 .mpr = 1,
489 .tpauser = 1,
490 .bculr = 1,
491 .hw_swap = 1,
492 .no_trimd = 1,
493 .no_ade = 1,
494 .tsu = 1,
495};
496
389#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 497#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
390#define SH_ETH_RESET_DEFAULT 1 498#define SH_ETH_RESET_DEFAULT 1
391static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 499static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
@@ -443,7 +551,7 @@ static void sh_eth_reset(struct net_device *ndev)
443} 551}
444#endif 552#endif
445 553
446#if defined(CONFIG_CPU_SH4) 554#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
447static void sh_eth_set_receive_align(struct sk_buff *skb) 555static void sh_eth_set_receive_align(struct sk_buff *skb)
448{ 556{
449 int reserve; 557 int reserve;
@@ -919,6 +1027,10 @@ static int sh_eth_rx(struct net_device *ndev)
919 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1027 desc_status = edmac_to_cpu(mdp, rxdesc->status);
920 pkt_len = rxdesc->frame_length; 1028 pkt_len = rxdesc->frame_length;
921 1029
1030#if defined(CONFIG_ARCH_R8A7740)
1031 desc_status >>= 16;
1032#endif
1033
922 if (--boguscnt < 0) 1034 if (--boguscnt < 0)
923 break; 1035 break;
924 1036
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 0fa14afce23d..57b8e1fc5d15 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -372,7 +372,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
372}; 372};
373 373
374/* Driver's parameters */ 374/* Driver's parameters */
375#if defined(CONFIG_CPU_SH4) 375#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
376#define SH4_SKB_RX_ALIGN 32 376#define SH4_SKB_RX_ALIGN 32
377#else 377#else
378#define SH2_SH3_SKB_RX_ALIGN 2 378#define SH2_SH3_SKB_RX_ALIGN 2
@@ -381,7 +381,8 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
381/* 381/*
382 * Register's bits 382 * Register's bits
383 */ 383 */
384#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) 384#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\
385 defined(CONFIG_ARCH_R8A7740)
385/* EDSR */ 386/* EDSR */
386enum EDSR_BIT { 387enum EDSR_BIT {
387 EDSR_ENT = 0x01, EDSR_ENR = 0x02, 388 EDSR_ENT = 0x01, EDSR_ENR = 0x02,
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 1895605abb35..8e9fda0c7aeb 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -937,7 +937,7 @@ static struct net_device_stats *s6gmac_stats(struct net_device *dev)
937 do { 937 do {
938 unsigned long flags; 938 unsigned long flags;
939 spin_lock_irqsave(&pd->lock, flags); 939 spin_lock_irqsave(&pd->lock, flags);
940 for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++) 940 for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
941 pd->stats[i] = 941 pd->stats[i] =
942 pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1); 942 pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
943 s6gmac_stats_collect(pd, &statinf[0][0]); 943 s6gmac_stats_collect(pd, &statinf[0][0]);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3cbfbffe3f00..b95f2e1b33f0 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -656,25 +656,30 @@ static void efx_stop_datapath(struct efx_nic *efx)
656 struct efx_channel *channel; 656 struct efx_channel *channel;
657 struct efx_tx_queue *tx_queue; 657 struct efx_tx_queue *tx_queue;
658 struct efx_rx_queue *rx_queue; 658 struct efx_rx_queue *rx_queue;
659 struct pci_dev *dev = efx->pci_dev;
659 int rc; 660 int rc;
660 661
661 EFX_ASSERT_RESET_SERIALISED(efx); 662 EFX_ASSERT_RESET_SERIALISED(efx);
662 BUG_ON(efx->port_enabled); 663 BUG_ON(efx->port_enabled);
663 664
664 rc = efx_nic_flush_queues(efx); 665 /* Only perform flush if dma is enabled */
665 if (rc && EFX_WORKAROUND_7803(efx)) { 666 if (dev->is_busmaster) {
666 /* Schedule a reset to recover from the flush failure. The 667 rc = efx_nic_flush_queues(efx);
667 * descriptor caches reference memory we're about to free, 668
668 * but falcon_reconfigure_mac_wrapper() won't reconnect 669 if (rc && EFX_WORKAROUND_7803(efx)) {
669 * the MACs because of the pending reset. */ 670 /* Schedule a reset to recover from the flush failure. The
670 netif_err(efx, drv, efx->net_dev, 671 * descriptor caches reference memory we're about to free,
671 "Resetting to recover from flush failure\n"); 672 * but falcon_reconfigure_mac_wrapper() won't reconnect
672 efx_schedule_reset(efx, RESET_TYPE_ALL); 673 * the MACs because of the pending reset. */
673 } else if (rc) { 674 netif_err(efx, drv, efx->net_dev,
674 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); 675 "Resetting to recover from flush failure\n");
675 } else { 676 efx_schedule_reset(efx, RESET_TYPE_ALL);
676 netif_dbg(efx, drv, efx->net_dev, 677 } else if (rc) {
677 "successfully flushed all queues\n"); 678 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
679 } else {
680 netif_dbg(efx, drv, efx->net_dev,
681 "successfully flushed all queues\n");
682 }
678 } 683 }
679 684
680 efx_for_each_channel(channel, efx) { 685 efx_for_each_channel(channel, efx) {
@@ -1349,7 +1354,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1349 } 1354 }
1350 1355
1351 /* RSS might be usable on VFs even if it is disabled on the PF */ 1356 /* RSS might be usable on VFs even if it is disabled on the PF */
1352 efx->rss_spread = (efx->n_rx_channels > 1 ? 1357 efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
1353 efx->n_rx_channels : efx_vf_size(efx)); 1358 efx->n_rx_channels : efx_vf_size(efx));
1354 1359
1355 return 0; 1360 return 0;
@@ -2492,8 +2497,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2492 efx_fini_io(efx); 2497 efx_fini_io(efx);
2493 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); 2498 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
2494 2499
2495 pci_set_drvdata(pci_dev, NULL);
2496 efx_fini_struct(efx); 2500 efx_fini_struct(efx);
2501 pci_set_drvdata(pci_dev, NULL);
2497 free_netdev(efx->net_dev); 2502 free_netdev(efx->net_dev);
2498}; 2503};
2499 2504
@@ -2695,6 +2700,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2695 fail2: 2700 fail2:
2696 efx_fini_struct(efx); 2701 efx_fini_struct(efx);
2697 fail1: 2702 fail1:
2703 pci_set_drvdata(pci_dev, NULL);
2698 WARN_ON(rc > 0); 2704 WARN_ON(rc > 0);
2699 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); 2705 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
2700 free_netdev(net_dev); 2706 free_netdev(net_dev);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f22f45f515a8..03ded364c8da 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1023,7 +1023,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
1023 return -EINVAL; 1023 return -EINVAL;
1024 1024
1025 /* Is it a default UC or MC filter? */ 1025 /* Is it a default UC or MC filter? */
1026 if (!compare_ether_addr(mac_mask->h_dest, mac_addr_mc_mask) && 1026 if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) &&
1027 vlan_tag_mask == 0) { 1027 vlan_tag_mask == 0) {
1028 if (is_multicast_ether_addr(mac_entry->h_dest)) 1028 if (is_multicast_ether_addr(mac_entry->h_dest))
1029 rc = efx_filter_set_mc_def(&spec); 1029 rc = efx_filter_set_mc_def(&spec);
@@ -1108,6 +1108,39 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
1108 return 0; 1108 return 0;
1109} 1109}
1110 1110
1111static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
1112 struct ethtool_eeprom *ee,
1113 u8 *data)
1114{
1115 struct efx_nic *efx = netdev_priv(net_dev);
1116 int ret;
1117
1118 if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
1119 return -EOPNOTSUPP;
1120
1121 mutex_lock(&efx->mac_lock);
1122 ret = efx->phy_op->get_module_eeprom(efx, ee, data);
1123 mutex_unlock(&efx->mac_lock);
1124
1125 return ret;
1126}
1127
1128static int efx_ethtool_get_module_info(struct net_device *net_dev,
1129 struct ethtool_modinfo *modinfo)
1130{
1131 struct efx_nic *efx = netdev_priv(net_dev);
1132 int ret;
1133
1134 if (!efx->phy_op || !efx->phy_op->get_module_info)
1135 return -EOPNOTSUPP;
1136
1137 mutex_lock(&efx->mac_lock);
1138 ret = efx->phy_op->get_module_info(efx, modinfo);
1139 mutex_unlock(&efx->mac_lock);
1140
1141 return ret;
1142}
1143
1111const struct ethtool_ops efx_ethtool_ops = { 1144const struct ethtool_ops efx_ethtool_ops = {
1112 .get_settings = efx_ethtool_get_settings, 1145 .get_settings = efx_ethtool_get_settings,
1113 .set_settings = efx_ethtool_set_settings, 1146 .set_settings = efx_ethtool_set_settings,
@@ -1137,4 +1170,6 @@ const struct ethtool_ops efx_ethtool_ops = {
1137 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, 1170 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
1138 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1171 .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
1139 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1172 .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
1173 .get_module_info = efx_ethtool_get_module_info,
1174 .get_module_eeprom = efx_ethtool_get_module_eeprom,
1140}; 1175};
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c
index 7bcad899a936..13cb40fe90c1 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_phy.c
@@ -739,6 +739,80 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
739 return NULL; 739 return NULL;
740} 740}
741 741
742#define SFP_PAGE_SIZE 128
743#define SFP_NUM_PAGES 2
744static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
745 struct ethtool_eeprom *ee, u8 *data)
746{
747 u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX];
748 u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
749 size_t outlen;
750 int rc;
751 unsigned int payload_len;
752 unsigned int space_remaining = ee->len;
753 unsigned int page;
754 unsigned int page_off;
755 unsigned int to_copy;
756 u8 *user_data = data;
757
758 BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN);
759
760 page_off = ee->offset % SFP_PAGE_SIZE;
761 page = ee->offset / SFP_PAGE_SIZE;
762
763 while (space_remaining && (page < SFP_NUM_PAGES)) {
764 MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
765
766 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
767 inbuf, sizeof(inbuf),
768 outbuf, sizeof(outbuf),
769 &outlen);
770 if (rc)
771 return rc;
772
773 if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
774 SFP_PAGE_SIZE))
775 return -EIO;
776
777 payload_len = MCDI_DWORD(outbuf,
778 GET_PHY_MEDIA_INFO_OUT_DATALEN);
779 if (payload_len != SFP_PAGE_SIZE)
780 return -EIO;
781
782 /* Copy as much as we can into data */
783 payload_len -= page_off;
784 to_copy = (space_remaining < payload_len) ?
785 space_remaining : payload_len;
786
787 memcpy(user_data,
788 outbuf + page_off +
789 MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
790 to_copy);
791
792 space_remaining -= to_copy;
793 user_data += to_copy;
794 page_off = 0;
795 page++;
796 }
797
798 return 0;
799}
800
801static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
802 struct ethtool_modinfo *modinfo)
803{
804 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
805
806 switch (phy_cfg->media) {
807 case MC_CMD_MEDIA_SFP_PLUS:
808 modinfo->type = ETH_MODULE_SFF_8079;
809 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
810 return 0;
811 default:
812 return -EOPNOTSUPP;
813 }
814}
815
742const struct efx_phy_operations efx_mcdi_phy_ops = { 816const struct efx_phy_operations efx_mcdi_phy_ops = {
743 .probe = efx_mcdi_phy_probe, 817 .probe = efx_mcdi_phy_probe,
744 .init = efx_port_dummy_op_int, 818 .init = efx_port_dummy_op_int,
@@ -751,4 +825,6 @@ const struct efx_phy_operations efx_mcdi_phy_ops = {
751 .test_alive = efx_mcdi_phy_test_alive, 825 .test_alive = efx_mcdi_phy_test_alive,
752 .run_tests = efx_mcdi_phy_run_tests, 826 .run_tests = efx_mcdi_phy_run_tests,
753 .test_name = efx_mcdi_phy_test_name, 827 .test_name = efx_mcdi_phy_test_name,
828 .get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
829 .get_module_info = efx_mcdi_phy_get_module_info,
754}; 830};
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f0385e1fb2d8..0e575359af17 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -252,8 +252,6 @@ struct efx_rx_page_state {
252 * @max_fill: RX descriptor maximum fill level (<= ring size) 252 * @max_fill: RX descriptor maximum fill level (<= ring size)
253 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill 253 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
254 * (<= @max_fill) 254 * (<= @max_fill)
255 * @fast_fill_limit: The level to which a fast fill will fill
256 * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
257 * @min_fill: RX descriptor minimum non-zero fill level. 255 * @min_fill: RX descriptor minimum non-zero fill level.
258 * This records the minimum fill level observed when a ring 256 * This records the minimum fill level observed when a ring
259 * refill was triggered. 257 * refill was triggered.
@@ -274,7 +272,6 @@ struct efx_rx_queue {
274 int removed_count; 272 int removed_count;
275 unsigned int max_fill; 273 unsigned int max_fill;
276 unsigned int fast_fill_trigger; 274 unsigned int fast_fill_trigger;
277 unsigned int fast_fill_limit;
278 unsigned int min_fill; 275 unsigned int min_fill;
279 unsigned int min_overfill; 276 unsigned int min_overfill;
280 unsigned int alloc_page_count; 277 unsigned int alloc_page_count;
@@ -522,6 +519,11 @@ struct efx_phy_operations {
522 int (*test_alive) (struct efx_nic *efx); 519 int (*test_alive) (struct efx_nic *efx);
523 const char *(*test_name) (struct efx_nic *efx, unsigned int index); 520 const char *(*test_name) (struct efx_nic *efx, unsigned int index);
524 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); 521 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
522 int (*get_module_eeprom) (struct efx_nic *efx,
523 struct ethtool_eeprom *ee,
524 u8 *data);
525 int (*get_module_info) (struct efx_nic *efx,
526 struct ethtool_modinfo *modinfo);
525}; 527};
526 528
527/** 529/**
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 8a7caf88ffb6..326a28637f3c 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -449,6 +449,37 @@ static void qt202x_phy_remove(struct efx_nic *efx)
449 efx->phy_data = NULL; 449 efx->phy_data = NULL;
450} 450}
451 451
452static int qt202x_phy_get_module_info(struct efx_nic *efx,
453 struct ethtool_modinfo *modinfo)
454{
455 modinfo->type = ETH_MODULE_SFF_8079;
456 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
457 return 0;
458}
459
460static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
461 struct ethtool_eeprom *ee, u8 *data)
462{
463 int mmd, reg_base, rc, i;
464
465 if (efx->phy_type == PHY_TYPE_QT2025C) {
466 mmd = MDIO_MMD_PCS;
467 reg_base = 0xd000;
468 } else {
469 mmd = MDIO_MMD_PMAPMD;
470 reg_base = 0x8007;
471 }
472
473 for (i = 0; i < ee->len; i++) {
474 rc = efx_mdio_read(efx, mmd, reg_base + ee->offset + i);
475 if (rc < 0)
476 return rc;
477 data[i] = rc;
478 }
479
480 return 0;
481}
482
452const struct efx_phy_operations falcon_qt202x_phy_ops = { 483const struct efx_phy_operations falcon_qt202x_phy_ops = {
453 .probe = qt202x_phy_probe, 484 .probe = qt202x_phy_probe,
454 .init = qt202x_phy_init, 485 .init = qt202x_phy_init,
@@ -459,4 +490,6 @@ const struct efx_phy_operations falcon_qt202x_phy_ops = {
459 .get_settings = qt202x_phy_get_settings, 490 .get_settings = qt202x_phy_get_settings,
460 .set_settings = efx_mdio_set_settings, 491 .set_settings = efx_mdio_set_settings,
461 .test_alive = efx_mdio_test_alive, 492 .test_alive = efx_mdio_test_alive,
493 .get_module_eeprom = qt202x_phy_get_module_eeprom,
494 .get_module_info = qt202x_phy_get_module_info,
462}; 495};
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 763fa2fe1a38..243e91f3dff9 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -76,12 +76,7 @@ static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
76/* This is the percentage fill level below which new RX descriptors 76/* This is the percentage fill level below which new RX descriptors
77 * will be added to the RX descriptor ring. 77 * will be added to the RX descriptor ring.
78 */ 78 */
79static unsigned int rx_refill_threshold = 90; 79static unsigned int rx_refill_threshold;
80
81/* This is the percentage fill level to which an RX queue will be refilled
82 * when the "RX refill threshold" is reached.
83 */
84static unsigned int rx_refill_limit = 95;
85 80
86/* 81/*
87 * RX maximum head room required. 82 * RX maximum head room required.
@@ -342,7 +337,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
342 * efx_fast_push_rx_descriptors - push new RX descriptors quickly 337 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
343 * @rx_queue: RX descriptor queue 338 * @rx_queue: RX descriptor queue
344 * This will aim to fill the RX descriptor queue up to 339 * This will aim to fill the RX descriptor queue up to
345 * @rx_queue->@fast_fill_limit. If there is insufficient atomic 340 * @rx_queue->@max_fill. If there is insufficient atomic
346 * memory to do so, a slow fill will be scheduled. 341 * memory to do so, a slow fill will be scheduled.
347 * 342 *
348 * The caller must provide serialisation (none is used here). In practise, 343 * The caller must provide serialisation (none is used here). In practise,
@@ -367,15 +362,14 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
367 rx_queue->min_fill = fill_level; 362 rx_queue->min_fill = fill_level;
368 } 363 }
369 364
370 space = rx_queue->fast_fill_limit - fill_level; 365 space = rx_queue->max_fill - fill_level;
371 if (space < EFX_RX_BATCH) 366 EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);
372 goto out;
373 367
374 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 368 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
375 "RX queue %d fast-filling descriptor ring from" 369 "RX queue %d fast-filling descriptor ring from"
376 " level %d to level %d using %s allocation\n", 370 " level %d to level %d using %s allocation\n",
377 efx_rx_queue_index(rx_queue), fill_level, 371 efx_rx_queue_index(rx_queue), fill_level,
378 rx_queue->fast_fill_limit, 372 rx_queue->max_fill,
379 channel->rx_alloc_push_pages ? "page" : "skb"); 373 channel->rx_alloc_push_pages ? "page" : "skb");
380 374
381 do { 375 do {
@@ -681,7 +675,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
681void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 675void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
682{ 676{
683 struct efx_nic *efx = rx_queue->efx; 677 struct efx_nic *efx = rx_queue->efx;
684 unsigned int max_fill, trigger, limit; 678 unsigned int max_fill, trigger, max_trigger;
685 679
686 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 680 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
687 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); 681 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -694,12 +688,17 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
694 688
695 /* Initialise limit fields */ 689 /* Initialise limit fields */
696 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; 690 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
697 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 691 max_trigger = max_fill - EFX_RX_BATCH;
698 limit = max_fill * min(rx_refill_limit, 100U) / 100U; 692 if (rx_refill_threshold != 0) {
693 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
694 if (trigger > max_trigger)
695 trigger = max_trigger;
696 } else {
697 trigger = max_trigger;
698 }
699 699
700 rx_queue->max_fill = max_fill; 700 rx_queue->max_fill = max_fill;
701 rx_queue->fast_fill_trigger = trigger; 701 rx_queue->fast_fill_trigger = trigger;
702 rx_queue->fast_fill_limit = limit;
703 702
704 /* Set up RX descriptor ring */ 703 /* Set up RX descriptor ring */
705 rx_queue->enabled = true; 704 rx_queue->enabled = true;
@@ -746,5 +745,5 @@ MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
746 745
747module_param(rx_refill_threshold, uint, 0444); 746module_param(rx_refill_threshold, uint, 0444);
748MODULE_PARM_DESC(rx_refill_threshold, 747MODULE_PARM_DESC(rx_refill_threshold,
749 "RX descriptor ring fast/slow fill threshold (%)"); 748 "RX descriptor ring refill threshold (%)");
750 749
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index a284d6440538..32e55664df6e 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -39,9 +39,7 @@
39#define SC92031_NAME "sc92031" 39#define SC92031_NAME "sc92031"
40 40
41/* BAR 0 is MMIO, BAR 1 is PIO */ 41/* BAR 0 is MMIO, BAR 1 is PIO */
42#ifndef SC92031_USE_BAR 42#define SC92031_USE_PIO 0
43#define SC92031_USE_BAR 0
44#endif
45 43
46/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */ 44/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
47static int multicast_filter_limit = 64; 45static int multicast_filter_limit = 64;
@@ -366,7 +364,7 @@ static void sc92031_disable_interrupts(struct net_device *dev)
366 mmiowb(); 364 mmiowb();
367 365
368 /* wait for any concurrent interrupt/tasklet to finish */ 366 /* wait for any concurrent interrupt/tasklet to finish */
369 synchronize_irq(dev->irq); 367 synchronize_irq(priv->pdev->irq);
370 tasklet_disable(&priv->tasklet); 368 tasklet_disable(&priv->tasklet);
371} 369}
372 370
@@ -1114,10 +1112,13 @@ static void sc92031_tx_timeout(struct net_device *dev)
1114#ifdef CONFIG_NET_POLL_CONTROLLER 1112#ifdef CONFIG_NET_POLL_CONTROLLER
1115static void sc92031_poll_controller(struct net_device *dev) 1113static void sc92031_poll_controller(struct net_device *dev)
1116{ 1114{
1117 disable_irq(dev->irq); 1115 struct sc92031_priv *priv = netdev_priv(dev);
1118 if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE) 1116 const int irq = priv->pdev->irq;
1117
1118 disable_irq(irq);
1119 if (sc92031_interrupt(irq, dev) != IRQ_NONE)
1119 sc92031_tasklet((unsigned long)dev); 1120 sc92031_tasklet((unsigned long)dev);
1120 enable_irq(dev->irq); 1121 enable_irq(irq);
1121} 1122}
1122#endif 1123#endif
1123 1124
@@ -1402,7 +1403,6 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1402 struct net_device *dev; 1403 struct net_device *dev;
1403 struct sc92031_priv *priv; 1404 struct sc92031_priv *priv;
1404 u32 mac0, mac1; 1405 u32 mac0, mac1;
1405 unsigned long base_addr;
1406 1406
1407 err = pci_enable_device(pdev); 1407 err = pci_enable_device(pdev);
1408 if (unlikely(err < 0)) 1408 if (unlikely(err < 0))
@@ -1422,7 +1422,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1422 if (unlikely(err < 0)) 1422 if (unlikely(err < 0))
1423 goto out_request_regions; 1423 goto out_request_regions;
1424 1424
1425 port_base = pci_iomap(pdev, SC92031_USE_BAR, 0); 1425 port_base = pci_iomap(pdev, SC92031_USE_PIO, 0);
1426 if (unlikely(!port_base)) { 1426 if (unlikely(!port_base)) {
1427 err = -EIO; 1427 err = -EIO;
1428 goto out_iomap; 1428 goto out_iomap;
@@ -1437,14 +1437,6 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1437 pci_set_drvdata(pdev, dev); 1437 pci_set_drvdata(pdev, dev);
1438 SET_NETDEV_DEV(dev, &pdev->dev); 1438 SET_NETDEV_DEV(dev, &pdev->dev);
1439 1439
1440#if SC92031_USE_BAR == 0
1441 dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
1442 dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
1443#elif SC92031_USE_BAR == 1
1444 dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
1445#endif
1446 dev->irq = pdev->irq;
1447
1448 /* faked with skb_copy_and_csum_dev */ 1440 /* faked with skb_copy_and_csum_dev */
1449 dev->features = NETIF_F_SG | NETIF_F_HIGHDMA | 1441 dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
1450 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1442 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -1478,13 +1470,9 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1478 if (err < 0) 1470 if (err < 0)
1479 goto out_register_netdev; 1471 goto out_register_netdev;
1480 1472
1481#if SC92031_USE_BAR == 0
1482 base_addr = dev->mem_start;
1483#elif SC92031_USE_BAR == 1
1484 base_addr = dev->base_addr;
1485#endif
1486 printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name, 1473 printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
1487 base_addr, dev->dev_addr, dev->irq); 1474 (long)pci_resource_start(pdev, SC92031_USE_PIO), dev->dev_addr,
1475 pdev->irq);
1488 1476
1489 return 0; 1477 return 0;
1490 1478
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index a9deda8eaf63..4613591b43e7 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -729,7 +729,7 @@ static void sis190_tx_interrupt(struct net_device *dev,
729 * The interrupt handler does all of the Rx thread work and cleans up after 729 * The interrupt handler does all of the Rx thread work and cleans up after
730 * the Tx thread. 730 * the Tx thread.
731 */ 731 */
732static irqreturn_t sis190_interrupt(int irq, void *__dev) 732static irqreturn_t sis190_irq(int irq, void *__dev)
733{ 733{
734 struct net_device *dev = __dev; 734 struct net_device *dev = __dev;
735 struct sis190_private *tp = netdev_priv(dev); 735 struct sis190_private *tp = netdev_priv(dev);
@@ -772,11 +772,11 @@ out:
772static void sis190_netpoll(struct net_device *dev) 772static void sis190_netpoll(struct net_device *dev)
773{ 773{
774 struct sis190_private *tp = netdev_priv(dev); 774 struct sis190_private *tp = netdev_priv(dev);
775 struct pci_dev *pdev = tp->pci_dev; 775 const int irq = tp->pci_dev->irq;
776 776
777 disable_irq(pdev->irq); 777 disable_irq(irq);
778 sis190_interrupt(pdev->irq, dev); 778 sis190_irq(irq, dev);
779 enable_irq(pdev->irq); 779 enable_irq(irq);
780} 780}
781#endif 781#endif
782 782
@@ -1085,7 +1085,7 @@ static int sis190_open(struct net_device *dev)
1085 1085
1086 sis190_request_timer(dev); 1086 sis190_request_timer(dev);
1087 1087
1088 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev); 1088 rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
1089 if (rc < 0) 1089 if (rc < 0)
1090 goto err_release_timer_2; 1090 goto err_release_timer_2;
1091 1091
@@ -1097,11 +1097,9 @@ err_release_timer_2:
1097 sis190_delete_timer(dev); 1097 sis190_delete_timer(dev);
1098 sis190_rx_clear(tp); 1098 sis190_rx_clear(tp);
1099err_free_rx_1: 1099err_free_rx_1:
1100 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing, 1100 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1101 tp->rx_dma);
1102err_free_tx_0: 1101err_free_tx_0:
1103 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing, 1102 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1104 tp->tx_dma);
1105 goto out; 1103 goto out;
1106} 1104}
1107 1105
@@ -1141,7 +1139,7 @@ static void sis190_down(struct net_device *dev)
1141 1139
1142 spin_unlock_irq(&tp->lock); 1140 spin_unlock_irq(&tp->lock);
1143 1141
1144 synchronize_irq(dev->irq); 1142 synchronize_irq(tp->pci_dev->irq);
1145 1143
1146 if (!poll_locked) 1144 if (!poll_locked)
1147 poll_locked++; 1145 poll_locked++;
@@ -1161,7 +1159,7 @@ static int sis190_close(struct net_device *dev)
1161 1159
1162 sis190_down(dev); 1160 sis190_down(dev);
1163 1161
1164 free_irq(dev->irq, dev); 1162 free_irq(pdev->irq, dev);
1165 1163
1166 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); 1164 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1167 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); 1165 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
@@ -1884,8 +1882,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1884 dev->netdev_ops = &sis190_netdev_ops; 1882 dev->netdev_ops = &sis190_netdev_ops;
1885 1883
1886 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops); 1884 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1887 dev->irq = pdev->irq;
1888 dev->base_addr = (unsigned long) 0xdead;
1889 dev->watchdog_timeo = SIS190_TX_TIMEOUT; 1885 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1890 1886
1891 spin_lock_init(&tp->lock); 1887 spin_lock_init(&tp->lock);
@@ -1902,7 +1898,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1902 netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n", 1898 netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
1903 pci_name(pdev), 1899 pci_name(pdev),
1904 sis_chip_info[ent->driver_data].name, 1900 sis_chip_info[ent->driver_data].name,
1905 ioaddr, dev->irq, dev->dev_addr); 1901 ioaddr, pdev->irq, dev->dev_addr);
1906 netdev_info(dev, "%s mode.\n", 1902 netdev_info(dev, "%s mode.\n",
1907 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); 1903 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1908 } 1904 }
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 5ccf02e7e3ad..203d9c6ec23a 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -168,6 +168,8 @@ struct sis900_private {
168 unsigned int cur_phy; 168 unsigned int cur_phy;
169 struct mii_if_info mii_info; 169 struct mii_if_info mii_info;
170 170
171 void __iomem *ioaddr;
172
171 struct timer_list timer; /* Link status detection timer. */ 173 struct timer_list timer; /* Link status detection timer. */
172 u8 autong_complete; /* 1: auto-negotiate complete */ 174 u8 autong_complete; /* 1: auto-negotiate complete */
173 175
@@ -201,13 +203,18 @@ MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtere
201MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt"); 203MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
202MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level"); 204MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");
203 205
206#define sw32(reg, val) iowrite32(val, ioaddr + (reg))
207#define sw8(reg, val) iowrite8(val, ioaddr + (reg))
208#define sr32(reg) ioread32(ioaddr + (reg))
209#define sr16(reg) ioread16(ioaddr + (reg))
210
204#ifdef CONFIG_NET_POLL_CONTROLLER 211#ifdef CONFIG_NET_POLL_CONTROLLER
205static void sis900_poll(struct net_device *dev); 212static void sis900_poll(struct net_device *dev);
206#endif 213#endif
207static int sis900_open(struct net_device *net_dev); 214static int sis900_open(struct net_device *net_dev);
208static int sis900_mii_probe (struct net_device * net_dev); 215static int sis900_mii_probe (struct net_device * net_dev);
209static void sis900_init_rxfilter (struct net_device * net_dev); 216static void sis900_init_rxfilter (struct net_device * net_dev);
210static u16 read_eeprom(long ioaddr, int location); 217static u16 read_eeprom(void __iomem *ioaddr, int location);
211static int mdio_read(struct net_device *net_dev, int phy_id, int location); 218static int mdio_read(struct net_device *net_dev, int phy_id, int location);
212static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val); 219static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
213static void sis900_timer(unsigned long data); 220static void sis900_timer(unsigned long data);
@@ -231,7 +238,7 @@ static u16 sis900_default_phy(struct net_device * net_dev);
231static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy); 238static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy);
232static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr); 239static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
233static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr); 240static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
234static void sis900_set_mode (long ioaddr, int speed, int duplex); 241static void sis900_set_mode(struct sis900_private *, int speed, int duplex);
235static const struct ethtool_ops sis900_ethtool_ops; 242static const struct ethtool_ops sis900_ethtool_ops;
236 243
237/** 244/**
@@ -246,7 +253,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
246 253
247static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) 254static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
248{ 255{
249 long ioaddr = pci_resource_start(pci_dev, 0); 256 struct sis900_private *sis_priv = netdev_priv(net_dev);
257 void __iomem *ioaddr = sis_priv->ioaddr;
250 u16 signature; 258 u16 signature;
251 int i; 259 int i;
252 260
@@ -325,29 +333,30 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
325static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, 333static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
326 struct net_device *net_dev) 334 struct net_device *net_dev)
327{ 335{
328 long ioaddr = net_dev->base_addr; 336 struct sis900_private *sis_priv = netdev_priv(net_dev);
337 void __iomem *ioaddr = sis_priv->ioaddr;
329 u32 rfcrSave; 338 u32 rfcrSave;
330 u32 i; 339 u32 i;
331 340
332 rfcrSave = inl(rfcr + ioaddr); 341 rfcrSave = sr32(rfcr);
333 342
334 outl(rfcrSave | RELOAD, ioaddr + cr); 343 sw32(cr, rfcrSave | RELOAD);
335 outl(0, ioaddr + cr); 344 sw32(cr, 0);
336 345
337 /* disable packet filtering before setting filter */ 346 /* disable packet filtering before setting filter */
338 outl(rfcrSave & ~RFEN, rfcr + ioaddr); 347 sw32(rfcr, rfcrSave & ~RFEN);
339 348
340 /* load MAC addr to filter data register */ 349 /* load MAC addr to filter data register */
341 for (i = 0 ; i < 3 ; i++) { 350 for (i = 0 ; i < 3 ; i++) {
342 outl((i << RFADDR_shift), ioaddr + rfcr); 351 sw32(rfcr, (i << RFADDR_shift));
343 *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr); 352 *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
344 } 353 }
345 354
346 /* Store MAC Address in perm_addr */ 355 /* Store MAC Address in perm_addr */
347 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); 356 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
348 357
349 /* enable packet filtering */ 358 /* enable packet filtering */
350 outl(rfcrSave | RFEN, rfcr + ioaddr); 359 sw32(rfcr, rfcrSave | RFEN);
351 360
352 return 1; 361 return 1;
353} 362}
@@ -371,31 +380,30 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
371static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, 380static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
372 struct net_device *net_dev) 381 struct net_device *net_dev)
373{ 382{
374 long ioaddr = net_dev->base_addr; 383 struct sis900_private *sis_priv = netdev_priv(net_dev);
375 long ee_addr = ioaddr + mear; 384 void __iomem *ioaddr = sis_priv->ioaddr;
376 u32 waittime = 0; 385 int wait, rc = 0;
377 int i;
378 386
379 outl(EEREQ, ee_addr); 387 sw32(mear, EEREQ);
380 while(waittime < 2000) { 388 for (wait = 0; wait < 2000; wait++) {
381 if(inl(ee_addr) & EEGNT) { 389 if (sr32(mear) & EEGNT) {
390 u16 *mac = (u16 *)net_dev->dev_addr;
391 int i;
382 392
383 /* get MAC address from EEPROM */ 393 /* get MAC address from EEPROM */
384 for (i = 0; i < 3; i++) 394 for (i = 0; i < 3; i++)
385 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); 395 mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
386 396
387 /* Store MAC Address in perm_addr */ 397 /* Store MAC Address in perm_addr */
388 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); 398 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
389 399
390 outl(EEDONE, ee_addr); 400 rc = 1;
391 return 1; 401 break;
392 } else {
393 udelay(1);
394 waittime ++;
395 } 402 }
403 udelay(1);
396 } 404 }
397 outl(EEDONE, ee_addr); 405 sw32(mear, EEDONE);
398 return 0; 406 return rc;
399} 407}
400 408
401static const struct net_device_ops sis900_netdev_ops = { 409static const struct net_device_ops sis900_netdev_ops = {
@@ -433,7 +441,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
433 struct pci_dev *dev; 441 struct pci_dev *dev;
434 dma_addr_t ring_dma; 442 dma_addr_t ring_dma;
435 void *ring_space; 443 void *ring_space;
436 long ioaddr; 444 void __iomem *ioaddr;
437 int i, ret; 445 int i, ret;
438 const char *card_name = card_names[pci_id->driver_data]; 446 const char *card_name = card_names[pci_id->driver_data];
439 const char *dev_name = pci_name(pci_dev); 447 const char *dev_name = pci_name(pci_dev);
@@ -464,14 +472,17 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
464 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 472 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
465 473
466 /* We do a request_region() to register /proc/ioports info. */ 474 /* We do a request_region() to register /proc/ioports info. */
467 ioaddr = pci_resource_start(pci_dev, 0);
468 ret = pci_request_regions(pci_dev, "sis900"); 475 ret = pci_request_regions(pci_dev, "sis900");
469 if (ret) 476 if (ret)
470 goto err_out; 477 goto err_out;
471 478
479 /* IO region. */
480 ioaddr = pci_iomap(pci_dev, 0, 0);
481 if (!ioaddr)
482 goto err_out_cleardev;
483
472 sis_priv = netdev_priv(net_dev); 484 sis_priv = netdev_priv(net_dev);
473 net_dev->base_addr = ioaddr; 485 sis_priv->ioaddr = ioaddr;
474 net_dev->irq = pci_dev->irq;
475 sis_priv->pci_dev = pci_dev; 486 sis_priv->pci_dev = pci_dev;
476 spin_lock_init(&sis_priv->lock); 487 spin_lock_init(&sis_priv->lock);
477 488
@@ -480,7 +491,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
480 ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma); 491 ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
481 if (!ring_space) { 492 if (!ring_space) {
482 ret = -ENOMEM; 493 ret = -ENOMEM;
483 goto err_out_cleardev; 494 goto err_out_unmap;
484 } 495 }
485 sis_priv->tx_ring = ring_space; 496 sis_priv->tx_ring = ring_space;
486 sis_priv->tx_ring_dma = ring_dma; 497 sis_priv->tx_ring_dma = ring_dma;
@@ -534,7 +545,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
534 545
535 /* 630ET : set the mii access mode as software-mode */ 546 /* 630ET : set the mii access mode as software-mode */
536 if (sis_priv->chipset_rev == SIS630ET_900_REV) 547 if (sis_priv->chipset_rev == SIS630ET_900_REV)
537 outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr); 548 sw32(cr, ACCESSMODE | sr32(cr));
538 549
539 /* probe for mii transceiver */ 550 /* probe for mii transceiver */
540 if (sis900_mii_probe(net_dev) == 0) { 551 if (sis900_mii_probe(net_dev) == 0) {
@@ -556,25 +567,27 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
556 goto err_unmap_rx; 567 goto err_unmap_rx;
557 568
558 /* print some information about our NIC */ 569 /* print some information about our NIC */
559 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n", 570 printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n",
560 net_dev->name, card_name, ioaddr, net_dev->irq, 571 net_dev->name, card_name, ioaddr, pci_dev->irq,
561 net_dev->dev_addr); 572 net_dev->dev_addr);
562 573
563 /* Detect Wake on Lan support */ 574 /* Detect Wake on Lan support */
564 ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27; 575 ret = (sr32(CFGPMC) & PMESP) >> 27;
565 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0) 576 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
566 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name); 577 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
567 578
568 return 0; 579 return 0;
569 580
570 err_unmap_rx: 581err_unmap_rx:
571 pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring, 582 pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
572 sis_priv->rx_ring_dma); 583 sis_priv->rx_ring_dma);
573 err_unmap_tx: 584err_unmap_tx:
574 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, 585 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
575 sis_priv->tx_ring_dma); 586 sis_priv->tx_ring_dma);
576 err_out_cleardev: 587err_out_unmap:
577 pci_set_drvdata(pci_dev, NULL); 588 pci_iounmap(pci_dev, ioaddr);
589err_out_cleardev:
590 pci_set_drvdata(pci_dev, NULL);
578 pci_release_regions(pci_dev); 591 pci_release_regions(pci_dev);
579 err_out: 592 err_out:
580 free_netdev(net_dev); 593 free_netdev(net_dev);
@@ -798,7 +811,7 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph
798 811
799 812
800/* Delay between EEPROM clock transitions. */ 813/* Delay between EEPROM clock transitions. */
801#define eeprom_delay() inl(ee_addr) 814#define eeprom_delay() sr32(mear)
802 815
803/** 816/**
804 * read_eeprom - Read Serial EEPROM 817 * read_eeprom - Read Serial EEPROM
@@ -809,41 +822,41 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph
809 * Note that location is in word (16 bits) unit 822 * Note that location is in word (16 bits) unit
810 */ 823 */
811 824
812static u16 __devinit read_eeprom(long ioaddr, int location) 825static u16 __devinit read_eeprom(void __iomem *ioaddr, int location)
813{ 826{
827 u32 read_cmd = location | EEread;
814 int i; 828 int i;
815 u16 retval = 0; 829 u16 retval = 0;
816 long ee_addr = ioaddr + mear;
817 u32 read_cmd = location | EEread;
818 830
819 outl(0, ee_addr); 831 sw32(mear, 0);
820 eeprom_delay(); 832 eeprom_delay();
821 outl(EECS, ee_addr); 833 sw32(mear, EECS);
822 eeprom_delay(); 834 eeprom_delay();
823 835
824 /* Shift the read command (9) bits out. */ 836 /* Shift the read command (9) bits out. */
825 for (i = 8; i >= 0; i--) { 837 for (i = 8; i >= 0; i--) {
826 u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS; 838 u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
827 outl(dataval, ee_addr); 839
840 sw32(mear, dataval);
828 eeprom_delay(); 841 eeprom_delay();
829 outl(dataval | EECLK, ee_addr); 842 sw32(mear, dataval | EECLK);
830 eeprom_delay(); 843 eeprom_delay();
831 } 844 }
832 outl(EECS, ee_addr); 845 sw32(mear, EECS);
833 eeprom_delay(); 846 eeprom_delay();
834 847
835 /* read the 16-bits data in */ 848 /* read the 16-bits data in */
836 for (i = 16; i > 0; i--) { 849 for (i = 16; i > 0; i--) {
837 outl(EECS, ee_addr); 850 sw32(mear, EECS);
838 eeprom_delay(); 851 eeprom_delay();
839 outl(EECS | EECLK, ee_addr); 852 sw32(mear, EECS | EECLK);
840 eeprom_delay(); 853 eeprom_delay();
841 retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0); 854 retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0);
842 eeprom_delay(); 855 eeprom_delay();
843 } 856 }
844 857
845 /* Terminate the EEPROM access. */ 858 /* Terminate the EEPROM access. */
846 outl(0, ee_addr); 859 sw32(mear, 0);
847 eeprom_delay(); 860 eeprom_delay();
848 861
849 return retval; 862 return retval;
@@ -852,24 +865,27 @@ static u16 __devinit read_eeprom(long ioaddr, int location)
852/* Read and write the MII management registers using software-generated 865/* Read and write the MII management registers using software-generated
853 serial MDIO protocol. Note that the command bits and data bits are 866 serial MDIO protocol. Note that the command bits and data bits are
854 send out separately */ 867 send out separately */
855#define mdio_delay() inl(mdio_addr) 868#define mdio_delay() sr32(mear)
856 869
857static void mdio_idle(long mdio_addr) 870static void mdio_idle(struct sis900_private *sp)
858{ 871{
859 outl(MDIO | MDDIR, mdio_addr); 872 void __iomem *ioaddr = sp->ioaddr;
873
874 sw32(mear, MDIO | MDDIR);
860 mdio_delay(); 875 mdio_delay();
861 outl(MDIO | MDDIR | MDC, mdio_addr); 876 sw32(mear, MDIO | MDDIR | MDC);
862} 877}
863 878
864/* Syncronize the MII management interface by shifting 32 one bits out. */ 879/* Synchronize the MII management interface by shifting 32 one bits out. */
865static void mdio_reset(long mdio_addr) 880static void mdio_reset(struct sis900_private *sp)
866{ 881{
882 void __iomem *ioaddr = sp->ioaddr;
867 int i; 883 int i;
868 884
869 for (i = 31; i >= 0; i--) { 885 for (i = 31; i >= 0; i--) {
870 outl(MDDIR | MDIO, mdio_addr); 886 sw32(mear, MDDIR | MDIO);
871 mdio_delay(); 887 mdio_delay();
872 outl(MDDIR | MDIO | MDC, mdio_addr); 888 sw32(mear, MDDIR | MDIO | MDC);
873 mdio_delay(); 889 mdio_delay();
874 } 890 }
875} 891}
@@ -887,31 +903,33 @@ static void mdio_reset(long mdio_addr)
887 903
888static int mdio_read(struct net_device *net_dev, int phy_id, int location) 904static int mdio_read(struct net_device *net_dev, int phy_id, int location)
889{ 905{
890 long mdio_addr = net_dev->base_addr + mear;
891 int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift); 906 int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
907 struct sis900_private *sp = netdev_priv(net_dev);
908 void __iomem *ioaddr = sp->ioaddr;
892 u16 retval = 0; 909 u16 retval = 0;
893 int i; 910 int i;
894 911
895 mdio_reset(mdio_addr); 912 mdio_reset(sp);
896 mdio_idle(mdio_addr); 913 mdio_idle(sp);
897 914
898 for (i = 15; i >= 0; i--) { 915 for (i = 15; i >= 0; i--) {
899 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR; 916 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
900 outl(dataval, mdio_addr); 917
918 sw32(mear, dataval);
901 mdio_delay(); 919 mdio_delay();
902 outl(dataval | MDC, mdio_addr); 920 sw32(mear, dataval | MDC);
903 mdio_delay(); 921 mdio_delay();
904 } 922 }
905 923
906 /* Read the 16 data bits. */ 924 /* Read the 16 data bits. */
907 for (i = 16; i > 0; i--) { 925 for (i = 16; i > 0; i--) {
908 outl(0, mdio_addr); 926 sw32(mear, 0);
909 mdio_delay(); 927 mdio_delay();
910 retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0); 928 retval = (retval << 1) | ((sr32(mear) & MDIO) ? 1 : 0);
911 outl(MDC, mdio_addr); 929 sw32(mear, MDC);
912 mdio_delay(); 930 mdio_delay();
913 } 931 }
914 outl(0x00, mdio_addr); 932 sw32(mear, 0x00);
915 933
916 return retval; 934 return retval;
917} 935}
@@ -931,19 +949,21 @@ static int mdio_read(struct net_device *net_dev, int phy_id, int location)
931static void mdio_write(struct net_device *net_dev, int phy_id, int location, 949static void mdio_write(struct net_device *net_dev, int phy_id, int location,
932 int value) 950 int value)
933{ 951{
934 long mdio_addr = net_dev->base_addr + mear;
935 int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift); 952 int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
953 struct sis900_private *sp = netdev_priv(net_dev);
954 void __iomem *ioaddr = sp->ioaddr;
936 int i; 955 int i;
937 956
938 mdio_reset(mdio_addr); 957 mdio_reset(sp);
939 mdio_idle(mdio_addr); 958 mdio_idle(sp);
940 959
941 /* Shift the command bits out. */ 960 /* Shift the command bits out. */
942 for (i = 15; i >= 0; i--) { 961 for (i = 15; i >= 0; i--) {
943 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR; 962 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
944 outb(dataval, mdio_addr); 963
964 sw8(mear, dataval);
945 mdio_delay(); 965 mdio_delay();
946 outb(dataval | MDC, mdio_addr); 966 sw8(mear, dataval | MDC);
947 mdio_delay(); 967 mdio_delay();
948 } 968 }
949 mdio_delay(); 969 mdio_delay();
@@ -951,21 +971,22 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
951 /* Shift the value bits out. */ 971 /* Shift the value bits out. */
952 for (i = 15; i >= 0; i--) { 972 for (i = 15; i >= 0; i--) {
953 int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR; 973 int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
954 outl(dataval, mdio_addr); 974
975 sw32(mear, dataval);
955 mdio_delay(); 976 mdio_delay();
956 outl(dataval | MDC, mdio_addr); 977 sw32(mear, dataval | MDC);
957 mdio_delay(); 978 mdio_delay();
958 } 979 }
959 mdio_delay(); 980 mdio_delay();
960 981
961 /* Clear out extra bits. */ 982 /* Clear out extra bits. */
962 for (i = 2; i > 0; i--) { 983 for (i = 2; i > 0; i--) {
963 outb(0, mdio_addr); 984 sw8(mear, 0);
964 mdio_delay(); 985 mdio_delay();
965 outb(MDC, mdio_addr); 986 sw8(mear, MDC);
966 mdio_delay(); 987 mdio_delay();
967 } 988 }
968 outl(0x00, mdio_addr); 989 sw32(mear, 0x00);
969} 990}
970 991
971 992
@@ -1000,9 +1021,12 @@ static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
1000*/ 1021*/
1001static void sis900_poll(struct net_device *dev) 1022static void sis900_poll(struct net_device *dev)
1002{ 1023{
1003 disable_irq(dev->irq); 1024 struct sis900_private *sp = netdev_priv(dev);
1004 sis900_interrupt(dev->irq, dev); 1025 const int irq = sp->pci_dev->irq;
1005 enable_irq(dev->irq); 1026
1027 disable_irq(irq);
1028 sis900_interrupt(irq, dev);
1029 enable_irq(irq);
1006} 1030}
1007#endif 1031#endif
1008 1032
@@ -1018,7 +1042,7 @@ static int
1018sis900_open(struct net_device *net_dev) 1042sis900_open(struct net_device *net_dev)
1019{ 1043{
1020 struct sis900_private *sis_priv = netdev_priv(net_dev); 1044 struct sis900_private *sis_priv = netdev_priv(net_dev);
1021 long ioaddr = net_dev->base_addr; 1045 void __iomem *ioaddr = sis_priv->ioaddr;
1022 int ret; 1046 int ret;
1023 1047
1024 /* Soft reset the chip. */ 1048 /* Soft reset the chip. */
@@ -1027,8 +1051,8 @@ sis900_open(struct net_device *net_dev)
1027 /* Equalizer workaround Rule */ 1051 /* Equalizer workaround Rule */
1028 sis630_set_eq(net_dev, sis_priv->chipset_rev); 1052 sis630_set_eq(net_dev, sis_priv->chipset_rev);
1029 1053
1030 ret = request_irq(net_dev->irq, sis900_interrupt, IRQF_SHARED, 1054 ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED,
1031 net_dev->name, net_dev); 1055 net_dev->name, net_dev);
1032 if (ret) 1056 if (ret)
1033 return ret; 1057 return ret;
1034 1058
@@ -1042,12 +1066,12 @@ sis900_open(struct net_device *net_dev)
1042 netif_start_queue(net_dev); 1066 netif_start_queue(net_dev);
1043 1067
1044 /* Workaround for EDB */ 1068 /* Workaround for EDB */
1045 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); 1069 sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
1046 1070
1047 /* Enable all known interrupts by setting the interrupt mask. */ 1071 /* Enable all known interrupts by setting the interrupt mask. */
1048 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); 1072 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
1049 outl(RxENA | inl(ioaddr + cr), ioaddr + cr); 1073 sw32(cr, RxENA | sr32(cr));
1050 outl(IE, ioaddr + ier); 1074 sw32(ier, IE);
1051 1075
1052 sis900_check_mode(net_dev, sis_priv->mii); 1076 sis900_check_mode(net_dev, sis_priv->mii);
1053 1077
@@ -1074,31 +1098,30 @@ static void
1074sis900_init_rxfilter (struct net_device * net_dev) 1098sis900_init_rxfilter (struct net_device * net_dev)
1075{ 1099{
1076 struct sis900_private *sis_priv = netdev_priv(net_dev); 1100 struct sis900_private *sis_priv = netdev_priv(net_dev);
1077 long ioaddr = net_dev->base_addr; 1101 void __iomem *ioaddr = sis_priv->ioaddr;
1078 u32 rfcrSave; 1102 u32 rfcrSave;
1079 u32 i; 1103 u32 i;
1080 1104
1081 rfcrSave = inl(rfcr + ioaddr); 1105 rfcrSave = sr32(rfcr);
1082 1106
1083 /* disable packet filtering before setting filter */ 1107 /* disable packet filtering before setting filter */
1084 outl(rfcrSave & ~RFEN, rfcr + ioaddr); 1108 sw32(rfcr, rfcrSave & ~RFEN);
1085 1109
1086 /* load MAC addr to filter data register */ 1110 /* load MAC addr to filter data register */
1087 for (i = 0 ; i < 3 ; i++) { 1111 for (i = 0 ; i < 3 ; i++) {
1088 u32 w; 1112 u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
1089 1113
1090 w = (u32) *((u16 *)(net_dev->dev_addr)+i); 1114 sw32(rfcr, i << RFADDR_shift);
1091 outl((i << RFADDR_shift), ioaddr + rfcr); 1115 sw32(rfdr, w);
1092 outl(w, ioaddr + rfdr);
1093 1116
1094 if (netif_msg_hw(sis_priv)) { 1117 if (netif_msg_hw(sis_priv)) {
1095 printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n", 1118 printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n",
1096 net_dev->name, i, inl(ioaddr + rfdr)); 1119 net_dev->name, i, sr32(rfdr));
1097 } 1120 }
1098 } 1121 }
1099 1122
1100 /* enable packet filtering */ 1123 /* enable packet filtering */
1101 outl(rfcrSave | RFEN, rfcr + ioaddr); 1124 sw32(rfcr, rfcrSave | RFEN);
1102} 1125}
1103 1126
1104/** 1127/**
@@ -1112,7 +1135,7 @@ static void
1112sis900_init_tx_ring(struct net_device *net_dev) 1135sis900_init_tx_ring(struct net_device *net_dev)
1113{ 1136{
1114 struct sis900_private *sis_priv = netdev_priv(net_dev); 1137 struct sis900_private *sis_priv = netdev_priv(net_dev);
1115 long ioaddr = net_dev->base_addr; 1138 void __iomem *ioaddr = sis_priv->ioaddr;
1116 int i; 1139 int i;
1117 1140
1118 sis_priv->tx_full = 0; 1141 sis_priv->tx_full = 0;
@@ -1128,10 +1151,10 @@ sis900_init_tx_ring(struct net_device *net_dev)
1128 } 1151 }
1129 1152
1130 /* load Transmit Descriptor Register */ 1153 /* load Transmit Descriptor Register */
1131 outl(sis_priv->tx_ring_dma, ioaddr + txdp); 1154 sw32(txdp, sis_priv->tx_ring_dma);
1132 if (netif_msg_hw(sis_priv)) 1155 if (netif_msg_hw(sis_priv))
1133 printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n", 1156 printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
1134 net_dev->name, inl(ioaddr + txdp)); 1157 net_dev->name, sr32(txdp));
1135} 1158}
1136 1159
1137/** 1160/**
@@ -1146,7 +1169,7 @@ static void
1146sis900_init_rx_ring(struct net_device *net_dev) 1169sis900_init_rx_ring(struct net_device *net_dev)
1147{ 1170{
1148 struct sis900_private *sis_priv = netdev_priv(net_dev); 1171 struct sis900_private *sis_priv = netdev_priv(net_dev);
1149 long ioaddr = net_dev->base_addr; 1172 void __iomem *ioaddr = sis_priv->ioaddr;
1150 int i; 1173 int i;
1151 1174
1152 sis_priv->cur_rx = 0; 1175 sis_priv->cur_rx = 0;
@@ -1181,10 +1204,10 @@ sis900_init_rx_ring(struct net_device *net_dev)
1181 sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC); 1204 sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
1182 1205
1183 /* load Receive Descriptor Register */ 1206 /* load Receive Descriptor Register */
1184 outl(sis_priv->rx_ring_dma, ioaddr + rxdp); 1207 sw32(rxdp, sis_priv->rx_ring_dma);
1185 if (netif_msg_hw(sis_priv)) 1208 if (netif_msg_hw(sis_priv))
1186 printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n", 1209 printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
1187 net_dev->name, inl(ioaddr + rxdp)); 1210 net_dev->name, sr32(rxdp));
1188} 1211}
1189 1212
1190/** 1213/**
@@ -1298,7 +1321,7 @@ static void sis900_timer(unsigned long data)
1298 1321
1299 sis900_read_mode(net_dev, &speed, &duplex); 1322 sis900_read_mode(net_dev, &speed, &duplex);
1300 if (duplex){ 1323 if (duplex){
1301 sis900_set_mode(net_dev->base_addr, speed, duplex); 1324 sis900_set_mode(sis_priv, speed, duplex);
1302 sis630_set_eq(net_dev, sis_priv->chipset_rev); 1325 sis630_set_eq(net_dev, sis_priv->chipset_rev);
1303 netif_start_queue(net_dev); 1326 netif_start_queue(net_dev);
1304 } 1327 }
@@ -1359,25 +1382,25 @@ static void sis900_timer(unsigned long data)
1359static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy) 1382static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
1360{ 1383{
1361 struct sis900_private *sis_priv = netdev_priv(net_dev); 1384 struct sis900_private *sis_priv = netdev_priv(net_dev);
1362 long ioaddr = net_dev->base_addr; 1385 void __iomem *ioaddr = sis_priv->ioaddr;
1363 int speed, duplex; 1386 int speed, duplex;
1364 1387
1365 if (mii_phy->phy_types == LAN) { 1388 if (mii_phy->phy_types == LAN) {
1366 outl(~EXD & inl(ioaddr + cfg), ioaddr + cfg); 1389 sw32(cfg, ~EXD & sr32(cfg));
1367 sis900_set_capability(net_dev , mii_phy); 1390 sis900_set_capability(net_dev , mii_phy);
1368 sis900_auto_negotiate(net_dev, sis_priv->cur_phy); 1391 sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
1369 } else { 1392 } else {
1370 outl(EXD | inl(ioaddr + cfg), ioaddr + cfg); 1393 sw32(cfg, EXD | sr32(cfg));
1371 speed = HW_SPEED_HOME; 1394 speed = HW_SPEED_HOME;
1372 duplex = FDX_CAPABLE_HALF_SELECTED; 1395 duplex = FDX_CAPABLE_HALF_SELECTED;
1373 sis900_set_mode(ioaddr, speed, duplex); 1396 sis900_set_mode(sis_priv, speed, duplex);
1374 sis_priv->autong_complete = 1; 1397 sis_priv->autong_complete = 1;
1375 } 1398 }
1376} 1399}
1377 1400
1378/** 1401/**
1379 * sis900_set_mode - Set the media mode of mac register. 1402 * sis900_set_mode - Set the media mode of mac register.
1380 * @ioaddr: the address of the device 1403 * @sp: the device private data
1381 * @speed : the transmit speed to be determined 1404 * @speed : the transmit speed to be determined
1382 * @duplex: the duplex mode to be determined 1405 * @duplex: the duplex mode to be determined
1383 * 1406 *
@@ -1388,11 +1411,12 @@ static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_ph
1388 * double words. 1411 * double words.
1389 */ 1412 */
1390 1413
1391static void sis900_set_mode (long ioaddr, int speed, int duplex) 1414static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
1392{ 1415{
1416 void __iomem *ioaddr = sp->ioaddr;
1393 u32 tx_flags = 0, rx_flags = 0; 1417 u32 tx_flags = 0, rx_flags = 0;
1394 1418
1395 if (inl(ioaddr + cfg) & EDB_MASTER_EN) { 1419 if (sr32( cfg) & EDB_MASTER_EN) {
1396 tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) | 1420 tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
1397 (TX_FILL_THRESH << TxFILLT_shift); 1421 (TX_FILL_THRESH << TxFILLT_shift);
1398 rx_flags = DMA_BURST_64 << RxMXDMA_shift; 1422 rx_flags = DMA_BURST_64 << RxMXDMA_shift;
@@ -1420,8 +1444,8 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex)
1420 rx_flags |= RxAJAB; 1444 rx_flags |= RxAJAB;
1421#endif 1445#endif
1422 1446
1423 outl (tx_flags, ioaddr + txcfg); 1447 sw32(txcfg, tx_flags);
1424 outl (rx_flags, ioaddr + rxcfg); 1448 sw32(rxcfg, rx_flags);
1425} 1449}
1426 1450
1427/** 1451/**
@@ -1528,16 +1552,17 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
1528static void sis900_tx_timeout(struct net_device *net_dev) 1552static void sis900_tx_timeout(struct net_device *net_dev)
1529{ 1553{
1530 struct sis900_private *sis_priv = netdev_priv(net_dev); 1554 struct sis900_private *sis_priv = netdev_priv(net_dev);
1531 long ioaddr = net_dev->base_addr; 1555 void __iomem *ioaddr = sis_priv->ioaddr;
1532 unsigned long flags; 1556 unsigned long flags;
1533 int i; 1557 int i;
1534 1558
1535 if(netif_msg_tx_err(sis_priv)) 1559 if (netif_msg_tx_err(sis_priv)) {
1536 printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n", 1560 printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
1537 net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr)); 1561 net_dev->name, sr32(cr), sr32(isr));
1562 }
1538 1563
1539 /* Disable interrupts by clearing the interrupt mask. */ 1564 /* Disable interrupts by clearing the interrupt mask. */
1540 outl(0x0000, ioaddr + imr); 1565 sw32(imr, 0x0000);
1541 1566
1542 /* use spinlock to prevent interrupt handler accessing buffer ring */ 1567 /* use spinlock to prevent interrupt handler accessing buffer ring */
1543 spin_lock_irqsave(&sis_priv->lock, flags); 1568 spin_lock_irqsave(&sis_priv->lock, flags);
@@ -1566,10 +1591,10 @@ static void sis900_tx_timeout(struct net_device *net_dev)
1566 net_dev->trans_start = jiffies; /* prevent tx timeout */ 1591 net_dev->trans_start = jiffies; /* prevent tx timeout */
1567 1592
1568 /* load Transmit Descriptor Register */ 1593 /* load Transmit Descriptor Register */
1569 outl(sis_priv->tx_ring_dma, ioaddr + txdp); 1594 sw32(txdp, sis_priv->tx_ring_dma);
1570 1595
1571 /* Enable all known interrupts by setting the interrupt mask. */ 1596 /* Enable all known interrupts by setting the interrupt mask. */
1572 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); 1597 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
1573} 1598}
1574 1599
1575/** 1600/**
@@ -1586,7 +1611,7 @@ static netdev_tx_t
1586sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) 1611sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1587{ 1612{
1588 struct sis900_private *sis_priv = netdev_priv(net_dev); 1613 struct sis900_private *sis_priv = netdev_priv(net_dev);
1589 long ioaddr = net_dev->base_addr; 1614 void __iomem *ioaddr = sis_priv->ioaddr;
1590 unsigned int entry; 1615 unsigned int entry;
1591 unsigned long flags; 1616 unsigned long flags;
1592 unsigned int index_cur_tx, index_dirty_tx; 1617 unsigned int index_cur_tx, index_dirty_tx;
@@ -1608,7 +1633,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1608 sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev, 1633 sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
1609 skb->data, skb->len, PCI_DMA_TODEVICE); 1634 skb->data, skb->len, PCI_DMA_TODEVICE);
1610 sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len); 1635 sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
1611 outl(TxENA | inl(ioaddr + cr), ioaddr + cr); 1636 sw32(cr, TxENA | sr32(cr));
1612 1637
1613 sis_priv->cur_tx ++; 1638 sis_priv->cur_tx ++;
1614 index_cur_tx = sis_priv->cur_tx; 1639 index_cur_tx = sis_priv->cur_tx;
@@ -1654,14 +1679,14 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1654 struct net_device *net_dev = dev_instance; 1679 struct net_device *net_dev = dev_instance;
1655 struct sis900_private *sis_priv = netdev_priv(net_dev); 1680 struct sis900_private *sis_priv = netdev_priv(net_dev);
1656 int boguscnt = max_interrupt_work; 1681 int boguscnt = max_interrupt_work;
1657 long ioaddr = net_dev->base_addr; 1682 void __iomem *ioaddr = sis_priv->ioaddr;
1658 u32 status; 1683 u32 status;
1659 unsigned int handled = 0; 1684 unsigned int handled = 0;
1660 1685
1661 spin_lock (&sis_priv->lock); 1686 spin_lock (&sis_priv->lock);
1662 1687
1663 do { 1688 do {
1664 status = inl(ioaddr + isr); 1689 status = sr32(isr);
1665 1690
1666 if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0) 1691 if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
1667 /* nothing intresting happened */ 1692 /* nothing intresting happened */
@@ -1696,7 +1721,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1696 if(netif_msg_intr(sis_priv)) 1721 if(netif_msg_intr(sis_priv))
1697 printk(KERN_DEBUG "%s: exiting interrupt, " 1722 printk(KERN_DEBUG "%s: exiting interrupt, "
1698 "interrupt status = 0x%#8.8x.\n", 1723 "interrupt status = 0x%#8.8x.\n",
1699 net_dev->name, inl(ioaddr + isr)); 1724 net_dev->name, sr32(isr));
1700 1725
1701 spin_unlock (&sis_priv->lock); 1726 spin_unlock (&sis_priv->lock);
1702 return IRQ_RETVAL(handled); 1727 return IRQ_RETVAL(handled);
@@ -1715,7 +1740,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1715static int sis900_rx(struct net_device *net_dev) 1740static int sis900_rx(struct net_device *net_dev)
1716{ 1741{
1717 struct sis900_private *sis_priv = netdev_priv(net_dev); 1742 struct sis900_private *sis_priv = netdev_priv(net_dev);
1718 long ioaddr = net_dev->base_addr; 1743 void __iomem *ioaddr = sis_priv->ioaddr;
1719 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC; 1744 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
1720 u32 rx_status = sis_priv->rx_ring[entry].cmdsts; 1745 u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
1721 int rx_work_limit; 1746 int rx_work_limit;
@@ -1847,7 +1872,7 @@ refill_rx_ring:
1847 } 1872 }
1848 } 1873 }
1849 /* re-enable the potentially idle receive state matchine */ 1874 /* re-enable the potentially idle receive state matchine */
1850 outl(RxENA | inl(ioaddr + cr), ioaddr + cr ); 1875 sw32(cr , RxENA | sr32(cr));
1851 1876
1852 return 0; 1877 return 0;
1853} 1878}
@@ -1932,31 +1957,31 @@ static void sis900_finish_xmit (struct net_device *net_dev)
1932 1957
1933static int sis900_close(struct net_device *net_dev) 1958static int sis900_close(struct net_device *net_dev)
1934{ 1959{
1935 long ioaddr = net_dev->base_addr;
1936 struct sis900_private *sis_priv = netdev_priv(net_dev); 1960 struct sis900_private *sis_priv = netdev_priv(net_dev);
1961 struct pci_dev *pdev = sis_priv->pci_dev;
1962 void __iomem *ioaddr = sis_priv->ioaddr;
1937 struct sk_buff *skb; 1963 struct sk_buff *skb;
1938 int i; 1964 int i;
1939 1965
1940 netif_stop_queue(net_dev); 1966 netif_stop_queue(net_dev);
1941 1967
1942 /* Disable interrupts by clearing the interrupt mask. */ 1968 /* Disable interrupts by clearing the interrupt mask. */
1943 outl(0x0000, ioaddr + imr); 1969 sw32(imr, 0x0000);
1944 outl(0x0000, ioaddr + ier); 1970 sw32(ier, 0x0000);
1945 1971
1946 /* Stop the chip's Tx and Rx Status Machine */ 1972 /* Stop the chip's Tx and Rx Status Machine */
1947 outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr); 1973 sw32(cr, RxDIS | TxDIS | sr32(cr));
1948 1974
1949 del_timer(&sis_priv->timer); 1975 del_timer(&sis_priv->timer);
1950 1976
1951 free_irq(net_dev->irq, net_dev); 1977 free_irq(pdev->irq, net_dev);
1952 1978
1953 /* Free Tx and RX skbuff */ 1979 /* Free Tx and RX skbuff */
1954 for (i = 0; i < NUM_RX_DESC; i++) { 1980 for (i = 0; i < NUM_RX_DESC; i++) {
1955 skb = sis_priv->rx_skbuff[i]; 1981 skb = sis_priv->rx_skbuff[i];
1956 if (skb) { 1982 if (skb) {
1957 pci_unmap_single(sis_priv->pci_dev, 1983 pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr,
1958 sis_priv->rx_ring[i].bufptr, 1984 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1959 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1960 dev_kfree_skb(skb); 1985 dev_kfree_skb(skb);
1961 sis_priv->rx_skbuff[i] = NULL; 1986 sis_priv->rx_skbuff[i] = NULL;
1962 } 1987 }
@@ -1964,9 +1989,8 @@ static int sis900_close(struct net_device *net_dev)
1964 for (i = 0; i < NUM_TX_DESC; i++) { 1989 for (i = 0; i < NUM_TX_DESC; i++) {
1965 skb = sis_priv->tx_skbuff[i]; 1990 skb = sis_priv->tx_skbuff[i];
1966 if (skb) { 1991 if (skb) {
1967 pci_unmap_single(sis_priv->pci_dev, 1992 pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr,
1968 sis_priv->tx_ring[i].bufptr, skb->len, 1993 skb->len, PCI_DMA_TODEVICE);
1969 PCI_DMA_TODEVICE);
1970 dev_kfree_skb(skb); 1994 dev_kfree_skb(skb);
1971 sis_priv->tx_skbuff[i] = NULL; 1995 sis_priv->tx_skbuff[i] = NULL;
1972 } 1996 }
@@ -2055,14 +2079,14 @@ static int sis900_nway_reset(struct net_device *net_dev)
2055static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) 2079static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2056{ 2080{
2057 struct sis900_private *sis_priv = netdev_priv(net_dev); 2081 struct sis900_private *sis_priv = netdev_priv(net_dev);
2058 long pmctrl_addr = net_dev->base_addr + pmctrl; 2082 void __iomem *ioaddr = sis_priv->ioaddr;
2059 u32 cfgpmcsr = 0, pmctrl_bits = 0; 2083 u32 cfgpmcsr = 0, pmctrl_bits = 0;
2060 2084
2061 if (wol->wolopts == 0) { 2085 if (wol->wolopts == 0) {
2062 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); 2086 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2063 cfgpmcsr &= ~PME_EN; 2087 cfgpmcsr &= ~PME_EN;
2064 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr); 2088 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2065 outl(pmctrl_bits, pmctrl_addr); 2089 sw32(pmctrl, pmctrl_bits);
2066 if (netif_msg_wol(sis_priv)) 2090 if (netif_msg_wol(sis_priv))
2067 printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name); 2091 printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
2068 return 0; 2092 return 0;
@@ -2077,7 +2101,7 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
2077 if (wol->wolopts & WAKE_PHY) 2101 if (wol->wolopts & WAKE_PHY)
2078 pmctrl_bits |= LINKON; 2102 pmctrl_bits |= LINKON;
2079 2103
2080 outl(pmctrl_bits, pmctrl_addr); 2104 sw32(pmctrl, pmctrl_bits);
2081 2105
2082 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); 2106 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2083 cfgpmcsr |= PME_EN; 2107 cfgpmcsr |= PME_EN;
@@ -2090,10 +2114,11 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
2090 2114
2091static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) 2115static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2092{ 2116{
2093 long pmctrl_addr = net_dev->base_addr + pmctrl; 2117 struct sis900_private *sp = netdev_priv(net_dev);
2118 void __iomem *ioaddr = sp->ioaddr;
2094 u32 pmctrl_bits; 2119 u32 pmctrl_bits;
2095 2120
2096 pmctrl_bits = inl(pmctrl_addr); 2121 pmctrl_bits = sr32(pmctrl);
2097 if (pmctrl_bits & MAGICPKT) 2122 if (pmctrl_bits & MAGICPKT)
2098 wol->wolopts |= WAKE_MAGIC; 2123 wol->wolopts |= WAKE_MAGIC;
2099 if (pmctrl_bits & LINKON) 2124 if (pmctrl_bits & LINKON)
@@ -2279,8 +2304,8 @@ static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
2279 2304
2280static void set_rx_mode(struct net_device *net_dev) 2305static void set_rx_mode(struct net_device *net_dev)
2281{ 2306{
2282 long ioaddr = net_dev->base_addr;
2283 struct sis900_private *sis_priv = netdev_priv(net_dev); 2307 struct sis900_private *sis_priv = netdev_priv(net_dev);
2308 void __iomem *ioaddr = sis_priv->ioaddr;
2284 u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */ 2309 u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */
2285 int i, table_entries; 2310 int i, table_entries;
2286 u32 rx_mode; 2311 u32 rx_mode;
@@ -2322,24 +2347,24 @@ static void set_rx_mode(struct net_device *net_dev)
2322 /* update Multicast Hash Table in Receive Filter */ 2347 /* update Multicast Hash Table in Receive Filter */
2323 for (i = 0; i < table_entries; i++) { 2348 for (i = 0; i < table_entries; i++) {
2324 /* why plus 0x04 ??, That makes the correct value for hash table. */ 2349 /* why plus 0x04 ??, That makes the correct value for hash table. */
2325 outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr); 2350 sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift);
2326 outl(mc_filter[i], ioaddr + rfdr); 2351 sw32(rfdr, mc_filter[i]);
2327 } 2352 }
2328 2353
2329 outl(RFEN | rx_mode, ioaddr + rfcr); 2354 sw32(rfcr, RFEN | rx_mode);
2330 2355
2331 /* sis900 is capable of looping back packets at MAC level for 2356 /* sis900 is capable of looping back packets at MAC level for
2332 * debugging purpose */ 2357 * debugging purpose */
2333 if (net_dev->flags & IFF_LOOPBACK) { 2358 if (net_dev->flags & IFF_LOOPBACK) {
2334 u32 cr_saved; 2359 u32 cr_saved;
2335 /* We must disable Tx/Rx before setting loopback mode */ 2360 /* We must disable Tx/Rx before setting loopback mode */
2336 cr_saved = inl(ioaddr + cr); 2361 cr_saved = sr32(cr);
2337 outl(cr_saved | TxDIS | RxDIS, ioaddr + cr); 2362 sw32(cr, cr_saved | TxDIS | RxDIS);
2338 /* enable loopback */ 2363 /* enable loopback */
2339 outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg); 2364 sw32(txcfg, sr32(txcfg) | TxMLB);
2340 outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg); 2365 sw32(rxcfg, sr32(rxcfg) | RxATX);
2341 /* restore cr */ 2366 /* restore cr */
2342 outl(cr_saved, ioaddr + cr); 2367 sw32(cr, cr_saved);
2343 } 2368 }
2344} 2369}
2345 2370
@@ -2355,26 +2380,25 @@ static void set_rx_mode(struct net_device *net_dev)
2355static void sis900_reset(struct net_device *net_dev) 2380static void sis900_reset(struct net_device *net_dev)
2356{ 2381{
2357 struct sis900_private *sis_priv = netdev_priv(net_dev); 2382 struct sis900_private *sis_priv = netdev_priv(net_dev);
2358 long ioaddr = net_dev->base_addr; 2383 void __iomem *ioaddr = sis_priv->ioaddr;
2359 int i = 0;
2360 u32 status = TxRCMP | RxRCMP; 2384 u32 status = TxRCMP | RxRCMP;
2385 int i;
2361 2386
2362 outl(0, ioaddr + ier); 2387 sw32(ier, 0);
2363 outl(0, ioaddr + imr); 2388 sw32(imr, 0);
2364 outl(0, ioaddr + rfcr); 2389 sw32(rfcr, 0);
2365 2390
2366 outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr); 2391 sw32(cr, RxRESET | TxRESET | RESET | sr32(cr));
2367 2392
2368 /* Check that the chip has finished the reset. */ 2393 /* Check that the chip has finished the reset. */
2369 while (status && (i++ < 1000)) { 2394 for (i = 0; status && (i < 1000); i++)
2370 status ^= (inl(isr + ioaddr) & status); 2395 status ^= sr32(isr) & status;
2371 }
2372 2396
2373 if( (sis_priv->chipset_rev >= SIS635A_900_REV) || 2397 if (sis_priv->chipset_rev >= SIS635A_900_REV ||
2374 (sis_priv->chipset_rev == SIS900B_900_REV) ) 2398 sis_priv->chipset_rev == SIS900B_900_REV)
2375 outl(PESEL | RND_CNT, ioaddr + cfg); 2399 sw32(cfg, PESEL | RND_CNT);
2376 else 2400 else
2377 outl(PESEL, ioaddr + cfg); 2401 sw32(cfg, PESEL);
2378} 2402}
2379 2403
2380/** 2404/**
@@ -2388,10 +2412,12 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
2388{ 2412{
2389 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2413 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2390 struct sis900_private *sis_priv = netdev_priv(net_dev); 2414 struct sis900_private *sis_priv = netdev_priv(net_dev);
2391 struct mii_phy *phy = NULL; 2415
2416 unregister_netdev(net_dev);
2392 2417
2393 while (sis_priv->first_mii) { 2418 while (sis_priv->first_mii) {
2394 phy = sis_priv->first_mii; 2419 struct mii_phy *phy = sis_priv->first_mii;
2420
2395 sis_priv->first_mii = phy->next; 2421 sis_priv->first_mii = phy->next;
2396 kfree(phy); 2422 kfree(phy);
2397 } 2423 }
@@ -2400,7 +2426,7 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
2400 sis_priv->rx_ring_dma); 2426 sis_priv->rx_ring_dma);
2401 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, 2427 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
2402 sis_priv->tx_ring_dma); 2428 sis_priv->tx_ring_dma);
2403 unregister_netdev(net_dev); 2429 pci_iounmap(pci_dev, sis_priv->ioaddr);
2404 free_netdev(net_dev); 2430 free_netdev(net_dev);
2405 pci_release_regions(pci_dev); 2431 pci_release_regions(pci_dev);
2406 pci_set_drvdata(pci_dev, NULL); 2432 pci_set_drvdata(pci_dev, NULL);
@@ -2411,7 +2437,8 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
2411static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state) 2437static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
2412{ 2438{
2413 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2439 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2414 long ioaddr = net_dev->base_addr; 2440 struct sis900_private *sis_priv = netdev_priv(net_dev);
2441 void __iomem *ioaddr = sis_priv->ioaddr;
2415 2442
2416 if(!netif_running(net_dev)) 2443 if(!netif_running(net_dev))
2417 return 0; 2444 return 0;
@@ -2420,7 +2447,7 @@ static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
2420 netif_device_detach(net_dev); 2447 netif_device_detach(net_dev);
2421 2448
2422 /* Stop the chip's Tx and Rx Status Machine */ 2449 /* Stop the chip's Tx and Rx Status Machine */
2423 outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr); 2450 sw32(cr, RxDIS | TxDIS | sr32(cr));
2424 2451
2425 pci_set_power_state(pci_dev, PCI_D3hot); 2452 pci_set_power_state(pci_dev, PCI_D3hot);
2426 pci_save_state(pci_dev); 2453 pci_save_state(pci_dev);
@@ -2432,7 +2459,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
2432{ 2459{
2433 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2460 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2434 struct sis900_private *sis_priv = netdev_priv(net_dev); 2461 struct sis900_private *sis_priv = netdev_priv(net_dev);
2435 long ioaddr = net_dev->base_addr; 2462 void __iomem *ioaddr = sis_priv->ioaddr;
2436 2463
2437 if(!netif_running(net_dev)) 2464 if(!netif_running(net_dev))
2438 return 0; 2465 return 0;
@@ -2453,9 +2480,9 @@ static int sis900_resume(struct pci_dev *pci_dev)
2453 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); 2480 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
2454 2481
2455 /* Enable all known interrupts by setting the interrupt mask. */ 2482 /* Enable all known interrupts by setting the interrupt mask. */
2456 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); 2483 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
2457 outl(RxENA | inl(ioaddr + cr), ioaddr + cr); 2484 sw32(cr, RxENA | sr32(cr));
2458 outl(IE, ioaddr + ier); 2485 sw32(ier, IE);
2459 2486
2460 sis900_check_mode(net_dev, sis_priv->mii); 2487 sis900_check_mode(net_dev, sis_priv->mii);
2461 2488
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 2a662e6112e9..d01e59c348ad 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -146,6 +146,12 @@ enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
146#define EPIC_TOTAL_SIZE 0x100 146#define EPIC_TOTAL_SIZE 0x100
147#define USE_IO_OPS 1 147#define USE_IO_OPS 1
148 148
149#ifdef USE_IO_OPS
150#define EPIC_BAR 0
151#else
152#define EPIC_BAR 1
153#endif
154
149typedef enum { 155typedef enum {
150 SMSC_83C170_0, 156 SMSC_83C170_0,
151 SMSC_83C170, 157 SMSC_83C170,
@@ -176,21 +182,11 @@ static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
176}; 182};
177MODULE_DEVICE_TABLE (pci, epic_pci_tbl); 183MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
178 184
179 185#define ew16(reg, val) iowrite16(val, ioaddr + (reg))
180#ifndef USE_IO_OPS 186#define ew32(reg, val) iowrite32(val, ioaddr + (reg))
181#undef inb 187#define er8(reg) ioread8(ioaddr + (reg))
182#undef inw 188#define er16(reg) ioread16(ioaddr + (reg))
183#undef inl 189#define er32(reg) ioread32(ioaddr + (reg))
184#undef outb
185#undef outw
186#undef outl
187#define inb readb
188#define inw readw
189#define inl readl
190#define outb writeb
191#define outw writew
192#define outl writel
193#endif
194 190
195/* Offsets to registers, using the (ugh) SMC names. */ 191/* Offsets to registers, using the (ugh) SMC names. */
196enum epic_registers { 192enum epic_registers {
@@ -275,6 +271,7 @@ struct epic_private {
275 u32 irq_mask; 271 u32 irq_mask;
276 unsigned int rx_buf_sz; /* Based on MTU+slack. */ 272 unsigned int rx_buf_sz; /* Based on MTU+slack. */
277 273
274 void __iomem *ioaddr;
278 struct pci_dev *pci_dev; /* PCI bus location. */ 275 struct pci_dev *pci_dev; /* PCI bus location. */
279 int chip_id, chip_flags; 276 int chip_id, chip_flags;
280 277
@@ -290,7 +287,7 @@ struct epic_private {
290}; 287};
291 288
292static int epic_open(struct net_device *dev); 289static int epic_open(struct net_device *dev);
293static int read_eeprom(long ioaddr, int location); 290static int read_eeprom(struct epic_private *, int);
294static int mdio_read(struct net_device *dev, int phy_id, int location); 291static int mdio_read(struct net_device *dev, int phy_id, int location);
295static void mdio_write(struct net_device *dev, int phy_id, int loc, int val); 292static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
296static void epic_restart(struct net_device *dev); 293static void epic_restart(struct net_device *dev);
@@ -321,11 +318,11 @@ static const struct net_device_ops epic_netdev_ops = {
321 .ndo_validate_addr = eth_validate_addr, 318 .ndo_validate_addr = eth_validate_addr,
322}; 319};
323 320
324static int __devinit epic_init_one (struct pci_dev *pdev, 321static int __devinit epic_init_one(struct pci_dev *pdev,
325 const struct pci_device_id *ent) 322 const struct pci_device_id *ent)
326{ 323{
327 static int card_idx = -1; 324 static int card_idx = -1;
328 long ioaddr; 325 void __iomem *ioaddr;
329 int chip_idx = (int) ent->driver_data; 326 int chip_idx = (int) ent->driver_data;
330 int irq; 327 int irq;
331 struct net_device *dev; 328 struct net_device *dev;
@@ -368,19 +365,15 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
368 365
369 SET_NETDEV_DEV(dev, &pdev->dev); 366 SET_NETDEV_DEV(dev, &pdev->dev);
370 367
371#ifdef USE_IO_OPS 368 ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
372 ioaddr = pci_resource_start (pdev, 0);
373#else
374 ioaddr = pci_resource_start (pdev, 1);
375 ioaddr = (long) pci_ioremap_bar(pdev, 1);
376 if (!ioaddr) { 369 if (!ioaddr) {
377 dev_err(&pdev->dev, "ioremap failed\n"); 370 dev_err(&pdev->dev, "ioremap failed\n");
378 goto err_out_free_netdev; 371 goto err_out_free_netdev;
379 } 372 }
380#endif
381 373
382 pci_set_drvdata(pdev, dev); 374 pci_set_drvdata(pdev, dev);
383 ep = netdev_priv(dev); 375 ep = netdev_priv(dev);
376 ep->ioaddr = ioaddr;
384 ep->mii.dev = dev; 377 ep->mii.dev = dev;
385 ep->mii.mdio_read = mdio_read; 378 ep->mii.mdio_read = mdio_read;
386 ep->mii.mdio_write = mdio_write; 379 ep->mii.mdio_write = mdio_write;
@@ -409,34 +402,31 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
409 duplex = full_duplex[card_idx]; 402 duplex = full_duplex[card_idx];
410 } 403 }
411 404
412 dev->base_addr = ioaddr;
413 dev->irq = irq;
414
415 spin_lock_init(&ep->lock); 405 spin_lock_init(&ep->lock);
416 spin_lock_init(&ep->napi_lock); 406 spin_lock_init(&ep->napi_lock);
417 ep->reschedule_in_poll = 0; 407 ep->reschedule_in_poll = 0;
418 408
419 /* Bring the chip out of low-power mode. */ 409 /* Bring the chip out of low-power mode. */
420 outl(0x4200, ioaddr + GENCTL); 410 ew32(GENCTL, 0x4200);
421 /* Magic?! If we don't set this bit the MII interface won't work. */ 411 /* Magic?! If we don't set this bit the MII interface won't work. */
422 /* This magic is documented in SMSC app note 7.15 */ 412 /* This magic is documented in SMSC app note 7.15 */
423 for (i = 16; i > 0; i--) 413 for (i = 16; i > 0; i--)
424 outl(0x0008, ioaddr + TEST1); 414 ew32(TEST1, 0x0008);
425 415
426 /* Turn on the MII transceiver. */ 416 /* Turn on the MII transceiver. */
427 outl(0x12, ioaddr + MIICfg); 417 ew32(MIICfg, 0x12);
428 if (chip_idx == 1) 418 if (chip_idx == 1)
429 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 419 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
430 outl(0x0200, ioaddr + GENCTL); 420 ew32(GENCTL, 0x0200);
431 421
432 /* Note: the '175 does not have a serial EEPROM. */ 422 /* Note: the '175 does not have a serial EEPROM. */
433 for (i = 0; i < 3; i++) 423 for (i = 0; i < 3; i++)
434 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4)); 424 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
435 425
436 if (debug > 2) { 426 if (debug > 2) {
437 dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n"); 427 dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
438 for (i = 0; i < 64; i++) 428 for (i = 0; i < 64; i++)
439 printk(" %4.4x%s", read_eeprom(ioaddr, i), 429 printk(" %4.4x%s", read_eeprom(ep, i),
440 i % 16 == 15 ? "\n" : ""); 430 i % 16 == 15 ? "\n" : "");
441 } 431 }
442 432
@@ -481,8 +471,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
481 471
482 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */ 472 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
483 if (ep->chip_flags & MII_PWRDWN) 473 if (ep->chip_flags & MII_PWRDWN)
484 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL); 474 ew32(NVCTL, er32(NVCTL) & ~0x483c);
485 outl(0x0008, ioaddr + GENCTL); 475 ew32(GENCTL, 0x0008);
486 476
487 /* The lower four bits are the media type. */ 477 /* The lower four bits are the media type. */
488 if (duplex) { 478 if (duplex) {
@@ -501,8 +491,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
501 if (ret < 0) 491 if (ret < 0)
502 goto err_out_unmap_rx; 492 goto err_out_unmap_rx;
503 493
504 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n", 494 printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
505 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq, 495 dev->name, pci_id_tbl[chip_idx].name,
496 (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
506 dev->dev_addr); 497 dev->dev_addr);
507 498
508out: 499out:
@@ -513,10 +504,8 @@ err_out_unmap_rx:
513err_out_unmap_tx: 504err_out_unmap_tx:
514 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); 505 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
515err_out_iounmap: 506err_out_iounmap:
516#ifndef USE_IO_OPS 507 pci_iounmap(pdev, ioaddr);
517 iounmap(ioaddr);
518err_out_free_netdev: 508err_out_free_netdev:
519#endif
520 free_netdev(dev); 509 free_netdev(dev);
521err_out_free_res: 510err_out_free_res:
522 pci_release_regions(pdev); 511 pci_release_regions(pdev);
@@ -540,7 +529,7 @@ err_out_disable:
540 This serves to flush the operation to the PCI bus. 529 This serves to flush the operation to the PCI bus.
541 */ 530 */
542 531
543#define eeprom_delay() inl(ee_addr) 532#define eeprom_delay() er32(EECTL)
544 533
545/* The EEPROM commands include the alway-set leading bit. */ 534/* The EEPROM commands include the alway-set leading bit. */
546#define EE_WRITE_CMD (5 << 6) 535#define EE_WRITE_CMD (5 << 6)
@@ -550,67 +539,67 @@ err_out_disable:
550 539
551static void epic_disable_int(struct net_device *dev, struct epic_private *ep) 540static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
552{ 541{
553 long ioaddr = dev->base_addr; 542 void __iomem *ioaddr = ep->ioaddr;
554 543
555 outl(0x00000000, ioaddr + INTMASK); 544 ew32(INTMASK, 0x00000000);
556} 545}
557 546
558static inline void __epic_pci_commit(long ioaddr) 547static inline void __epic_pci_commit(void __iomem *ioaddr)
559{ 548{
560#ifndef USE_IO_OPS 549#ifndef USE_IO_OPS
561 inl(ioaddr + INTMASK); 550 er32(INTMASK);
562#endif 551#endif
563} 552}
564 553
565static inline void epic_napi_irq_off(struct net_device *dev, 554static inline void epic_napi_irq_off(struct net_device *dev,
566 struct epic_private *ep) 555 struct epic_private *ep)
567{ 556{
568 long ioaddr = dev->base_addr; 557 void __iomem *ioaddr = ep->ioaddr;
569 558
570 outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK); 559 ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
571 __epic_pci_commit(ioaddr); 560 __epic_pci_commit(ioaddr);
572} 561}
573 562
574static inline void epic_napi_irq_on(struct net_device *dev, 563static inline void epic_napi_irq_on(struct net_device *dev,
575 struct epic_private *ep) 564 struct epic_private *ep)
576{ 565{
577 long ioaddr = dev->base_addr; 566 void __iomem *ioaddr = ep->ioaddr;
578 567
579 /* No need to commit possible posted write */ 568 /* No need to commit possible posted write */
580 outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK); 569 ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
581} 570}
582 571
583static int __devinit read_eeprom(long ioaddr, int location) 572static int __devinit read_eeprom(struct epic_private *ep, int location)
584{ 573{
574 void __iomem *ioaddr = ep->ioaddr;
585 int i; 575 int i;
586 int retval = 0; 576 int retval = 0;
587 long ee_addr = ioaddr + EECTL;
588 int read_cmd = location | 577 int read_cmd = location |
589 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD); 578 (er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
590 579
591 outl(EE_ENB & ~EE_CS, ee_addr); 580 ew32(EECTL, EE_ENB & ~EE_CS);
592 outl(EE_ENB, ee_addr); 581 ew32(EECTL, EE_ENB);
593 582
594 /* Shift the read command bits out. */ 583 /* Shift the read command bits out. */
595 for (i = 12; i >= 0; i--) { 584 for (i = 12; i >= 0; i--) {
596 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0; 585 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
597 outl(EE_ENB | dataval, ee_addr); 586 ew32(EECTL, EE_ENB | dataval);
598 eeprom_delay(); 587 eeprom_delay();
599 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); 588 ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
600 eeprom_delay(); 589 eeprom_delay();
601 } 590 }
602 outl(EE_ENB, ee_addr); 591 ew32(EECTL, EE_ENB);
603 592
604 for (i = 16; i > 0; i--) { 593 for (i = 16; i > 0; i--) {
605 outl(EE_ENB | EE_SHIFT_CLK, ee_addr); 594 ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
606 eeprom_delay(); 595 eeprom_delay();
607 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0); 596 retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
608 outl(EE_ENB, ee_addr); 597 ew32(EECTL, EE_ENB);
609 eeprom_delay(); 598 eeprom_delay();
610 } 599 }
611 600
612 /* Terminate the EEPROM access. */ 601 /* Terminate the EEPROM access. */
613 outl(EE_ENB & ~EE_CS, ee_addr); 602 ew32(EECTL, EE_ENB & ~EE_CS);
614 return retval; 603 return retval;
615} 604}
616 605
@@ -618,22 +607,23 @@ static int __devinit read_eeprom(long ioaddr, int location)
618#define MII_WRITEOP 2 607#define MII_WRITEOP 2
619static int mdio_read(struct net_device *dev, int phy_id, int location) 608static int mdio_read(struct net_device *dev, int phy_id, int location)
620{ 609{
621 long ioaddr = dev->base_addr; 610 struct epic_private *ep = netdev_priv(dev);
611 void __iomem *ioaddr = ep->ioaddr;
622 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP; 612 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
623 int i; 613 int i;
624 614
625 outl(read_cmd, ioaddr + MIICtrl); 615 ew32(MIICtrl, read_cmd);
626 /* Typical operation takes 25 loops. */ 616 /* Typical operation takes 25 loops. */
627 for (i = 400; i > 0; i--) { 617 for (i = 400; i > 0; i--) {
628 barrier(); 618 barrier();
629 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) { 619 if ((er32(MIICtrl) & MII_READOP) == 0) {
630 /* Work around read failure bug. */ 620 /* Work around read failure bug. */
631 if (phy_id == 1 && location < 6 && 621 if (phy_id == 1 && location < 6 &&
632 inw(ioaddr + MIIData) == 0xffff) { 622 er16(MIIData) == 0xffff) {
633 outl(read_cmd, ioaddr + MIICtrl); 623 ew32(MIICtrl, read_cmd);
634 continue; 624 continue;
635 } 625 }
636 return inw(ioaddr + MIIData); 626 return er16(MIIData);
637 } 627 }
638 } 628 }
639 return 0xffff; 629 return 0xffff;
@@ -641,14 +631,15 @@ static int mdio_read(struct net_device *dev, int phy_id, int location)
641 631
642static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) 632static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
643{ 633{
644 long ioaddr = dev->base_addr; 634 struct epic_private *ep = netdev_priv(dev);
635 void __iomem *ioaddr = ep->ioaddr;
645 int i; 636 int i;
646 637
647 outw(value, ioaddr + MIIData); 638 ew16(MIIData, value);
648 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl); 639 ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
649 for (i = 10000; i > 0; i--) { 640 for (i = 10000; i > 0; i--) {
650 barrier(); 641 barrier();
651 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0) 642 if ((er32(MIICtrl) & MII_WRITEOP) == 0)
652 break; 643 break;
653 } 644 }
654} 645}
@@ -657,25 +648,26 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
657static int epic_open(struct net_device *dev) 648static int epic_open(struct net_device *dev)
658{ 649{
659 struct epic_private *ep = netdev_priv(dev); 650 struct epic_private *ep = netdev_priv(dev);
660 long ioaddr = dev->base_addr; 651 void __iomem *ioaddr = ep->ioaddr;
661 int i; 652 const int irq = ep->pci_dev->irq;
662 int retval; 653 int rc, i;
663 654
664 /* Soft reset the chip. */ 655 /* Soft reset the chip. */
665 outl(0x4001, ioaddr + GENCTL); 656 ew32(GENCTL, 0x4001);
666 657
667 napi_enable(&ep->napi); 658 napi_enable(&ep->napi);
668 if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) { 659 rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
660 if (rc) {
669 napi_disable(&ep->napi); 661 napi_disable(&ep->napi);
670 return retval; 662 return rc;
671 } 663 }
672 664
673 epic_init_ring(dev); 665 epic_init_ring(dev);
674 666
675 outl(0x4000, ioaddr + GENCTL); 667 ew32(GENCTL, 0x4000);
676 /* This magic is documented in SMSC app note 7.15 */ 668 /* This magic is documented in SMSC app note 7.15 */
677 for (i = 16; i > 0; i--) 669 for (i = 16; i > 0; i--)
678 outl(0x0008, ioaddr + TEST1); 670 ew32(TEST1, 0x0008);
679 671
680 /* Pull the chip out of low-power mode, enable interrupts, and set for 672 /* Pull the chip out of low-power mode, enable interrupts, and set for
681 PCI read multiple. The MIIcfg setting and strange write order are 673 PCI read multiple. The MIIcfg setting and strange write order are
@@ -683,29 +675,29 @@ static int epic_open(struct net_device *dev)
683 wiring on the Ositech CardBus card. 675 wiring on the Ositech CardBus card.
684 */ 676 */
685#if 0 677#if 0
686 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); 678 ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
687#endif 679#endif
688 if (ep->chip_flags & MII_PWRDWN) 680 if (ep->chip_flags & MII_PWRDWN)
689 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 681 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
690 682
691 /* Tell the chip to byteswap descriptors on big-endian hosts */ 683 /* Tell the chip to byteswap descriptors on big-endian hosts */
692#ifdef __BIG_ENDIAN 684#ifdef __BIG_ENDIAN
693 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 685 ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
694 inl(ioaddr + GENCTL); 686 er32(GENCTL);
695 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 687 ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
696#else 688#else
697 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 689 ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
698 inl(ioaddr + GENCTL); 690 er32(GENCTL);
699 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 691 ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
700#endif 692#endif
701 693
702 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */ 694 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
703 695
704 for (i = 0; i < 3; i++) 696 for (i = 0; i < 3; i++)
705 outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); 697 ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
706 698
707 ep->tx_threshold = TX_FIFO_THRESH; 699 ep->tx_threshold = TX_FIFO_THRESH;
708 outl(ep->tx_threshold, ioaddr + TxThresh); 700 ew32(TxThresh, ep->tx_threshold);
709 701
710 if (media2miictl[dev->if_port & 15]) { 702 if (media2miictl[dev->if_port & 15]) {
711 if (ep->mii_phy_cnt) 703 if (ep->mii_phy_cnt)
@@ -731,26 +723,27 @@ static int epic_open(struct net_device *dev)
731 } 723 }
732 } 724 }
733 725
734 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); 726 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
735 outl(ep->rx_ring_dma, ioaddr + PRxCDAR); 727 ew32(PRxCDAR, ep->rx_ring_dma);
736 outl(ep->tx_ring_dma, ioaddr + PTxCDAR); 728 ew32(PTxCDAR, ep->tx_ring_dma);
737 729
738 /* Start the chip's Rx process. */ 730 /* Start the chip's Rx process. */
739 set_rx_mode(dev); 731 set_rx_mode(dev);
740 outl(StartRx | RxQueued, ioaddr + COMMAND); 732 ew32(COMMAND, StartRx | RxQueued);
741 733
742 netif_start_queue(dev); 734 netif_start_queue(dev);
743 735
744 /* Enable interrupts by setting the interrupt mask. */ 736 /* Enable interrupts by setting the interrupt mask. */
745 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) 737 ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
746 | CntFull | TxUnderrun 738 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
747 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); 739 TxUnderrun);
748 740
749 if (debug > 1) 741 if (debug > 1) {
750 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x " 742 printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
751 "%s-duplex.\n", 743 "status %4.4x %s-duplex.\n",
752 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL), 744 dev->name, ioaddr, irq, er32(GENCTL),
753 ep->mii.full_duplex ? "full" : "half"); 745 ep->mii.full_duplex ? "full" : "half");
746 }
754 747
755 /* Set the timer to switch to check for link beat and perhaps switch 748 /* Set the timer to switch to check for link beat and perhaps switch
756 to an alternate media type. */ 749 to an alternate media type. */
@@ -760,27 +753,29 @@ static int epic_open(struct net_device *dev)
760 ep->timer.function = epic_timer; /* timer handler */ 753 ep->timer.function = epic_timer; /* timer handler */
761 add_timer(&ep->timer); 754 add_timer(&ep->timer);
762 755
763 return 0; 756 return rc;
764} 757}
765 758
766/* Reset the chip to recover from a PCI transaction error. 759/* Reset the chip to recover from a PCI transaction error.
767 This may occur at interrupt time. */ 760 This may occur at interrupt time. */
768static void epic_pause(struct net_device *dev) 761static void epic_pause(struct net_device *dev)
769{ 762{
770 long ioaddr = dev->base_addr; 763 struct net_device_stats *stats = &dev->stats;
764 struct epic_private *ep = netdev_priv(dev);
765 void __iomem *ioaddr = ep->ioaddr;
771 766
772 netif_stop_queue (dev); 767 netif_stop_queue (dev);
773 768
774 /* Disable interrupts by clearing the interrupt mask. */ 769 /* Disable interrupts by clearing the interrupt mask. */
775 outl(0x00000000, ioaddr + INTMASK); 770 ew32(INTMASK, 0x00000000);
776 /* Stop the chip's Tx and Rx DMA processes. */ 771 /* Stop the chip's Tx and Rx DMA processes. */
777 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND); 772 ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);
778 773
779 /* Update the error counts. */ 774 /* Update the error counts. */
780 if (inw(ioaddr + COMMAND) != 0xffff) { 775 if (er16(COMMAND) != 0xffff) {
781 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); 776 stats->rx_missed_errors += er8(MPCNT);
782 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); 777 stats->rx_frame_errors += er8(ALICNT);
783 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 778 stats->rx_crc_errors += er8(CRCCNT);
784 } 779 }
785 780
786 /* Remove the packets on the Rx queue. */ 781 /* Remove the packets on the Rx queue. */
@@ -789,12 +784,12 @@ static void epic_pause(struct net_device *dev)
789 784
790static void epic_restart(struct net_device *dev) 785static void epic_restart(struct net_device *dev)
791{ 786{
792 long ioaddr = dev->base_addr;
793 struct epic_private *ep = netdev_priv(dev); 787 struct epic_private *ep = netdev_priv(dev);
788 void __iomem *ioaddr = ep->ioaddr;
794 int i; 789 int i;
795 790
796 /* Soft reset the chip. */ 791 /* Soft reset the chip. */
797 outl(0x4001, ioaddr + GENCTL); 792 ew32(GENCTL, 0x4001);
798 793
799 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n", 794 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
800 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); 795 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
@@ -802,47 +797,46 @@ static void epic_restart(struct net_device *dev)
802 797
803 /* This magic is documented in SMSC app note 7.15 */ 798 /* This magic is documented in SMSC app note 7.15 */
804 for (i = 16; i > 0; i--) 799 for (i = 16; i > 0; i--)
805 outl(0x0008, ioaddr + TEST1); 800 ew32(TEST1, 0x0008);
806 801
807#ifdef __BIG_ENDIAN 802#ifdef __BIG_ENDIAN
808 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 803 ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
809#else 804#else
810 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 805 ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
811#endif 806#endif
812 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); 807 ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
813 if (ep->chip_flags & MII_PWRDWN) 808 if (ep->chip_flags & MII_PWRDWN)
814 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 809 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
815 810
816 for (i = 0; i < 3; i++) 811 for (i = 0; i < 3; i++)
817 outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); 812 ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
818 813
819 ep->tx_threshold = TX_FIFO_THRESH; 814 ep->tx_threshold = TX_FIFO_THRESH;
820 outl(ep->tx_threshold, ioaddr + TxThresh); 815 ew32(TxThresh, ep->tx_threshold);
821 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); 816 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
822 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)* 817 ew32(PRxCDAR, ep->rx_ring_dma +
823 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR); 818 (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
824 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)* 819 ew32(PTxCDAR, ep->tx_ring_dma +
825 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR); 820 (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));
826 821
827 /* Start the chip's Rx process. */ 822 /* Start the chip's Rx process. */
828 set_rx_mode(dev); 823 set_rx_mode(dev);
829 outl(StartRx | RxQueued, ioaddr + COMMAND); 824 ew32(COMMAND, StartRx | RxQueued);
830 825
831 /* Enable interrupts by setting the interrupt mask. */ 826 /* Enable interrupts by setting the interrupt mask. */
832 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) 827 ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
833 | CntFull | TxUnderrun 828 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
834 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); 829 TxUnderrun);
835 830
836 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x" 831 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
837 " interrupt %4.4x.\n", 832 " interrupt %4.4x.\n",
838 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL), 833 dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
839 (int)inl(ioaddr + INTSTAT));
840} 834}
841 835
842static void check_media(struct net_device *dev) 836static void check_media(struct net_device *dev)
843{ 837{
844 struct epic_private *ep = netdev_priv(dev); 838 struct epic_private *ep = netdev_priv(dev);
845 long ioaddr = dev->base_addr; 839 void __iomem *ioaddr = ep->ioaddr;
846 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0; 840 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
847 int negotiated = mii_lpa & ep->mii.advertising; 841 int negotiated = mii_lpa & ep->mii.advertising;
848 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; 842 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
@@ -856,7 +850,7 @@ static void check_media(struct net_device *dev)
856 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link" 850 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
857 " partner capability of %4.4x.\n", dev->name, 851 " partner capability of %4.4x.\n", dev->name,
858 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa); 852 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
859 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); 853 ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
860 } 854 }
861} 855}
862 856
@@ -864,16 +858,15 @@ static void epic_timer(unsigned long data)
864{ 858{
865 struct net_device *dev = (struct net_device *)data; 859 struct net_device *dev = (struct net_device *)data;
866 struct epic_private *ep = netdev_priv(dev); 860 struct epic_private *ep = netdev_priv(dev);
867 long ioaddr = dev->base_addr; 861 void __iomem *ioaddr = ep->ioaddr;
868 int next_tick = 5*HZ; 862 int next_tick = 5*HZ;
869 863
870 if (debug > 3) { 864 if (debug > 3) {
871 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n", 865 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
872 dev->name, (int)inl(ioaddr + TxSTAT)); 866 dev->name, er32(TxSTAT));
873 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x " 867 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
874 "IntStatus %4.4x RxStatus %4.4x.\n", 868 "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
875 dev->name, (int)inl(ioaddr + INTMASK), 869 er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
876 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
877 } 870 }
878 871
879 check_media(dev); 872 check_media(dev);
@@ -885,23 +878,22 @@ static void epic_timer(unsigned long data)
885static void epic_tx_timeout(struct net_device *dev) 878static void epic_tx_timeout(struct net_device *dev)
886{ 879{
887 struct epic_private *ep = netdev_priv(dev); 880 struct epic_private *ep = netdev_priv(dev);
888 long ioaddr = dev->base_addr; 881 void __iomem *ioaddr = ep->ioaddr;
889 882
890 if (debug > 0) { 883 if (debug > 0) {
891 printk(KERN_WARNING "%s: Transmit timeout using MII device, " 884 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
892 "Tx status %4.4x.\n", 885 "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
893 dev->name, (int)inw(ioaddr + TxSTAT));
894 if (debug > 1) { 886 if (debug > 1) {
895 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n", 887 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
896 dev->name, ep->dirty_tx, ep->cur_tx); 888 dev->name, ep->dirty_tx, ep->cur_tx);
897 } 889 }
898 } 890 }
899 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */ 891 if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */
900 dev->stats.tx_fifo_errors++; 892 dev->stats.tx_fifo_errors++;
901 outl(RestartTx, ioaddr + COMMAND); 893 ew32(COMMAND, RestartTx);
902 } else { 894 } else {
903 epic_restart(dev); 895 epic_restart(dev);
904 outl(TxQueued, dev->base_addr + COMMAND); 896 ew32(COMMAND, TxQueued);
905 } 897 }
906 898
907 dev->trans_start = jiffies; /* prevent tx timeout */ 899 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -959,6 +951,7 @@ static void epic_init_ring(struct net_device *dev)
959static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) 951static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
960{ 952{
961 struct epic_private *ep = netdev_priv(dev); 953 struct epic_private *ep = netdev_priv(dev);
954 void __iomem *ioaddr = ep->ioaddr;
962 int entry, free_count; 955 int entry, free_count;
963 u32 ctrl_word; 956 u32 ctrl_word;
964 unsigned long flags; 957 unsigned long flags;
@@ -999,13 +992,12 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
999 992
1000 spin_unlock_irqrestore(&ep->lock, flags); 993 spin_unlock_irqrestore(&ep->lock, flags);
1001 /* Trigger an immediate transmit demand. */ 994 /* Trigger an immediate transmit demand. */
1002 outl(TxQueued, dev->base_addr + COMMAND); 995 ew32(COMMAND, TxQueued);
1003 996
1004 if (debug > 4) 997 if (debug > 4)
1005 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, " 998 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1006 "flag %2.2x Tx status %8.8x.\n", 999 "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
1007 dev->name, (int)skb->len, entry, ctrl_word, 1000 entry, ctrl_word, er32(TxSTAT));
1008 (int)inl(dev->base_addr + TxSTAT));
1009 1001
1010 return NETDEV_TX_OK; 1002 return NETDEV_TX_OK;
1011} 1003}
@@ -1086,18 +1078,17 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1086{ 1078{
1087 struct net_device *dev = dev_instance; 1079 struct net_device *dev = dev_instance;
1088 struct epic_private *ep = netdev_priv(dev); 1080 struct epic_private *ep = netdev_priv(dev);
1089 long ioaddr = dev->base_addr; 1081 void __iomem *ioaddr = ep->ioaddr;
1090 unsigned int handled = 0; 1082 unsigned int handled = 0;
1091 int status; 1083 int status;
1092 1084
1093 status = inl(ioaddr + INTSTAT); 1085 status = er32(INTSTAT);
1094 /* Acknowledge all of the current interrupt sources ASAP. */ 1086 /* Acknowledge all of the current interrupt sources ASAP. */
1095 outl(status & EpicNormalEvent, ioaddr + INTSTAT); 1087 ew32(INTSTAT, status & EpicNormalEvent);
1096 1088
1097 if (debug > 4) { 1089 if (debug > 4) {
1098 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new " 1090 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
1099 "intstat=%#8.8x.\n", dev->name, status, 1091 "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
1100 (int)inl(ioaddr + INTSTAT));
1101 } 1092 }
1102 1093
1103 if ((status & IntrSummary) == 0) 1094 if ((status & IntrSummary) == 0)
@@ -1118,19 +1109,21 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1118 1109
1119 /* Check uncommon events all at once. */ 1110 /* Check uncommon events all at once. */
1120 if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) { 1111 if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
1112 struct net_device_stats *stats = &dev->stats;
1113
1121 if (status == EpicRemoved) 1114 if (status == EpicRemoved)
1122 goto out; 1115 goto out;
1123 1116
1124 /* Always update the error counts to avoid overhead later. */ 1117 /* Always update the error counts to avoid overhead later. */
1125 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); 1118 stats->rx_missed_errors += er8(MPCNT);
1126 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); 1119 stats->rx_frame_errors += er8(ALICNT);
1127 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 1120 stats->rx_crc_errors += er8(CRCCNT);
1128 1121
1129 if (status & TxUnderrun) { /* Tx FIFO underflow. */ 1122 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1130 dev->stats.tx_fifo_errors++; 1123 stats->tx_fifo_errors++;
1131 outl(ep->tx_threshold += 128, ioaddr + TxThresh); 1124 ew32(TxThresh, ep->tx_threshold += 128);
1132 /* Restart the transmit process. */ 1125 /* Restart the transmit process. */
1133 outl(RestartTx, ioaddr + COMMAND); 1126 ew32(COMMAND, RestartTx);
1134 } 1127 }
1135 if (status & PCIBusErr170) { 1128 if (status & PCIBusErr170) {
1136 printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n", 1129 printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
@@ -1139,7 +1132,7 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1139 epic_restart(dev); 1132 epic_restart(dev);
1140 } 1133 }
1141 /* Clear all error sources. */ 1134 /* Clear all error sources. */
1142 outl(status & 0x7f18, ioaddr + INTSTAT); 1135 ew32(INTSTAT, status & 0x7f18);
1143 } 1136 }
1144 1137
1145out: 1138out:
@@ -1248,17 +1241,17 @@ static int epic_rx(struct net_device *dev, int budget)
1248 1241
1249static void epic_rx_err(struct net_device *dev, struct epic_private *ep) 1242static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1250{ 1243{
1251 long ioaddr = dev->base_addr; 1244 void __iomem *ioaddr = ep->ioaddr;
1252 int status; 1245 int status;
1253 1246
1254 status = inl(ioaddr + INTSTAT); 1247 status = er32(INTSTAT);
1255 1248
1256 if (status == EpicRemoved) 1249 if (status == EpicRemoved)
1257 return; 1250 return;
1258 if (status & RxOverflow) /* Missed a Rx frame. */ 1251 if (status & RxOverflow) /* Missed a Rx frame. */
1259 dev->stats.rx_errors++; 1252 dev->stats.rx_errors++;
1260 if (status & (RxOverflow | RxFull)) 1253 if (status & (RxOverflow | RxFull))
1261 outw(RxQueued, ioaddr + COMMAND); 1254 ew16(COMMAND, RxQueued);
1262} 1255}
1263 1256
1264static int epic_poll(struct napi_struct *napi, int budget) 1257static int epic_poll(struct napi_struct *napi, int budget)
@@ -1266,7 +1259,7 @@ static int epic_poll(struct napi_struct *napi, int budget)
1266 struct epic_private *ep = container_of(napi, struct epic_private, napi); 1259 struct epic_private *ep = container_of(napi, struct epic_private, napi);
1267 struct net_device *dev = ep->mii.dev; 1260 struct net_device *dev = ep->mii.dev;
1268 int work_done = 0; 1261 int work_done = 0;
1269 long ioaddr = dev->base_addr; 1262 void __iomem *ioaddr = ep->ioaddr;
1270 1263
1271rx_action: 1264rx_action:
1272 1265
@@ -1287,7 +1280,7 @@ rx_action:
1287 more = ep->reschedule_in_poll; 1280 more = ep->reschedule_in_poll;
1288 if (!more) { 1281 if (!more) {
1289 __napi_complete(napi); 1282 __napi_complete(napi);
1290 outl(EpicNapiEvent, ioaddr + INTSTAT); 1283 ew32(INTSTAT, EpicNapiEvent);
1291 epic_napi_irq_on(dev, ep); 1284 epic_napi_irq_on(dev, ep);
1292 } else 1285 } else
1293 ep->reschedule_in_poll--; 1286 ep->reschedule_in_poll--;
@@ -1303,8 +1296,9 @@ rx_action:
1303 1296
1304static int epic_close(struct net_device *dev) 1297static int epic_close(struct net_device *dev)
1305{ 1298{
1306 long ioaddr = dev->base_addr;
1307 struct epic_private *ep = netdev_priv(dev); 1299 struct epic_private *ep = netdev_priv(dev);
1300 struct pci_dev *pdev = ep->pci_dev;
1301 void __iomem *ioaddr = ep->ioaddr;
1308 struct sk_buff *skb; 1302 struct sk_buff *skb;
1309 int i; 1303 int i;
1310 1304
@@ -1313,13 +1307,13 @@ static int epic_close(struct net_device *dev)
1313 1307
1314 if (debug > 1) 1308 if (debug > 1)
1315 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", 1309 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1316 dev->name, (int)inl(ioaddr + INTSTAT)); 1310 dev->name, er32(INTSTAT));
1317 1311
1318 del_timer_sync(&ep->timer); 1312 del_timer_sync(&ep->timer);
1319 1313
1320 epic_disable_int(dev, ep); 1314 epic_disable_int(dev, ep);
1321 1315
1322 free_irq(dev->irq, dev); 1316 free_irq(pdev->irq, dev);
1323 1317
1324 epic_pause(dev); 1318 epic_pause(dev);
1325 1319
@@ -1330,7 +1324,7 @@ static int epic_close(struct net_device *dev)
1330 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ 1324 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1331 ep->rx_ring[i].buflength = 0; 1325 ep->rx_ring[i].buflength = 0;
1332 if (skb) { 1326 if (skb) {
1333 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr, 1327 pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
1334 ep->rx_buf_sz, PCI_DMA_FROMDEVICE); 1328 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1335 dev_kfree_skb(skb); 1329 dev_kfree_skb(skb);
1336 } 1330 }
@@ -1341,26 +1335,28 @@ static int epic_close(struct net_device *dev)
1341 ep->tx_skbuff[i] = NULL; 1335 ep->tx_skbuff[i] = NULL;
1342 if (!skb) 1336 if (!skb)
1343 continue; 1337 continue;
1344 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr, 1338 pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
1345 skb->len, PCI_DMA_TODEVICE); 1339 PCI_DMA_TODEVICE);
1346 dev_kfree_skb(skb); 1340 dev_kfree_skb(skb);
1347 } 1341 }
1348 1342
1349 /* Green! Leave the chip in low-power mode. */ 1343 /* Green! Leave the chip in low-power mode. */
1350 outl(0x0008, ioaddr + GENCTL); 1344 ew32(GENCTL, 0x0008);
1351 1345
1352 return 0; 1346 return 0;
1353} 1347}
1354 1348
1355static struct net_device_stats *epic_get_stats(struct net_device *dev) 1349static struct net_device_stats *epic_get_stats(struct net_device *dev)
1356{ 1350{
1357 long ioaddr = dev->base_addr; 1351 struct epic_private *ep = netdev_priv(dev);
1352 void __iomem *ioaddr = ep->ioaddr;
1358 1353
1359 if (netif_running(dev)) { 1354 if (netif_running(dev)) {
1360 /* Update the error counts. */ 1355 struct net_device_stats *stats = &dev->stats;
1361 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); 1356
1362 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); 1357 stats->rx_missed_errors += er8(MPCNT);
1363 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 1358 stats->rx_frame_errors += er8(ALICNT);
1359 stats->rx_crc_errors += er8(CRCCNT);
1364 } 1360 }
1365 1361
1366 return &dev->stats; 1362 return &dev->stats;
@@ -1373,13 +1369,13 @@ static struct net_device_stats *epic_get_stats(struct net_device *dev)
1373 1369
1374static void set_rx_mode(struct net_device *dev) 1370static void set_rx_mode(struct net_device *dev)
1375{ 1371{
1376 long ioaddr = dev->base_addr;
1377 struct epic_private *ep = netdev_priv(dev); 1372 struct epic_private *ep = netdev_priv(dev);
1373 void __iomem *ioaddr = ep->ioaddr;
1378 unsigned char mc_filter[8]; /* Multicast hash filter */ 1374 unsigned char mc_filter[8]; /* Multicast hash filter */
1379 int i; 1375 int i;
1380 1376
1381 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1377 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1382 outl(0x002C, ioaddr + RxCtrl); 1378 ew32(RxCtrl, 0x002c);
1383 /* Unconditionally log net taps. */ 1379 /* Unconditionally log net taps. */
1384 memset(mc_filter, 0xff, sizeof(mc_filter)); 1380 memset(mc_filter, 0xff, sizeof(mc_filter));
1385 } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) { 1381 } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
@@ -1387,9 +1383,9 @@ static void set_rx_mode(struct net_device *dev)
1387 is never enabled. */ 1383 is never enabled. */
1388 /* Too many to filter perfectly -- accept all multicasts. */ 1384 /* Too many to filter perfectly -- accept all multicasts. */
1389 memset(mc_filter, 0xff, sizeof(mc_filter)); 1385 memset(mc_filter, 0xff, sizeof(mc_filter));
1390 outl(0x000C, ioaddr + RxCtrl); 1386 ew32(RxCtrl, 0x000c);
1391 } else if (netdev_mc_empty(dev)) { 1387 } else if (netdev_mc_empty(dev)) {
1392 outl(0x0004, ioaddr + RxCtrl); 1388 ew32(RxCtrl, 0x0004);
1393 return; 1389 return;
1394 } else { /* Never executed, for now. */ 1390 } else { /* Never executed, for now. */
1395 struct netdev_hw_addr *ha; 1391 struct netdev_hw_addr *ha;
@@ -1404,7 +1400,7 @@ static void set_rx_mode(struct net_device *dev)
1404 /* ToDo: perhaps we need to stop the Tx and Rx process here? */ 1400 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1405 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) { 1401 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1406 for (i = 0; i < 4; i++) 1402 for (i = 0; i < 4; i++)
1407 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4); 1403 ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1408 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); 1404 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1409 } 1405 }
1410} 1406}
@@ -1466,22 +1462,26 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value)
1466 1462
1467static int ethtool_begin(struct net_device *dev) 1463static int ethtool_begin(struct net_device *dev)
1468{ 1464{
1469 unsigned long ioaddr = dev->base_addr; 1465 struct epic_private *ep = netdev_priv(dev);
1466 void __iomem *ioaddr = ep->ioaddr;
1467
1470 /* power-up, if interface is down */ 1468 /* power-up, if interface is down */
1471 if (! netif_running(dev)) { 1469 if (!netif_running(dev)) {
1472 outl(0x0200, ioaddr + GENCTL); 1470 ew32(GENCTL, 0x0200);
1473 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 1471 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1474 } 1472 }
1475 return 0; 1473 return 0;
1476} 1474}
1477 1475
1478static void ethtool_complete(struct net_device *dev) 1476static void ethtool_complete(struct net_device *dev)
1479{ 1477{
1480 unsigned long ioaddr = dev->base_addr; 1478 struct epic_private *ep = netdev_priv(dev);
1479 void __iomem *ioaddr = ep->ioaddr;
1480
1481 /* power-down, if interface is down */ 1481 /* power-down, if interface is down */
1482 if (! netif_running(dev)) { 1482 if (!netif_running(dev)) {
1483 outl(0x0008, ioaddr + GENCTL); 1483 ew32(GENCTL, 0x0008);
1484 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); 1484 ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1485 } 1485 }
1486} 1486}
1487 1487
@@ -1500,14 +1500,14 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1500static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1500static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1501{ 1501{
1502 struct epic_private *np = netdev_priv(dev); 1502 struct epic_private *np = netdev_priv(dev);
1503 long ioaddr = dev->base_addr; 1503 void __iomem *ioaddr = np->ioaddr;
1504 struct mii_ioctl_data *data = if_mii(rq); 1504 struct mii_ioctl_data *data = if_mii(rq);
1505 int rc; 1505 int rc;
1506 1506
1507 /* power-up, if interface is down */ 1507 /* power-up, if interface is down */
1508 if (! netif_running(dev)) { 1508 if (! netif_running(dev)) {
1509 outl(0x0200, ioaddr + GENCTL); 1509 ew32(GENCTL, 0x0200);
1510 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 1510 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1511 } 1511 }
1512 1512
1513 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */ 1513 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
@@ -1517,14 +1517,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1517 1517
1518 /* power-down, if interface is down */ 1518 /* power-down, if interface is down */
1519 if (! netif_running(dev)) { 1519 if (! netif_running(dev)) {
1520 outl(0x0008, ioaddr + GENCTL); 1520 ew32(GENCTL, 0x0008);
1521 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); 1521 ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1522 } 1522 }
1523 return rc; 1523 return rc;
1524} 1524}
1525 1525
1526 1526
1527static void __devexit epic_remove_one (struct pci_dev *pdev) 1527static void __devexit epic_remove_one(struct pci_dev *pdev)
1528{ 1528{
1529 struct net_device *dev = pci_get_drvdata(pdev); 1529 struct net_device *dev = pci_get_drvdata(pdev);
1530 struct epic_private *ep = netdev_priv(dev); 1530 struct epic_private *ep = netdev_priv(dev);
@@ -1532,9 +1532,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
1532 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); 1532 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1533 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); 1533 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1534 unregister_netdev(dev); 1534 unregister_netdev(dev);
1535#ifndef USE_IO_OPS 1535 pci_iounmap(pdev, ep->ioaddr);
1536 iounmap((void*) dev->base_addr);
1537#endif
1538 pci_release_regions(pdev); 1536 pci_release_regions(pdev);
1539 free_netdev(dev); 1537 free_netdev(dev);
1540 pci_disable_device(pdev); 1538 pci_disable_device(pdev);
@@ -1548,13 +1546,14 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
1548static int epic_suspend (struct pci_dev *pdev, pm_message_t state) 1546static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1549{ 1547{
1550 struct net_device *dev = pci_get_drvdata(pdev); 1548 struct net_device *dev = pci_get_drvdata(pdev);
1551 long ioaddr = dev->base_addr; 1549 struct epic_private *ep = netdev_priv(dev);
1550 void __iomem *ioaddr = ep->ioaddr;
1552 1551
1553 if (!netif_running(dev)) 1552 if (!netif_running(dev))
1554 return 0; 1553 return 0;
1555 epic_pause(dev); 1554 epic_pause(dev);
1556 /* Put the chip into low-power mode. */ 1555 /* Put the chip into low-power mode. */
1557 outl(0x0008, ioaddr + GENCTL); 1556 ew32(GENCTL, 0x0008);
1558 /* pci_power_off(pdev, -1); */ 1557 /* pci_power_off(pdev, -1); */
1559 return 0; 1558 return 0;
1560} 1559}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4a6971027076..dab9c6f671ec 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1166,10 +1166,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
1166 1166
1167/* Quickly dumps bad packets */ 1167/* Quickly dumps bad packets */
1168static void 1168static void
1169smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) 1169smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
1170{ 1170{
1171 unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
1172
1173 if (likely(pktwords >= 4)) { 1171 if (likely(pktwords >= 4)) {
1174 unsigned int timeout = 500; 1172 unsigned int timeout = 500;
1175 unsigned int val; 1173 unsigned int val;
@@ -1233,7 +1231,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1233 continue; 1231 continue;
1234 } 1232 }
1235 1233
1236 skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); 1234 skb = netdev_alloc_skb(dev, pktwords << 2);
1237 if (unlikely(!skb)) { 1235 if (unlikely(!skb)) {
1238 SMSC_WARN(pdata, rx_err, 1236 SMSC_WARN(pdata, rx_err,
1239 "Unable to allocate skb for rx packet"); 1237 "Unable to allocate skb for rx packet");
@@ -1243,14 +1241,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1243 break; 1241 break;
1244 } 1242 }
1245 1243
1246 skb->data = skb->head; 1244 pdata->ops->rx_readfifo(pdata,
1247 skb_reset_tail_pointer(skb); 1245 (unsigned int *)skb->data, pktwords);
1248 1246
1249 /* Align IP on 16B boundary */ 1247 /* Align IP on 16B boundary */
1250 skb_reserve(skb, NET_IP_ALIGN); 1248 skb_reserve(skb, NET_IP_ALIGN);
1251 skb_put(skb, pktlength - 4); 1249 skb_put(skb, pktlength - 4);
1252 pdata->ops->rx_readfifo(pdata,
1253 (unsigned int *)skb->head, pktwords);
1254 skb->protocol = eth_type_trans(skb, dev); 1250 skb->protocol = eth_type_trans(skb, dev);
1255 skb_checksum_none_assert(skb); 1251 skb_checksum_none_assert(skb);
1256 netif_receive_skb(skb); 1252 netif_receive_skb(skb);
@@ -1565,7 +1561,7 @@ static int smsc911x_open(struct net_device *dev)
1565 smsc911x_reg_write(pdata, FIFO_INT, temp); 1561 smsc911x_reg_write(pdata, FIFO_INT, temp);
1566 1562
1567 /* set RX Data offset to 2 bytes for alignment */ 1563 /* set RX Data offset to 2 bytes for alignment */
1568 smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); 1564 smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
1569 1565
1570 /* enable NAPI polling before enabling RX interrupts */ 1566 /* enable NAPI polling before enabling RX interrupts */
1571 napi_enable(&pdata->napi); 1567 napi_enable(&pdata->napi);
@@ -2070,6 +2066,7 @@ static const struct ethtool_ops smsc911x_ethtool_ops = {
2070 .get_eeprom_len = smsc911x_ethtool_get_eeprom_len, 2066 .get_eeprom_len = smsc911x_ethtool_get_eeprom_len,
2071 .get_eeprom = smsc911x_ethtool_get_eeprom, 2067 .get_eeprom = smsc911x_ethtool_get_eeprom,
2072 .set_eeprom = smsc911x_ethtool_set_eeprom, 2068 .set_eeprom = smsc911x_ethtool_set_eeprom,
2069 .get_ts_info = ethtool_op_get_ts_info,
2073}; 2070};
2074 2071
2075static const struct net_device_ops smsc911x_netdev_ops = { 2072static const struct net_device_ops smsc911x_netdev_ops = {
@@ -2382,7 +2379,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2382 SET_NETDEV_DEV(dev, &pdev->dev); 2379 SET_NETDEV_DEV(dev, &pdev->dev);
2383 2380
2384 pdata = netdev_priv(dev); 2381 pdata = netdev_priv(dev);
2385
2386 dev->irq = irq_res->start; 2382 dev->irq = irq_res->start;
2387 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; 2383 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
2388 pdata->ioaddr = ioremap_nocache(res->start, res_size); 2384 pdata->ioaddr = ioremap_nocache(res->start, res_size);
@@ -2446,7 +2442,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2446 if (retval) { 2442 if (retval) {
2447 SMSC_WARN(pdata, probe, 2443 SMSC_WARN(pdata, probe,
2448 "Unable to claim requested irq: %d", dev->irq); 2444 "Unable to claim requested irq: %d", dev->irq);
2449 goto out_free_irq; 2445 goto out_disable_resources;
2450 } 2446 }
2451 2447
2452 retval = register_netdev(dev); 2448 retval = register_netdev(dev);
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 38386478532b..fd33b21f6c96 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -54,7 +54,7 @@ struct smsc9420_ring_info {
54}; 54};
55 55
56struct smsc9420_pdata { 56struct smsc9420_pdata {
57 void __iomem *base_addr; 57 void __iomem *ioaddr;
58 struct pci_dev *pdev; 58 struct pci_dev *pdev;
59 struct net_device *dev; 59 struct net_device *dev;
60 60
@@ -114,13 +114,13 @@ do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
114 114
115static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset) 115static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
116{ 116{
117 return ioread32(pd->base_addr + offset); 117 return ioread32(pd->ioaddr + offset);
118} 118}
119 119
120static inline void 120static inline void
121smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value) 121smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value)
122{ 122{
123 iowrite32(value, pd->base_addr + offset); 123 iowrite32(value, pd->ioaddr + offset);
124} 124}
125 125
126static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd) 126static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
@@ -469,6 +469,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = {
469 .set_eeprom = smsc9420_ethtool_set_eeprom, 469 .set_eeprom = smsc9420_ethtool_set_eeprom,
470 .get_regs_len = smsc9420_ethtool_getregslen, 470 .get_regs_len = smsc9420_ethtool_getregslen,
471 .get_regs = smsc9420_ethtool_getregs, 471 .get_regs = smsc9420_ethtool_getregs,
472 .get_ts_info = ethtool_op_get_ts_info,
472}; 473};
473 474
474/* Sets the device MAC address to dev_addr */ 475/* Sets the device MAC address to dev_addr */
@@ -659,7 +660,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
659 ulong flags; 660 ulong flags;
660 661
661 BUG_ON(!pd); 662 BUG_ON(!pd);
662 BUG_ON(!pd->base_addr); 663 BUG_ON(!pd->ioaddr);
663 664
664 int_cfg = smsc9420_reg_read(pd, INT_CFG); 665 int_cfg = smsc9420_reg_read(pd, INT_CFG);
665 666
@@ -720,9 +721,12 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
720#ifdef CONFIG_NET_POLL_CONTROLLER 721#ifdef CONFIG_NET_POLL_CONTROLLER
721static void smsc9420_poll_controller(struct net_device *dev) 722static void smsc9420_poll_controller(struct net_device *dev)
722{ 723{
723 disable_irq(dev->irq); 724 struct smsc9420_pdata *pd = netdev_priv(dev);
725 const int irq = pd->pdev->irq;
726
727 disable_irq(irq);
724 smsc9420_isr(0, dev); 728 smsc9420_isr(0, dev);
725 enable_irq(dev->irq); 729 enable_irq(irq);
726} 730}
727#endif /* CONFIG_NET_POLL_CONTROLLER */ 731#endif /* CONFIG_NET_POLL_CONTROLLER */
728 732
@@ -759,7 +763,7 @@ static int smsc9420_stop(struct net_device *dev)
759 smsc9420_stop_rx(pd); 763 smsc9420_stop_rx(pd);
760 smsc9420_free_rx_ring(pd); 764 smsc9420_free_rx_ring(pd);
761 765
762 free_irq(dev->irq, pd); 766 free_irq(pd->pdev->irq, pd);
763 767
764 smsc9420_dmac_soft_reset(pd); 768 smsc9420_dmac_soft_reset(pd);
765 769
@@ -1331,15 +1335,12 @@ out:
1331 1335
1332static int smsc9420_open(struct net_device *dev) 1336static int smsc9420_open(struct net_device *dev)
1333{ 1337{
1334 struct smsc9420_pdata *pd; 1338 struct smsc9420_pdata *pd = netdev_priv(dev);
1335 u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl; 1339 u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl;
1340 const int irq = pd->pdev->irq;
1336 unsigned long flags; 1341 unsigned long flags;
1337 int result = 0, timeout; 1342 int result = 0, timeout;
1338 1343
1339 BUG_ON(!dev);
1340 pd = netdev_priv(dev);
1341 BUG_ON(!pd);
1342
1343 if (!is_valid_ether_addr(dev->dev_addr)) { 1344 if (!is_valid_ether_addr(dev->dev_addr)) {
1344 smsc_warn(IFUP, "dev_addr is not a valid MAC address"); 1345 smsc_warn(IFUP, "dev_addr is not a valid MAC address");
1345 result = -EADDRNOTAVAIL; 1346 result = -EADDRNOTAVAIL;
@@ -1358,9 +1359,10 @@ static int smsc9420_open(struct net_device *dev)
1358 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); 1359 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
1359 smsc9420_pci_flush_write(pd); 1360 smsc9420_pci_flush_write(pd);
1360 1361
1361 if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED, 1362 result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
1362 DRV_NAME, pd)) { 1363 DRV_NAME, pd);
1363 smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq); 1364 if (result) {
1365 smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
1364 result = -ENODEV; 1366 result = -ENODEV;
1365 goto out_0; 1367 goto out_0;
1366 } 1368 }
@@ -1395,7 +1397,7 @@ static int smsc9420_open(struct net_device *dev)
1395 smsc9420_pci_flush_write(pd); 1397 smsc9420_pci_flush_write(pd);
1396 1398
1397 /* test the IRQ connection to the ISR */ 1399 /* test the IRQ connection to the ISR */
1398 smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq); 1400 smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq);
1399 pd->software_irq_signal = false; 1401 pd->software_irq_signal = false;
1400 1402
1401 spin_lock_irqsave(&pd->int_lock, flags); 1403 spin_lock_irqsave(&pd->int_lock, flags);
@@ -1430,7 +1432,7 @@ static int smsc9420_open(struct net_device *dev)
1430 goto out_free_irq_1; 1432 goto out_free_irq_1;
1431 } 1433 }
1432 1434
1433 smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq); 1435 smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq);
1434 1436
1435 result = smsc9420_alloc_tx_ring(pd); 1437 result = smsc9420_alloc_tx_ring(pd);
1436 if (result) { 1438 if (result) {
@@ -1490,7 +1492,7 @@ out_free_rx_ring_3:
1490out_free_tx_ring_2: 1492out_free_tx_ring_2:
1491 smsc9420_free_tx_ring(pd); 1493 smsc9420_free_tx_ring(pd);
1492out_free_irq_1: 1494out_free_irq_1:
1493 free_irq(dev->irq, pd); 1495 free_irq(irq, pd);
1494out_0: 1496out_0:
1495 return result; 1497 return result;
1496} 1498}
@@ -1519,7 +1521,7 @@ static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state)
1519 smsc9420_stop_rx(pd); 1521 smsc9420_stop_rx(pd);
1520 smsc9420_free_rx_ring(pd); 1522 smsc9420_free_rx_ring(pd);
1521 1523
1522 free_irq(dev->irq, pd); 1524 free_irq(pd->pdev->irq, pd);
1523 1525
1524 netif_device_detach(dev); 1526 netif_device_detach(dev);
1525 } 1527 }
@@ -1552,6 +1554,7 @@ static int smsc9420_resume(struct pci_dev *pdev)
1552 smsc_warn(IFUP, "pci_enable_wake failed: %d", err); 1554 smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
1553 1555
1554 if (netif_running(dev)) { 1556 if (netif_running(dev)) {
1557 /* FIXME: gross. It looks like ancient PM relic.*/
1555 err = smsc9420_open(dev); 1558 err = smsc9420_open(dev);
1556 netif_device_attach(dev); 1559 netif_device_attach(dev);
1557 } 1560 }
@@ -1625,8 +1628,6 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1625 /* registers are double mapped with 0 offset for LE and 0x200 for BE */ 1628 /* registers are double mapped with 0 offset for LE and 0x200 for BE */
1626 virt_addr += LAN9420_CPSR_ENDIAN_OFFSET; 1629 virt_addr += LAN9420_CPSR_ENDIAN_OFFSET;
1627 1630
1628 dev->base_addr = (ulong)virt_addr;
1629
1630 pd = netdev_priv(dev); 1631 pd = netdev_priv(dev);
1631 1632
1632 /* pci descriptors are created in the PCI consistent area */ 1633 /* pci descriptors are created in the PCI consistent area */
@@ -1646,7 +1647,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1646 1647
1647 pd->pdev = pdev; 1648 pd->pdev = pdev;
1648 pd->dev = dev; 1649 pd->dev = dev;
1649 pd->base_addr = virt_addr; 1650 pd->ioaddr = virt_addr;
1650 pd->msg_enable = smsc_debug; 1651 pd->msg_enable = smsc_debug;
1651 pd->rx_csum = true; 1652 pd->rx_csum = true;
1652 1653
@@ -1669,7 +1670,6 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1669 1670
1670 dev->netdev_ops = &smsc9420_netdev_ops; 1671 dev->netdev_ops = &smsc9420_netdev_ops;
1671 dev->ethtool_ops = &smsc9420_ethtool_ops; 1672 dev->ethtool_ops = &smsc9420_ethtool_ops;
1672 dev->irq = pdev->irq;
1673 1673
1674 netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT); 1674 netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);
1675 1675
@@ -1727,7 +1727,7 @@ static void __devexit smsc9420_remove(struct pci_dev *pdev)
1727 pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) * 1727 pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
1728 (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); 1728 (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
1729 1729
1730 iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET); 1730 iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
1731 pci_release_regions(pdev); 1731 pci_release_regions(pdev);
1732 free_netdev(dev); 1732 free_netdev(dev);
1733 pci_disable_device(pdev); 1733 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 0319d640f728..bcd54d6e94fd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -97,6 +97,16 @@ struct stmmac_extra_stats {
97 unsigned long normal_irq_n; 97 unsigned long normal_irq_n;
98}; 98};
99 99
100/* CSR Frequency Access Defines*/
101#define CSR_F_35M 35000000
102#define CSR_F_60M 60000000
103#define CSR_F_100M 100000000
104#define CSR_F_150M 150000000
105#define CSR_F_250M 250000000
106#define CSR_F_300M 300000000
107
108#define MAC_CSR_H_FRQ_MASK 0x20
109
100#define HASH_TABLE_SIZE 64 110#define HASH_TABLE_SIZE 64
101#define PAUSE_TIME 0x200 111#define PAUSE_TIME 0x200
102 112
@@ -137,6 +147,7 @@ struct stmmac_extra_stats {
137#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */ 147#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
138#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */ 148#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */
139#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */ 149#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */
150#define DEFAULT_DMA_PBL 8
140 151
141enum rx_frame_status { /* IPC status */ 152enum rx_frame_status { /* IPC status */
142 good_frame = 0, 153 good_frame = 0,
@@ -228,7 +239,7 @@ struct stmmac_desc_ops {
228 int (*get_rx_owner) (struct dma_desc *p); 239 int (*get_rx_owner) (struct dma_desc *p);
229 void (*set_rx_owner) (struct dma_desc *p); 240 void (*set_rx_owner) (struct dma_desc *p);
230 /* Get the receive frame size */ 241 /* Get the receive frame size */
231 int (*get_rx_frame_len) (struct dma_desc *p); 242 int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type);
232 /* Return the reception status looking at the RDES1 */ 243 /* Return the reception status looking at the RDES1 */
233 int (*rx_status) (void *data, struct stmmac_extra_stats *x, 244 int (*rx_status) (void *data, struct stmmac_extra_stats *x,
234 struct dma_desc *p); 245 struct dma_desc *p);
@@ -236,7 +247,8 @@ struct stmmac_desc_ops {
236 247
237struct stmmac_dma_ops { 248struct stmmac_dma_ops {
238 /* DMA core initialization */ 249 /* DMA core initialization */
239 int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx); 250 int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
251 int burst_len, u32 dma_tx, u32 dma_rx);
240 /* Dump DMA registers */ 252 /* Dump DMA registers */
241 void (*dump_regs) (void __iomem *ioaddr); 253 void (*dump_regs) (void __iomem *ioaddr);
242 /* Set tx/rx threshold in the csr6 register 254 /* Set tx/rx threshold in the csr6 register
@@ -261,14 +273,14 @@ struct stmmac_dma_ops {
261struct stmmac_ops { 273struct stmmac_ops {
262 /* MAC core initialization */ 274 /* MAC core initialization */
263 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned; 275 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
264 /* Support checksum offload engine */ 276 /* Enable and verify that the IPC module is supported */
265 int (*rx_coe) (void __iomem *ioaddr); 277 int (*rx_ipc) (void __iomem *ioaddr);
266 /* Dump MAC registers */ 278 /* Dump MAC registers */
267 void (*dump_regs) (void __iomem *ioaddr); 279 void (*dump_regs) (void __iomem *ioaddr);
268 /* Handle extra events on specific interrupts hw dependent */ 280 /* Handle extra events on specific interrupts hw dependent */
269 void (*host_irq_status) (void __iomem *ioaddr); 281 void (*host_irq_status) (void __iomem *ioaddr);
270 /* Multicast filter setting */ 282 /* Multicast filter setting */
271 void (*set_filter) (struct net_device *dev); 283 void (*set_filter) (struct net_device *dev, int id);
272 /* Flow control setting */ 284 /* Flow control setting */
273 void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex, 285 void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
274 unsigned int fc, unsigned int pause_time); 286 unsigned int fc, unsigned int pause_time);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index cfcef0ea0fa5..23478bf4ed7a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -61,9 +61,11 @@ enum power_event {
61}; 61};
62 62
63/* GMAC HW ADDR regs */ 63/* GMAC HW ADDR regs */
64#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8)) 64#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
65#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8)) 65 (reg * 8))
66#define GMAC_MAX_UNICAST_ADDRESSES 16 66#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
67 (reg * 8))
68#define GMAC_MAX_PERFECT_ADDRESSES 32
67 69
68#define GMAC_AN_CTRL 0x000000c0 /* AN control */ 70#define GMAC_AN_CTRL 0x000000c0 /* AN control */
69#define GMAC_AN_STATUS 0x000000c4 /* AN status */ 71#define GMAC_AN_STATUS 0x000000c4 /* AN status */
@@ -139,10 +141,11 @@ enum rx_tx_priority_ratio {
139}; 141};
140 142
141#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ 143#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
144#define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */
142#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ 145#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
143#define DMA_BUS_MODE_RPBL_SHIFT 17 146#define DMA_BUS_MODE_RPBL_SHIFT 17
144#define DMA_BUS_MODE_USP 0x00800000 147#define DMA_BUS_MODE_USP 0x00800000
145#define DMA_BUS_MODE_4PBL 0x01000000 148#define DMA_BUS_MODE_PBL 0x01000000
146#define DMA_BUS_MODE_AAL 0x02000000 149#define DMA_BUS_MODE_AAL 0x02000000
147 150
148/* DMA CRS Control and Status Register Mapping */ 151/* DMA CRS Control and Status Register Mapping */
@@ -205,4 +208,7 @@ enum rtc_control {
205#define GMAC_MMC_TX_INTR 0x108 208#define GMAC_MMC_TX_INTR 0x108
206#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 209#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
207 210
211/* Synopsys Core versions */
212#define DWMAC_CORE_3_40 34
213
208extern const struct stmmac_dma_ops dwmac1000_dma_ops; 214extern const struct stmmac_dma_ops dwmac1000_dma_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b1c48b975945..b5e4d02f15c9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -46,7 +46,7 @@ static void dwmac1000_core_init(void __iomem *ioaddr)
46#endif 46#endif
47} 47}
48 48
49static int dwmac1000_rx_coe_supported(void __iomem *ioaddr) 49static int dwmac1000_rx_ipc_enable(void __iomem *ioaddr)
50{ 50{
51 u32 value = readl(ioaddr + GMAC_CONTROL); 51 u32 value = readl(ioaddr + GMAC_CONTROL);
52 52
@@ -84,10 +84,11 @@ static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
84 GMAC_ADDR_LOW(reg_n)); 84 GMAC_ADDR_LOW(reg_n));
85} 85}
86 86
87static void dwmac1000_set_filter(struct net_device *dev) 87static void dwmac1000_set_filter(struct net_device *dev, int id)
88{ 88{
89 void __iomem *ioaddr = (void __iomem *) dev->base_addr; 89 void __iomem *ioaddr = (void __iomem *) dev->base_addr;
90 unsigned int value = 0; 90 unsigned int value = 0;
91 unsigned int perfect_addr_number;
91 92
92 CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", 93 CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
93 __func__, netdev_mc_count(dev), netdev_uc_count(dev)); 94 __func__, netdev_mc_count(dev), netdev_uc_count(dev));
@@ -121,8 +122,14 @@ static void dwmac1000_set_filter(struct net_device *dev)
121 writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH); 122 writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
122 } 123 }
123 124
125 /* Extra 16 regs are available in cores newer than the 3.40. */
126 if (id > DWMAC_CORE_3_40)
127 perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES;
128 else
129 perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES / 2;
130
124 /* Handle multiple unicast addresses (perfect filtering)*/ 131 /* Handle multiple unicast addresses (perfect filtering)*/
125 if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES) 132 if (netdev_uc_count(dev) > perfect_addr_number)
126 /* Switch to promiscuous mode is more than 16 addrs 133 /* Switch to promiscuous mode is more than 16 addrs
127 are required */ 134 are required */
128 value |= GMAC_FRAME_FILTER_PR; 135 value |= GMAC_FRAME_FILTER_PR;
@@ -211,7 +218,7 @@ static void dwmac1000_irq_status(void __iomem *ioaddr)
211 218
212static const struct stmmac_ops dwmac1000_ops = { 219static const struct stmmac_ops dwmac1000_ops = {
213 .core_init = dwmac1000_core_init, 220 .core_init = dwmac1000_core_init,
214 .rx_coe = dwmac1000_rx_coe_supported, 221 .rx_ipc = dwmac1000_rx_ipc_enable,
215 .dump_regs = dwmac1000_dump_regs, 222 .dump_regs = dwmac1000_dump_regs,
216 .host_irq_status = dwmac1000_irq_status, 223 .host_irq_status = dwmac1000_irq_status,
217 .set_filter = dwmac1000_set_filter, 224 .set_filter = dwmac1000_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 4d5402a1d262..033500090f55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,8 +30,8 @@
30#include "dwmac1000.h" 30#include "dwmac1000.h"
31#include "dwmac_dma.h" 31#include "dwmac_dma.h"
32 32
33static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, 33static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
34 u32 dma_rx) 34 int mb, int burst_len, u32 dma_tx, u32 dma_rx)
35{ 35{
36 u32 value = readl(ioaddr + DMA_BUS_MODE); 36 u32 value = readl(ioaddr + DMA_BUS_MODE);
37 int limit; 37 int limit;
@@ -48,15 +48,51 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
48 if (limit < 0) 48 if (limit < 0)
49 return -EBUSY; 49 return -EBUSY;
50 50
51 value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL | 51 /*
52 ((pbl << DMA_BUS_MODE_PBL_SHIFT) | 52 * Set the DMA PBL (Programmable Burst Length) mode
53 (pbl << DMA_BUS_MODE_RPBL_SHIFT)); 53 * Before stmmac core 3.50 this mode bit was 4xPBL, and
54 * post 3.5 mode bit acts as 8*PBL.
55 * For core rev < 3.5, when the core is set for 4xPBL mode, the
56 * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats
57 * depending on pbl value.
58 * For core rev > 3.5, when the core is set for 8xPBL mode, the
59 * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats
60 * depending on pbl value.
61 */
62 value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
63 (pbl << DMA_BUS_MODE_RPBL_SHIFT));
64
65 /* Set the Fixed burst mode */
66 if (fb)
67 value |= DMA_BUS_MODE_FB;
68
69 /* Mixed Burst has no effect when fb is set */
70 if (mb)
71 value |= DMA_BUS_MODE_MB;
54 72
55#ifdef CONFIG_STMMAC_DA 73#ifdef CONFIG_STMMAC_DA
56 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ 74 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
57#endif 75#endif
58 writel(value, ioaddr + DMA_BUS_MODE); 76 writel(value, ioaddr + DMA_BUS_MODE);
59 77
78 /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
79 * for supported bursts.
80 *
81 * Note: This is applicable only for revision GMACv3.61a. For
82 * older version this register is reserved and shall have no
83 * effect.
84 *
85 * Note:
86 * For Fixed Burst Mode: if we directly write 0xFF to this
87 * register using the configurations pass from platform code,
88 * this would ensure that all bursts supported by core are set
89 * and those which are not supported would remain ineffective.
90 *
91 * For Non Fixed Burst Mode: provide the maximum value of the
92 * burst length. Any burst equal or below the provided burst
93 * length would be allowed to perform. */
94 writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
95
60 /* Mask interrupts by writing to CSR7 */ 96 /* Mask interrupts by writing to CSR7 */
61 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); 97 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
62 98
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 138fb8dd1e87..19e0f4eed2bc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -43,11 +43,6 @@ static void dwmac100_core_init(void __iomem *ioaddr)
43#endif 43#endif
44} 44}
45 45
46static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
47{
48 return 0;
49}
50
51static void dwmac100_dump_mac_regs(void __iomem *ioaddr) 46static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
52{ 47{
53 pr_info("\t----------------------------------------------\n" 48 pr_info("\t----------------------------------------------\n"
@@ -72,6 +67,11 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
72 readl(ioaddr + MAC_VLAN2)); 67 readl(ioaddr + MAC_VLAN2));
73} 68}
74 69
70static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
71{
72 return 0;
73}
74
75static void dwmac100_irq_status(void __iomem *ioaddr) 75static void dwmac100_irq_status(void __iomem *ioaddr)
76{ 76{
77 return; 77 return;
@@ -89,7 +89,7 @@ static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
89 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); 89 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
90} 90}
91 91
92static void dwmac100_set_filter(struct net_device *dev) 92static void dwmac100_set_filter(struct net_device *dev, int id)
93{ 93{
94 void __iomem *ioaddr = (void __iomem *) dev->base_addr; 94 void __iomem *ioaddr = (void __iomem *) dev->base_addr;
95 u32 value = readl(ioaddr + MAC_CONTROL); 95 u32 value = readl(ioaddr + MAC_CONTROL);
@@ -160,7 +160,7 @@ static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
160 160
161static const struct stmmac_ops dwmac100_ops = { 161static const struct stmmac_ops dwmac100_ops = {
162 .core_init = dwmac100_core_init, 162 .core_init = dwmac100_core_init,
163 .rx_coe = dwmac100_rx_coe_supported, 163 .rx_ipc = dwmac100_rx_ipc_enable,
164 .dump_regs = dwmac100_dump_mac_regs, 164 .dump_regs = dwmac100_dump_mac_regs,
165 .host_irq_status = dwmac100_irq_status, 165 .host_irq_status = dwmac100_irq_status,
166 .set_filter = dwmac100_set_filter, 166 .set_filter = dwmac100_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index bc17fd08b55d..c2b4d55a79b6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,8 +32,8 @@
32#include "dwmac100.h" 32#include "dwmac100.h"
33#include "dwmac_dma.h" 33#include "dwmac_dma.h"
34 34
35static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, 35static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
36 u32 dma_rx) 36 int mb, int burst_len, u32 dma_tx, u32 dma_rx)
37{ 37{
38 u32 value = readl(ioaddr + DMA_BUS_MODE); 38 u32 value = readl(ioaddr + DMA_BUS_MODE);
39 int limit; 39 int limit;
@@ -52,7 +52,7 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
52 52
53 /* Enable Application Access by writing to DMA CSR0 */ 53 /* Enable Application Access by writing to DMA CSR0 */
54 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), 54 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
55 ioaddr + DMA_BUS_MODE); 55 ioaddr + DMA_BUS_MODE);
56 56
57 /* Mask interrupts by writing to CSR7 */ 57 /* Mask interrupts by writing to CSR7 */
58 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); 58 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 437edacd602e..6e0360f9cfde 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -32,6 +32,7 @@
32#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ 32#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
33#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ 33#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
34#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ 34#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
35#define DMA_AXI_BUS_MODE 0x00001028 /* AXI Bus Mode */
35#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ 36#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
36#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ 37#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
37#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ 38#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index f20aa12931d0..4e0e18a44fcc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -31,6 +31,8 @@
31#define DWMAC_LIB_DBG(fmt, args...) do { } while (0) 31#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
32#endif 32#endif
33 33
34#define GMAC_HI_REG_AE 0x80000000
35
34/* CSR1 enables the transmit DMA to check for new descriptor */ 36/* CSR1 enables the transmit DMA to check for new descriptor */
35void dwmac_enable_dma_transmission(void __iomem *ioaddr) 37void dwmac_enable_dma_transmission(void __iomem *ioaddr)
36{ 38{
@@ -233,7 +235,11 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
233 unsigned long data; 235 unsigned long data;
234 236
235 data = (addr[5] << 8) | addr[4]; 237 data = (addr[5] << 8) | addr[4];
236 writel(data, ioaddr + high); 238 /* For MAC Addr registers we have to set the Address Enable (AE)
239 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
240 * is RO.
241 */
242 writel(data | GMAC_HI_REG_AE, ioaddr + high);
237 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; 243 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
238 writel(data, ioaddr + low); 244 writel(data, ioaddr + low);
239} 245}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index ad1b627f8ec2..2fc8ef95f97a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -22,6 +22,7 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include <linux/stmmac.h>
25#include "common.h" 26#include "common.h"
26#include "descs_com.h" 27#include "descs_com.h"
27 28
@@ -309,9 +310,17 @@ static void enh_desc_close_tx_desc(struct dma_desc *p)
309 p->des01.etx.interrupt = 1; 310 p->des01.etx.interrupt = 1;
310} 311}
311 312
312static int enh_desc_get_rx_frame_len(struct dma_desc *p) 313static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
313{ 314{
314 return p->des01.erx.frame_length; 315 /* The type-1 checksum offload engines append the checksum at
316 * the end of frame and the two bytes of checksum are added in
317 * the length.
318 * Adjust for that in the framelen for type-1 checksum offload
319 * engines. */
320 if (rx_coe_type == STMMAC_RX_COE_TYPE1)
321 return p->des01.erx.frame_length - 2;
322 else
323 return p->des01.erx.frame_length;
315} 324}
316 325
317const struct stmmac_desc_ops enh_desc_ops = { 326const struct stmmac_desc_ops enh_desc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 25953bb45a73..68962c549a2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -22,6 +22,7 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include <linux/stmmac.h>
25#include "common.h" 26#include "common.h"
26#include "descs_com.h" 27#include "descs_com.h"
27 28
@@ -201,9 +202,17 @@ static void ndesc_close_tx_desc(struct dma_desc *p)
201 p->des01.tx.interrupt = 1; 202 p->des01.tx.interrupt = 1;
202} 203}
203 204
204static int ndesc_get_rx_frame_len(struct dma_desc *p) 205static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
205{ 206{
206 return p->des01.rx.frame_length; 207 /* The type-1 checksum offload engines append the checksum at
208 * the end of frame and the two bytes of checksum are added in
209 * the length.
210 * Adjust for that in the framelen for type-1 checksum offload
211 * engines. */
212 if (rx_coe_type == STMMAC_RX_COE_TYPE1)
213 return p->des01.rx.frame_length - 2;
214 else
215 return p->des01.rx.frame_length;
207} 216}
208 217
209const struct stmmac_desc_ops ndesc_ops = { 218const struct stmmac_desc_ops ndesc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b4b095fdcf29..6b5d060ee9de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -21,7 +21,9 @@
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define STMMAC_RESOURCE_NAME "stmmaceth" 23#define STMMAC_RESOURCE_NAME "stmmaceth"
24#define DRV_MODULE_VERSION "Feb_2012" 24#define DRV_MODULE_VERSION "March_2012"
25
26#include <linux/clk.h>
25#include <linux/stmmac.h> 27#include <linux/stmmac.h>
26#include <linux/phy.h> 28#include <linux/phy.h>
27#include "common.h" 29#include "common.h"
@@ -56,8 +58,6 @@ struct stmmac_priv {
56 58
57 struct stmmac_extra_stats xstats; 59 struct stmmac_extra_stats xstats;
58 struct napi_struct napi; 60 struct napi_struct napi;
59
60 int rx_coe;
61 int no_csum_insertion; 61 int no_csum_insertion;
62 62
63 struct phy_device *phydev; 63 struct phy_device *phydev;
@@ -81,6 +81,11 @@ struct stmmac_priv {
81 struct stmmac_counters mmc; 81 struct stmmac_counters mmc;
82 struct dma_features dma_cap; 82 struct dma_features dma_cap;
83 int hw_cap_support; 83 int hw_cap_support;
84#ifdef CONFIG_HAVE_CLK
85 struct clk *stmmac_clk;
86#endif
87 int clk_csr;
88 int synopsys_id;
84}; 89};
85 90
86extern int phyaddr; 91extern int phyaddr;
@@ -99,3 +104,42 @@ int stmmac_dvr_remove(struct net_device *ndev);
99struct stmmac_priv *stmmac_dvr_probe(struct device *device, 104struct stmmac_priv *stmmac_dvr_probe(struct device *device,
100 struct plat_stmmacenet_data *plat_dat, 105 struct plat_stmmacenet_data *plat_dat,
101 void __iomem *addr); 106 void __iomem *addr);
107
108#ifdef CONFIG_HAVE_CLK
109static inline int stmmac_clk_enable(struct stmmac_priv *priv)
110{
111 if (!IS_ERR(priv->stmmac_clk))
112 return clk_enable(priv->stmmac_clk);
113
114 return 0;
115}
116
117static inline void stmmac_clk_disable(struct stmmac_priv *priv)
118{
119 if (IS_ERR(priv->stmmac_clk))
120 return;
121
122 clk_disable(priv->stmmac_clk);
123}
124static inline int stmmac_clk_get(struct stmmac_priv *priv)
125{
126 priv->stmmac_clk = clk_get(priv->device, NULL);
127
128 if (IS_ERR(priv->stmmac_clk))
129 return PTR_ERR(priv->stmmac_clk);
130
131 return 0;
132}
133#else
134static inline int stmmac_clk_enable(struct stmmac_priv *priv)
135{
136 return 0;
137}
138static inline void stmmac_clk_disable(struct stmmac_priv *priv)
139{
140}
141static inline int stmmac_clk_get(struct stmmac_priv *priv)
142{
143 return 0;
144}
145#endif /* CONFIG_HAVE_CLK */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index f98e1511660f..ce431846fc6f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -481,6 +481,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
481 .get_wol = stmmac_get_wol, 481 .get_wol = stmmac_get_wol,
482 .set_wol = stmmac_set_wol, 482 .set_wol = stmmac_set_wol,
483 .get_sset_count = stmmac_get_sset_count, 483 .get_sset_count = stmmac_get_sset_count,
484 .get_ts_info = ethtool_op_get_ts_info,
484}; 485};
485 486
486void stmmac_set_ethtool_ops(struct net_device *netdev) 487void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 48d56da62f08..70966330f44e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -163,6 +163,38 @@ static void stmmac_verify_args(void)
163 pause = PAUSE_TIME; 163 pause = PAUSE_TIME;
164} 164}
165 165
166static void stmmac_clk_csr_set(struct stmmac_priv *priv)
167{
168#ifdef CONFIG_HAVE_CLK
169 u32 clk_rate;
170
171 if (IS_ERR(priv->stmmac_clk))
172 return;
173
174 clk_rate = clk_get_rate(priv->stmmac_clk);
175
176 /* Platform provided default clk_csr would be assumed valid
177 * for all other cases except for the below mentioned ones. */
178 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
179 if (clk_rate < CSR_F_35M)
180 priv->clk_csr = STMMAC_CSR_20_35M;
181 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
182 priv->clk_csr = STMMAC_CSR_35_60M;
183 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
184 priv->clk_csr = STMMAC_CSR_60_100M;
185 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
186 priv->clk_csr = STMMAC_CSR_100_150M;
187 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
188 priv->clk_csr = STMMAC_CSR_150_250M;
189 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
190 priv->clk_csr = STMMAC_CSR_250_300M;
191 } /* For values higher than the IEEE 802.3 specified frequency
192 * we can not estimate the proper divider as it is not known
193 * the frequency of clk_csr_i. So we do not change the default
194 * divider. */
195#endif
196}
197
166#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) 198#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
167static void print_pkt(unsigned char *buf, int len) 199static void print_pkt(unsigned char *buf, int len)
168{ 200{
@@ -307,7 +339,13 @@ static int stmmac_init_phy(struct net_device *dev)
307 priv->speed = 0; 339 priv->speed = 0;
308 priv->oldduplex = -1; 340 priv->oldduplex = -1;
309 341
310 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id); 342 if (priv->plat->phy_bus_name)
343 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
344 priv->plat->phy_bus_name, priv->plat->bus_id);
345 else
346 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
347 priv->plat->bus_id);
348
311 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 349 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
312 priv->plat->phy_addr); 350 priv->plat->phy_addr);
313 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 351 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -884,6 +922,26 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
884 priv->dev->dev_addr); 922 priv->dev->dev_addr);
885} 923}
886 924
925static int stmmac_init_dma_engine(struct stmmac_priv *priv)
926{
927 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
928 int mixed_burst = 0;
929
930 /* Some DMA parameters can be passed from the platform;
931 * in case of these are not passed we keep a default
932 * (good for all the chips) and init the DMA! */
933 if (priv->plat->dma_cfg) {
934 pbl = priv->plat->dma_cfg->pbl;
935 fixed_burst = priv->plat->dma_cfg->fixed_burst;
936 mixed_burst = priv->plat->dma_cfg->mixed_burst;
937 burst_len = priv->plat->dma_cfg->burst_len;
938 }
939
940 return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
941 burst_len, priv->dma_tx_phy,
942 priv->dma_rx_phy);
943}
944
887/** 945/**
888 * stmmac_open - open entry point of the driver 946 * stmmac_open - open entry point of the driver
889 * @dev : pointer to the device structure. 947 * @dev : pointer to the device structure.
@@ -898,16 +956,6 @@ static int stmmac_open(struct net_device *dev)
898 struct stmmac_priv *priv = netdev_priv(dev); 956 struct stmmac_priv *priv = netdev_priv(dev);
899 int ret; 957 int ret;
900 958
901 stmmac_check_ether_addr(priv);
902
903 /* MDIO bus Registration */
904 ret = stmmac_mdio_register(dev);
905 if (ret < 0) {
906 pr_debug("%s: MDIO bus (id: %d) registration failed",
907 __func__, priv->plat->bus_id);
908 return ret;
909 }
910
911#ifdef CONFIG_STMMAC_TIMER 959#ifdef CONFIG_STMMAC_TIMER
912 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); 960 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
913 if (unlikely(priv->tm == NULL)) 961 if (unlikely(priv->tm == NULL))
@@ -925,6 +973,10 @@ static int stmmac_open(struct net_device *dev)
925 } else 973 } else
926 priv->tm->enable = 1; 974 priv->tm->enable = 1;
927#endif 975#endif
976 stmmac_clk_enable(priv);
977
978 stmmac_check_ether_addr(priv);
979
928 ret = stmmac_init_phy(dev); 980 ret = stmmac_init_phy(dev);
929 if (unlikely(ret)) { 981 if (unlikely(ret)) {
930 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); 982 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
@@ -938,8 +990,7 @@ static int stmmac_open(struct net_device *dev)
938 init_dma_desc_rings(dev); 990 init_dma_desc_rings(dev);
939 991
940 /* DMA initialization and SW reset */ 992 /* DMA initialization and SW reset */
941 ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, 993 ret = stmmac_init_dma_engine(priv);
942 priv->dma_tx_phy, priv->dma_rx_phy);
943 if (ret < 0) { 994 if (ret < 0) {
944 pr_err("%s: DMA initialization failed\n", __func__); 995 pr_err("%s: DMA initialization failed\n", __func__);
945 goto open_error; 996 goto open_error;
@@ -1026,6 +1077,8 @@ open_error:
1026 if (priv->phydev) 1077 if (priv->phydev)
1027 phy_disconnect(priv->phydev); 1078 phy_disconnect(priv->phydev);
1028 1079
1080 stmmac_clk_disable(priv);
1081
1029 return ret; 1082 return ret;
1030} 1083}
1031 1084
@@ -1077,7 +1130,7 @@ static int stmmac_release(struct net_device *dev)
1077#ifdef CONFIG_STMMAC_DEBUG_FS 1130#ifdef CONFIG_STMMAC_DEBUG_FS
1078 stmmac_exit_fs(); 1131 stmmac_exit_fs();
1079#endif 1132#endif
1080 stmmac_mdio_unregister(dev); 1133 stmmac_clk_disable(priv);
1081 1134
1082 return 0; 1135 return 0;
1083} 1136}
@@ -1276,7 +1329,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1276 struct sk_buff *skb; 1329 struct sk_buff *skb;
1277 int frame_len; 1330 int frame_len;
1278 1331
1279 frame_len = priv->hw->desc->get_rx_frame_len(p); 1332 frame_len = priv->hw->desc->get_rx_frame_len(p,
1333 priv->plat->rx_coe);
1280 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 1334 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
1281 * Type frames (LLC/LLC-SNAP) */ 1335 * Type frames (LLC/LLC-SNAP) */
1282 if (unlikely(status != llc_snap)) 1336 if (unlikely(status != llc_snap))
@@ -1312,7 +1366,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1312#endif 1366#endif
1313 skb->protocol = eth_type_trans(skb, priv->dev); 1367 skb->protocol = eth_type_trans(skb, priv->dev);
1314 1368
1315 if (unlikely(!priv->rx_coe)) { 1369 if (unlikely(!priv->plat->rx_coe)) {
1316 /* No RX COE for old mac10/100 devices */ 1370 /* No RX COE for old mac10/100 devices */
1317 skb_checksum_none_assert(skb); 1371 skb_checksum_none_assert(skb);
1318 netif_receive_skb(skb); 1372 netif_receive_skb(skb);
@@ -1413,7 +1467,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
1413 struct stmmac_priv *priv = netdev_priv(dev); 1467 struct stmmac_priv *priv = netdev_priv(dev);
1414 1468
1415 spin_lock(&priv->lock); 1469 spin_lock(&priv->lock);
1416 priv->hw->mac->set_filter(dev); 1470 priv->hw->mac->set_filter(dev, priv->synopsys_id);
1417 spin_unlock(&priv->lock); 1471 spin_unlock(&priv->lock);
1418} 1472}
1419 1473
@@ -1459,8 +1513,10 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
1459{ 1513{
1460 struct stmmac_priv *priv = netdev_priv(dev); 1514 struct stmmac_priv *priv = netdev_priv(dev);
1461 1515
1462 if (!priv->rx_coe) 1516 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
1463 features &= ~NETIF_F_RXCSUM; 1517 features &= ~NETIF_F_RXCSUM;
1518 else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
1519 features &= ~NETIF_F_IPV6_CSUM;
1464 if (!priv->plat->tx_coe) 1520 if (!priv->plat->tx_coe)
1465 features &= ~NETIF_F_ALL_CSUM; 1521 features &= ~NETIF_F_ALL_CSUM;
1466 1522
@@ -1584,7 +1640,7 @@ static const struct file_operations stmmac_rings_status_fops = {
1584 .open = stmmac_sysfs_ring_open, 1640 .open = stmmac_sysfs_ring_open,
1585 .read = seq_read, 1641 .read = seq_read,
1586 .llseek = seq_lseek, 1642 .llseek = seq_lseek,
1587 .release = seq_release, 1643 .release = single_release,
1588}; 1644};
1589 1645
1590static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) 1646static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
@@ -1656,7 +1712,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
1656 .open = stmmac_sysfs_dma_cap_open, 1712 .open = stmmac_sysfs_dma_cap_open,
1657 .read = seq_read, 1713 .read = seq_read,
1658 .llseek = seq_lseek, 1714 .llseek = seq_lseek,
1659 .release = seq_release, 1715 .release = single_release,
1660}; 1716};
1661 1717
1662static int stmmac_init_fs(struct net_device *dev) 1718static int stmmac_init_fs(struct net_device *dev)
@@ -1752,7 +1808,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1752 priv->hw->ring = &ring_mode_ops; 1808 priv->hw->ring = &ring_mode_ops;
1753 1809
1754 /* Get and dump the chip ID */ 1810 /* Get and dump the chip ID */
1755 stmmac_get_synopsys_id(priv); 1811 priv->synopsys_id = stmmac_get_synopsys_id(priv);
1756 1812
1757 /* Get the HW capability (new GMAC newer than 3.50a) */ 1813 /* Get the HW capability (new GMAC newer than 3.50a) */
1758 priv->hw_cap_support = stmmac_get_hw_features(priv); 1814 priv->hw_cap_support = stmmac_get_hw_features(priv);
@@ -1765,17 +1821,32 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1765 * register (if supported). 1821 * register (if supported).
1766 */ 1822 */
1767 priv->plat->enh_desc = priv->dma_cap.enh_desc; 1823 priv->plat->enh_desc = priv->dma_cap.enh_desc;
1768 priv->plat->tx_coe = priv->dma_cap.tx_coe;
1769 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; 1824 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
1825
1826 priv->plat->tx_coe = priv->dma_cap.tx_coe;
1827
1828 if (priv->dma_cap.rx_coe_type2)
1829 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
1830 else if (priv->dma_cap.rx_coe_type1)
1831 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
1832
1770 } else 1833 } else
1771 pr_info(" No HW DMA feature register supported"); 1834 pr_info(" No HW DMA feature register supported");
1772 1835
1773 /* Select the enhnaced/normal descriptor structures */ 1836 /* Select the enhnaced/normal descriptor structures */
1774 stmmac_selec_desc_mode(priv); 1837 stmmac_selec_desc_mode(priv);
1775 1838
1776 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); 1839 /* Enable the IPC (Checksum Offload) and check if the feature has been
1777 if (priv->rx_coe) 1840 * enabled during the core configuration. */
1778 pr_info(" RX Checksum Offload Engine supported\n"); 1841 ret = priv->hw->mac->rx_ipc(priv->ioaddr);
1842 if (!ret) {
1843 pr_warning(" RX IPC Checksum Offload not configured.\n");
1844 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1845 }
1846
1847 if (priv->plat->rx_coe)
1848 pr_info(" RX Checksum Offload Engine supported (type %d)\n",
1849 priv->plat->rx_coe);
1779 if (priv->plat->tx_coe) 1850 if (priv->plat->tx_coe)
1780 pr_info(" TX Checksum insertion supported\n"); 1851 pr_info(" TX Checksum insertion supported\n");
1781 1852
@@ -1856,6 +1927,28 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1856 goto error; 1927 goto error;
1857 } 1928 }
1858 1929
1930 if (stmmac_clk_get(priv))
1931 pr_warning("%s: warning: cannot get CSR clock\n", __func__);
1932
1933 /* If a specific clk_csr value is passed from the platform
1934 * this means that the CSR Clock Range selection cannot be
1935 * changed at run-time and it is fixed. Vice versa, the driver will try to
1936 * set the MDC clock dynamically according to the csr actual
1937 * clock input.
1938 */
1939 if (!priv->plat->clk_csr)
1940 stmmac_clk_csr_set(priv);
1941 else
1942 priv->clk_csr = priv->plat->clk_csr;
1943
1944 /* MDIO bus Registration */
1945 ret = stmmac_mdio_register(ndev);
1946 if (ret < 0) {
1947 pr_debug("%s: MDIO bus (id: %d) registration failed",
1948 __func__, priv->plat->bus_id);
1949 goto error;
1950 }
1951
1859 return priv; 1952 return priv;
1860 1953
1861error: 1954error:
@@ -1883,6 +1976,7 @@ int stmmac_dvr_remove(struct net_device *ndev)
1883 priv->hw->dma->stop_tx(priv->ioaddr); 1976 priv->hw->dma->stop_tx(priv->ioaddr);
1884 1977
1885 stmmac_set_mac(priv->ioaddr, false); 1978 stmmac_set_mac(priv->ioaddr, false);
1979 stmmac_mdio_unregister(ndev);
1886 netif_carrier_off(ndev); 1980 netif_carrier_off(ndev);
1887 unregister_netdev(ndev); 1981 unregister_netdev(ndev);
1888 free_netdev(ndev); 1982 free_netdev(ndev);
@@ -1895,6 +1989,7 @@ int stmmac_suspend(struct net_device *ndev)
1895{ 1989{
1896 struct stmmac_priv *priv = netdev_priv(ndev); 1990 struct stmmac_priv *priv = netdev_priv(ndev);
1897 int dis_ic = 0; 1991 int dis_ic = 0;
1992 unsigned long flags;
1898 1993
1899 if (!ndev || !netif_running(ndev)) 1994 if (!ndev || !netif_running(ndev))
1900 return 0; 1995 return 0;
@@ -1902,7 +1997,7 @@ int stmmac_suspend(struct net_device *ndev)
1902 if (priv->phydev) 1997 if (priv->phydev)
1903 phy_stop(priv->phydev); 1998 phy_stop(priv->phydev);
1904 1999
1905 spin_lock(&priv->lock); 2000 spin_lock_irqsave(&priv->lock, flags);
1906 2001
1907 netif_device_detach(ndev); 2002 netif_device_detach(ndev);
1908 netif_stop_queue(ndev); 2003 netif_stop_queue(ndev);
@@ -1925,21 +2020,24 @@ int stmmac_suspend(struct net_device *ndev)
1925 /* Enable Power down mode by programming the PMT regs */ 2020 /* Enable Power down mode by programming the PMT regs */
1926 if (device_may_wakeup(priv->device)) 2021 if (device_may_wakeup(priv->device))
1927 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); 2022 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
1928 else 2023 else {
1929 stmmac_set_mac(priv->ioaddr, false); 2024 stmmac_set_mac(priv->ioaddr, false);
1930 2025 /* Disable clock in case PMT is off */
1931 spin_unlock(&priv->lock); 2026 stmmac_clk_disable(priv);
2027 }
2028 spin_unlock_irqrestore(&priv->lock, flags);
1932 return 0; 2029 return 0;
1933} 2030}
1934 2031
1935int stmmac_resume(struct net_device *ndev) 2032int stmmac_resume(struct net_device *ndev)
1936{ 2033{
1937 struct stmmac_priv *priv = netdev_priv(ndev); 2034 struct stmmac_priv *priv = netdev_priv(ndev);
2035 unsigned long flags;
1938 2036
1939 if (!netif_running(ndev)) 2037 if (!netif_running(ndev))
1940 return 0; 2038 return 0;
1941 2039
1942 spin_lock(&priv->lock); 2040 spin_lock_irqsave(&priv->lock, flags);
1943 2041
1944 /* Power Down bit, into the PM register, is cleared 2042 /* Power Down bit, into the PM register, is cleared
1945 * automatically as soon as a magic packet or a Wake-up frame 2043 * automatically as soon as a magic packet or a Wake-up frame
@@ -1948,6 +2046,9 @@ int stmmac_resume(struct net_device *ndev)
1948 * from another devices (e.g. serial console). */ 2046 * from another devices (e.g. serial console). */
1949 if (device_may_wakeup(priv->device)) 2047 if (device_may_wakeup(priv->device))
1950 priv->hw->mac->pmt(priv->ioaddr, 0); 2048 priv->hw->mac->pmt(priv->ioaddr, 0);
2049 else
2050 /* enable the clk previously disabled */
2051 stmmac_clk_enable(priv);
1951 2052
1952 netif_device_attach(ndev); 2053 netif_device_attach(ndev);
1953 2054
@@ -1964,7 +2065,7 @@ int stmmac_resume(struct net_device *ndev)
1964 2065
1965 netif_start_queue(ndev); 2066 netif_start_queue(ndev);
1966 2067
1967 spin_unlock(&priv->lock); 2068 spin_unlock_irqrestore(&priv->lock, flags);
1968 2069
1969 if (priv->phydev) 2070 if (priv->phydev)
1970 phy_start(priv->phydev); 2071 phy_start(priv->phydev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 73195329aa46..ade108232048 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -34,6 +34,22 @@
34#define MII_BUSY 0x00000001 34#define MII_BUSY 0x00000001
35#define MII_WRITE 0x00000002 35#define MII_WRITE 0x00000002
36 36
37static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
38{
39 unsigned long curr;
40 unsigned long finish = jiffies + 3 * HZ;
41
42 do {
43 curr = jiffies;
44 if (readl(ioaddr + mii_addr) & MII_BUSY)
45 cpu_relax();
46 else
47 return 0;
48 } while (!time_after_eq(curr, finish));
49
50 return -EBUSY;
51}
52
37/** 53/**
38 * stmmac_mdio_read 54 * stmmac_mdio_read
39 * @bus: points to the mii_bus structure 55 * @bus: points to the mii_bus structure
@@ -54,11 +70,15 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
54 int data; 70 int data;
55 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 71 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
56 ((phyreg << 6) & (0x000007C0))); 72 ((phyreg << 6) & (0x000007C0)));
57 regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2); 73 regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
74
75 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
76 return -EBUSY;
58 77
59 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
60 writel(regValue, priv->ioaddr + mii_address); 78 writel(regValue, priv->ioaddr + mii_address);
61 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 79
80 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
81 return -EBUSY;
62 82
63 /* Read the data from the MII data register */ 83 /* Read the data from the MII data register */
64 data = (int)readl(priv->ioaddr + mii_data); 84 data = (int)readl(priv->ioaddr + mii_data);
@@ -86,20 +106,18 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
86 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 106 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
87 | MII_WRITE; 107 | MII_WRITE;
88 108
89 value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2); 109 value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
90
91 110
92 /* Wait until any existing MII operation is complete */ 111 /* Wait until any existing MII operation is complete */
93 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 112 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
113 return -EBUSY;
94 114
95 /* Set the MII address register to write */ 115 /* Set the MII address register to write */
96 writel(phydata, priv->ioaddr + mii_data); 116 writel(phydata, priv->ioaddr + mii_data);
97 writel(value, priv->ioaddr + mii_address); 117 writel(value, priv->ioaddr + mii_address);
98 118
99 /* Wait until any existing MII operation is complete */ 119 /* Wait until any existing MII operation is complete */
100 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 120 return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
101
102 return 0;
103} 121}
104 122
105/** 123/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index da66ed7c3c5d..58fab5303e9c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -28,6 +28,7 @@
28 28
29struct plat_stmmacenet_data plat_dat; 29struct plat_stmmacenet_data plat_dat;
30struct stmmac_mdio_bus_data mdio_data; 30struct stmmac_mdio_bus_data mdio_data;
31struct stmmac_dma_cfg dma_cfg;
31 32
32static void stmmac_default_data(void) 33static void stmmac_default_data(void)
33{ 34{
@@ -35,7 +36,6 @@ static void stmmac_default_data(void)
35 plat_dat.bus_id = 1; 36 plat_dat.bus_id = 1;
36 plat_dat.phy_addr = 0; 37 plat_dat.phy_addr = 0;
37 plat_dat.interface = PHY_INTERFACE_MODE_GMII; 38 plat_dat.interface = PHY_INTERFACE_MODE_GMII;
38 plat_dat.pbl = 32;
39 plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ 39 plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
40 plat_dat.has_gmac = 1; 40 plat_dat.has_gmac = 1;
41 plat_dat.force_sf_dma_mode = 1; 41 plat_dat.force_sf_dma_mode = 1;
@@ -44,6 +44,10 @@ static void stmmac_default_data(void)
44 mdio_data.phy_reset = NULL; 44 mdio_data.phy_reset = NULL;
45 mdio_data.phy_mask = 0; 45 mdio_data.phy_mask = 0;
46 plat_dat.mdio_bus_data = &mdio_data; 46 plat_dat.mdio_bus_data = &mdio_data;
47
48 dma_cfg.pbl = 32;
49 dma_cfg.burst_len = DMA_AXI_BLEN_256;
50 plat_dat.dma_cfg = &dma_cfg;
47} 51}
48 52
49/** 53/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 116529a366b2..3dd8f0803808 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -50,7 +50,6 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
50 * once needed on other platforms. 50 * once needed on other platforms.
51 */ 51 */
52 if (of_device_is_compatible(np, "st,spear600-gmac")) { 52 if (of_device_is_compatible(np, "st,spear600-gmac")) {
53 plat->pbl = 8;
54 plat->has_gmac = 1; 53 plat->has_gmac = 1;
55 plat->pmt = 1; 54 plat->pmt = 1;
56 } 55 }
@@ -189,9 +188,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
189 if (priv->plat->exit) 188 if (priv->plat->exit)
190 priv->plat->exit(pdev); 189 priv->plat->exit(pdev);
191 190
192 if (priv->plat->exit)
193 priv->plat->exit(pdev);
194
195 platform_set_drvdata(pdev, NULL); 191 platform_set_drvdata(pdev, NULL);
196 192
197 iounmap((void *)priv->ioaddr); 193 iounmap((void *)priv->ioaddr);
@@ -218,14 +214,26 @@ static int stmmac_pltfr_resume(struct device *dev)
218 214
219int stmmac_pltfr_freeze(struct device *dev) 215int stmmac_pltfr_freeze(struct device *dev)
220{ 216{
217 int ret;
218 struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
221 struct net_device *ndev = dev_get_drvdata(dev); 219 struct net_device *ndev = dev_get_drvdata(dev);
220 struct platform_device *pdev = to_platform_device(dev);
222 221
223 return stmmac_freeze(ndev); 222 ret = stmmac_freeze(ndev);
223 if (plat_dat->exit)
224 plat_dat->exit(pdev);
225
226 return ret;
224} 227}
225 228
226int stmmac_pltfr_restore(struct device *dev) 229int stmmac_pltfr_restore(struct device *dev)
227{ 230{
231 struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
228 struct net_device *ndev = dev_get_drvdata(dev); 232 struct net_device *ndev = dev_get_drvdata(dev);
233 struct platform_device *pdev = to_platform_device(dev);
234
235 if (plat_dat->init)
236 plat_dat->init(pdev);
229 237
230 return stmmac_restore(ndev); 238 return stmmac_restore(ndev);
231} 239}
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index c99b3b0e2eae..703c8cce2a2c 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9838,7 +9838,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9838 goto err_out_release_parent; 9838 goto err_out_release_parent;
9839 } 9839 }
9840 } 9840 }
9841 if (err || dma_mask == DMA_BIT_MASK(32)) { 9841 if (err) {
9842 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9842 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9843 if (err) { 9843 if (err) {
9844 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 9844 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 558409ff4058..3cf4ab755838 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -401,7 +401,7 @@ static int gem_rxmac_reset(struct gem *gp)
401 return 1; 401 return 1;
402 } 402 }
403 403
404 udelay(5000); 404 mdelay(5);
405 405
406 /* Execute RX reset command. */ 406 /* Execute RX reset command. */
407 writel(gp->swrst_base | GREG_SWRST_RXRST, 407 writel(gp->swrst_base | GREG_SWRST_RXRST,
@@ -2339,7 +2339,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2339 netif_device_detach(dev); 2339 netif_device_detach(dev);
2340 2340
2341 /* Switch off chip, remember WOL setting */ 2341 /* Switch off chip, remember WOL setting */
2342 gp->asleep_wol = gp->wake_on_lan; 2342 gp->asleep_wol = !!gp->wake_on_lan;
2343 gem_do_stop(dev, gp->asleep_wol); 2343 gem_do_stop(dev, gp->asleep_wol);
2344 2344
2345 /* Unlock the network stack */ 2345 /* Unlock the network stack */
@@ -2898,7 +2898,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
2898 } 2898 }
2899 2899
2900 gp->pdev = pdev; 2900 gp->pdev = pdev;
2901 dev->base_addr = (long) pdev;
2902 gp->dev = dev; 2901 gp->dev = dev;
2903 2902
2904 gp->msg_enable = DEFAULT_MSG; 2903 gp->msg_enable = DEFAULT_MSG;
@@ -2972,7 +2971,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
2972 netif_napi_add(dev, &gp->napi, gem_poll, 64); 2971 netif_napi_add(dev, &gp->napi, gem_poll, 64);
2973 dev->ethtool_ops = &gem_ethtool_ops; 2972 dev->ethtool_ops = &gem_ethtool_ops;
2974 dev->watchdog_timeo = 5 * HZ; 2973 dev->watchdog_timeo = 5 * HZ;
2975 dev->irq = pdev->irq;
2976 dev->dma = 0; 2974 dev->dma = 0;
2977 2975
2978 /* Set that now, in case PM kicks in now */ 2976 /* Set that now, in case PM kicks in now */
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index b95e7e681b38..dfc00c4683e5 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2182,11 +2182,12 @@ static int happy_meal_open(struct net_device *dev)
2182 * into a single source which we register handling at probe time. 2182 * into a single source which we register handling at probe time.
2183 */ 2183 */
2184 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) { 2184 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2185 if (request_irq(dev->irq, happy_meal_interrupt, 2185 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
2186 IRQF_SHARED, dev->name, (void *)dev)) { 2186 dev->name, dev);
2187 if (res) {
2187 HMD(("EAGAIN\n")); 2188 HMD(("EAGAIN\n"));
2188 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n", 2189 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
2189 dev->irq); 2190 hp->irq);
2190 2191
2191 return -EAGAIN; 2192 return -EAGAIN;
2192 } 2193 }
@@ -2199,7 +2200,7 @@ static int happy_meal_open(struct net_device *dev)
2199 spin_unlock_irq(&hp->happy_lock); 2200 spin_unlock_irq(&hp->happy_lock);
2200 2201
2201 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)) 2202 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2202 free_irq(dev->irq, dev); 2203 free_irq(hp->irq, dev);
2203 return res; 2204 return res;
2204} 2205}
2205 2206
@@ -2221,7 +2222,7 @@ static int happy_meal_close(struct net_device *dev)
2221 * time and never unregister. 2222 * time and never unregister.
2222 */ 2223 */
2223 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) 2224 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2224 free_irq(dev->irq, dev); 2225 free_irq(hp->irq, dev);
2225 2226
2226 return 0; 2227 return 0;
2227} 2228}
@@ -2777,7 +2778,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
2777 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; 2778 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2778 dev->features |= dev->hw_features | NETIF_F_RXCSUM; 2779 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2779 2780
2780 dev->irq = op->archdata.irqs[0]; 2781 hp->irq = op->archdata.irqs[0];
2781 2782
2782#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) 2783#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
2783 /* Hook up SBUS register/descriptor accessors. */ 2784 /* Hook up SBUS register/descriptor accessors. */
@@ -2981,8 +2982,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
2981 if (hme_version_printed++ == 0) 2982 if (hme_version_printed++ == 0)
2982 printk(KERN_INFO "%s", version); 2983 printk(KERN_INFO "%s", version);
2983 2984
2984 dev->base_addr = (long) pdev;
2985
2986 hp = netdev_priv(dev); 2985 hp = netdev_priv(dev);
2987 2986
2988 hp->happy_dev = pdev; 2987 hp->happy_dev = pdev;
@@ -3087,12 +3086,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3087 3086
3088 init_timer(&hp->happy_timer); 3087 init_timer(&hp->happy_timer);
3089 3088
3089 hp->irq = pdev->irq;
3090 hp->dev = dev; 3090 hp->dev = dev;
3091 dev->netdev_ops = &hme_netdev_ops; 3091 dev->netdev_ops = &hme_netdev_ops;
3092 dev->watchdog_timeo = 5*HZ; 3092 dev->watchdog_timeo = 5*HZ;
3093 dev->ethtool_ops = &hme_ethtool_ops; 3093 dev->ethtool_ops = &hme_ethtool_ops;
3094 dev->irq = pdev->irq;
3095 dev->dma = 0;
3096 3094
3097 /* Happy Meal can do it all... */ 3095 /* Happy Meal can do it all... */
3098 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; 3096 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 64f278360d89..f4307654e4ae 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -432,6 +432,7 @@ struct happy_meal {
432 432
433 dma_addr_t hblock_dvma; /* DVMA visible address happy block */ 433 dma_addr_t hblock_dvma; /* DVMA visible address happy block */
434 unsigned int happy_flags; /* Driver state flags */ 434 unsigned int happy_flags; /* Driver state flags */
435 int irq;
435 enum happy_transceiver tcvr_type; /* Kind of transceiver in use */ 436 enum happy_transceiver tcvr_type; /* Kind of transceiver in use */
436 unsigned int happy_bursts; /* Get your mind out of the gutter */ 437 unsigned int happy_bursts; /* Get your mind out of the gutter */
437 unsigned int paddr; /* PHY address for transceiver */ 438 unsigned int paddr; /* PHY address for transceiver */
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 38e3ae9155b7..a108db35924e 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -618,7 +618,7 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
618 struct vnet_port *port; 618 struct vnet_port *port;
619 619
620 hlist_for_each_entry(port, n, hp, hash) { 620 hlist_for_each_entry(port, n, hp, hash) {
621 if (!compare_ether_addr(port->raddr, skb->data)) 621 if (ether_addr_equal(port->raddr, skb->data))
622 return port; 622 return port;
623 } 623 }
624 port = NULL; 624 port = NULL;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ad973ffc9ff3..8846516678c3 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1317,7 +1317,7 @@ static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
1317 1317
1318static void print_rxfd(struct rxf_desc *rxfd) 1318static void print_rxfd(struct rxf_desc *rxfd)
1319{ 1319{
1320 DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n" 1320 DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
1321 "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n", 1321 "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
1322 rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len); 1322 rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
1323} 1323}
@@ -1988,10 +1988,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1988 /* these fields are used for info purposes only 1988 /* these fields are used for info purposes only
1989 * so we can have them same for all ports of the board */ 1989 * so we can have them same for all ports of the board */
1990 ndev->if_port = port; 1990 ndev->if_port = port;
1991 ndev->base_addr = pciaddr;
1992 ndev->mem_start = pciaddr;
1993 ndev->mem_end = pciaddr + regionSize;
1994 ndev->irq = pdev->irq;
1995 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO 1991 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
1996 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 1992 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1997 NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM 1993 NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 34558766cbf0..d614c374ed9d 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -92,7 +92,7 @@ enum cpdma_state {
92 CPDMA_STATE_TEARDOWN, 92 CPDMA_STATE_TEARDOWN,
93}; 93};
94 94
95const char *cpdma_state_str[] = { "idle", "active", "teardown" }; 95static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
96 96
97struct cpdma_ctlr { 97struct cpdma_ctlr {
98 enum cpdma_state state; 98 enum cpdma_state state;
@@ -276,6 +276,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
276 ctlr->num_chan = CPDMA_MAX_CHANNELS; 276 ctlr->num_chan = CPDMA_MAX_CHANNELS;
277 return ctlr; 277 return ctlr;
278} 278}
279EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
279 280
280int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) 281int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
281{ 282{
@@ -321,6 +322,7 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
321 spin_unlock_irqrestore(&ctlr->lock, flags); 322 spin_unlock_irqrestore(&ctlr->lock, flags);
322 return 0; 323 return 0;
323} 324}
325EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
324 326
325int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) 327int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
326{ 328{
@@ -351,6 +353,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
351 spin_unlock_irqrestore(&ctlr->lock, flags); 353 spin_unlock_irqrestore(&ctlr->lock, flags);
352 return 0; 354 return 0;
353} 355}
356EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
354 357
355int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) 358int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
356{ 359{
@@ -421,6 +424,7 @@ int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
421 spin_unlock_irqrestore(&ctlr->lock, flags); 424 spin_unlock_irqrestore(&ctlr->lock, flags);
422 return 0; 425 return 0;
423} 426}
427EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);
424 428
425int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) 429int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
426{ 430{
@@ -444,6 +448,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
444 kfree(ctlr); 448 kfree(ctlr);
445 return ret; 449 return ret;
446} 450}
451EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
447 452
448int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) 453int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
449{ 454{
@@ -528,6 +533,7 @@ err_chan_busy:
528err_chan_alloc: 533err_chan_alloc:
529 return ERR_PTR(ret); 534 return ERR_PTR(ret);
530} 535}
536EXPORT_SYMBOL_GPL(cpdma_chan_create);
531 537
532int cpdma_chan_destroy(struct cpdma_chan *chan) 538int cpdma_chan_destroy(struct cpdma_chan *chan)
533{ 539{
@@ -545,6 +551,7 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
545 kfree(chan); 551 kfree(chan);
546 return 0; 552 return 0;
547} 553}
554EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
548 555
549int cpdma_chan_get_stats(struct cpdma_chan *chan, 556int cpdma_chan_get_stats(struct cpdma_chan *chan,
550 struct cpdma_chan_stats *stats) 557 struct cpdma_chan_stats *stats)
@@ -693,6 +700,7 @@ unlock_ret:
693 spin_unlock_irqrestore(&chan->lock, flags); 700 spin_unlock_irqrestore(&chan->lock, flags);
694 return ret; 701 return ret;
695} 702}
703EXPORT_SYMBOL_GPL(cpdma_chan_submit);
696 704
697static void __cpdma_chan_free(struct cpdma_chan *chan, 705static void __cpdma_chan_free(struct cpdma_chan *chan,
698 struct cpdma_desc __iomem *desc, 706 struct cpdma_desc __iomem *desc,
@@ -776,6 +784,7 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota)
776 } 784 }
777 return used; 785 return used;
778} 786}
787EXPORT_SYMBOL_GPL(cpdma_chan_process);
779 788
780int cpdma_chan_start(struct cpdma_chan *chan) 789int cpdma_chan_start(struct cpdma_chan *chan)
781{ 790{
@@ -803,6 +812,7 @@ int cpdma_chan_start(struct cpdma_chan *chan)
803 spin_unlock_irqrestore(&chan->lock, flags); 812 spin_unlock_irqrestore(&chan->lock, flags);
804 return 0; 813 return 0;
805} 814}
815EXPORT_SYMBOL_GPL(cpdma_chan_start);
806 816
807int cpdma_chan_stop(struct cpdma_chan *chan) 817int cpdma_chan_stop(struct cpdma_chan *chan)
808{ 818{
@@ -863,6 +873,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
863 spin_unlock_irqrestore(&chan->lock, flags); 873 spin_unlock_irqrestore(&chan->lock, flags);
864 return 0; 874 return 0;
865} 875}
876EXPORT_SYMBOL_GPL(cpdma_chan_stop);
866 877
867int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable) 878int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
868{ 879{
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 174a3348f676..4da93a5d7ec6 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -627,6 +627,7 @@ static const struct ethtool_ops ethtool_ops = {
627 .get_link = ethtool_op_get_link, 627 .get_link = ethtool_op_get_link,
628 .get_coalesce = emac_get_coalesce, 628 .get_coalesce = emac_get_coalesce,
629 .set_coalesce = emac_set_coalesce, 629 .set_coalesce = emac_set_coalesce,
630 .get_ts_info = ethtool_op_get_ts_info,
630}; 631};
631 632
632/** 633/**
@@ -1511,7 +1512,7 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
1511 1512
1512static int match_first_device(struct device *dev, void *data) 1513static int match_first_device(struct device *dev, void *data)
1513{ 1514{
1514 return 1; 1515 return !strncmp(dev_name(dev), "davinci_mdio", 12);
1515} 1516}
1516 1517
1517/** 1518/**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 2757c7d6e633..e4e47088e26b 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
181 __davinci_mdio_reset(data); 181 __davinci_mdio_reset(data);
182 return -EAGAIN; 182 return -EAGAIN;
183 } 183 }
184
185 reg = __raw_readl(&regs->user[0].access);
186 if ((reg & USERACCESS_GO) == 0)
187 return 0;
188
184 dev_err(data->dev, "timed out waiting for user access\n"); 189 dev_err(data->dev, "timed out waiting for user access\n");
185 return -ETIMEDOUT; 190 return -ETIMEDOUT;
186} 191}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 817ad3bc4957..3e6abf0f2771 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -228,7 +228,7 @@ tlan_get_skb(const struct tlan_list *tag)
228 unsigned long addr; 228 unsigned long addr;
229 229
230 addr = tag->buffer[9].address; 230 addr = tag->buffer[9].address;
231 addr |= (tag->buffer[8].address << 16) << 16; 231 addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
232 return (struct sk_buff *) addr; 232 return (struct sk_buff *) addr;
233} 233}
234 234
@@ -2545,7 +2545,7 @@ static void tlan_phy_reset(struct net_device *dev)
2545 2545
2546 phy = priv->phy[priv->phy_num]; 2546 phy = priv->phy[priv->phy_num];
2547 2547
2548 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name); 2548 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
2549 tlan_mii_sync(dev->base_addr); 2549 tlan_mii_sync(dev->base_addr);
2550 value = MII_GC_LOOPBK | MII_GC_RESET; 2550 value = MII_GC_LOOPBK | MII_GC_RESET;
2551 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value); 2551 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 3d501ec7fad7..96070e9b50dc 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -843,7 +843,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
843 if (!is_multicast_ether_addr(buf)) { 843 if (!is_multicast_ether_addr(buf)) {
844 /* Filter packets not for our address. */ 844 /* Filter packets not for our address. */
845 const u8 *mine = dev->dev_addr; 845 const u8 *mine = dev->dev_addr;
846 filter = compare_ether_addr(mine, buf); 846 filter = !ether_addr_equal(mine, buf);
847 } 847 }
848 } 848 }
849 849
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 5c14f82c4954..961c8321451f 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1590,8 +1590,8 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1590 found = 0; 1590 found = 0;
1591 oldest = NULL; 1591 oldest = NULL;
1592 list_for_each_entry(target, &wl->network_list, list) { 1592 list_for_each_entry(target, &wl->network_list, list) {
1593 if (!compare_ether_addr(&target->hwinfo->bssid[2], 1593 if (ether_addr_equal(&target->hwinfo->bssid[2],
1594 &scan_info->bssid[2])) { 1594 &scan_info->bssid[2])) {
1595 found = 1; 1595 found = 1;
1596 pr_debug("%s: same BBS found scanned list\n", 1596 pr_debug("%s: same BBS found scanned list\n",
1597 __func__); 1597 __func__);
@@ -1691,8 +1691,8 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
1691 1691
1692 /* If bss specified, check it only */ 1692 /* If bss specified, check it only */
1693 if (test_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat)) { 1693 if (test_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat)) {
1694 if (!compare_ether_addr(&scan_info->hwinfo->bssid[2], 1694 if (ether_addr_equal(&scan_info->hwinfo->bssid[2],
1695 wl->bssid)) { 1695 wl->bssid)) {
1696 best_bss = scan_info; 1696 best_bss = scan_info;
1697 pr_debug("%s: bssid matched\n", __func__); 1697 pr_debug("%s: bssid matched\n", __func__);
1698 break; 1698 break;
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index fcfa01f7ceb6..0459c096629f 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -689,9 +689,12 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
689#ifdef CONFIG_NET_POLL_CONTROLLER 689#ifdef CONFIG_NET_POLL_CONTROLLER
690static void rhine_poll(struct net_device *dev) 690static void rhine_poll(struct net_device *dev)
691{ 691{
692 disable_irq(dev->irq); 692 struct rhine_private *rp = netdev_priv(dev);
693 rhine_interrupt(dev->irq, (void *)dev); 693 const int irq = rp->pdev->irq;
694 enable_irq(dev->irq); 694
695 disable_irq(irq);
696 rhine_interrupt(irq, dev);
697 enable_irq(irq);
695} 698}
696#endif 699#endif
697 700
@@ -972,7 +975,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
972 } 975 }
973#endif /* USE_MMIO */ 976#endif /* USE_MMIO */
974 977
975 dev->base_addr = (unsigned long)ioaddr;
976 rp->base = ioaddr; 978 rp->base = ioaddr;
977 979
978 /* Get chip registers into a sane state */ 980 /* Get chip registers into a sane state */
@@ -995,8 +997,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
995 if (!phy_id) 997 if (!phy_id)
996 phy_id = ioread8(ioaddr + 0x6C); 998 phy_id = ioread8(ioaddr + 0x6C);
997 999
998 dev->irq = pdev->irq;
999
1000 spin_lock_init(&rp->lock); 1000 spin_lock_init(&rp->lock);
1001 mutex_init(&rp->task_lock); 1001 mutex_init(&rp->task_lock);
1002 INIT_WORK(&rp->reset_task, rhine_reset_task); 1002 INIT_WORK(&rp->reset_task, rhine_reset_task);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 8a5d7c100a5e..ea3e0a21ba74 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2488,8 +2488,8 @@ static int velocity_close(struct net_device *dev)
2488 2488
2489 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) 2489 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2490 velocity_get_ip(vptr); 2490 velocity_get_ip(vptr);
2491 if (dev->irq != 0) 2491
2492 free_irq(dev->irq, dev); 2492 free_irq(vptr->pdev->irq, dev);
2493 2493
2494 velocity_free_rings(vptr); 2494 velocity_free_rings(vptr);
2495 2495
@@ -2755,8 +2755,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2755 if (ret < 0) 2755 if (ret < 0)
2756 goto err_free_dev; 2756 goto err_free_dev;
2757 2757
2758 dev->irq = pdev->irq;
2759
2760 ret = velocity_get_pci_info(vptr, pdev); 2758 ret = velocity_get_pci_info(vptr, pdev);
2761 if (ret < 0) { 2759 if (ret < 0) {
2762 /* error message already printed */ 2760 /* error message already printed */
@@ -2779,8 +2777,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2779 2777
2780 mac_wol_reset(regs); 2778 mac_wol_reset(regs);
2781 2779
2782 dev->base_addr = vptr->ioaddr;
2783
2784 for (i = 0; i < 6; i++) 2780 for (i = 0; i < 6; i++)
2785 dev->dev_addr[i] = readb(&regs->PAR[i]); 2781 dev->dev_addr[i] = readb(&regs->PAR[i]);
2786 2782
@@ -2806,7 +2802,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2806 2802
2807 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); 2803 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2808 2804
2809 dev->irq = pdev->irq;
2810 dev->netdev_ops = &velocity_netdev_ops; 2805 dev->netdev_ops = &velocity_netdev_ops;
2811 dev->ethtool_ops = &velocity_ethtool_ops; 2806 dev->ethtool_ops = &velocity_ethtool_ops;
2812 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2807 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
new file mode 100644
index 000000000000..cb18043f5830
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -0,0 +1,73 @@
1#
2# WIZnet devices configuration
3#
4
5config NET_VENDOR_WIZNET
6 bool "WIZnet devices"
7 default y
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y
10 and read the Ethernet-HOWTO, available from
11 <http://www.tldp.org/docs.html#howto>.
12
13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all
15 the questions about WIZnet devices. If you say Y, you will be asked
16 for your specific card in the following questions.
17
18if NET_VENDOR_WIZNET
19
20config WIZNET_W5100
21 tristate "WIZnet W5100 Ethernet support"
22 depends on HAS_IOMEM
23 ---help---
24 Support for WIZnet W5100 chips.
25
26 W5100 is a single chip with integrated 10/100 Ethernet MAC,
27 PHY and hardware TCP/IP stack, but this driver is limited to
28 the MAC and PHY functions only, onchip TCP/IP is unused.
29
30 To compile this driver as a module, choose M here: the module
31 will be called w5100.
32
33config WIZNET_W5300
34 tristate "WIZnet W5300 Ethernet support"
35 depends on HAS_IOMEM
36 ---help---
37 Support for WIZnet W5300 chips.
38
39 W5300 is a single chip with integrated 10/100 Ethernet MAC,
40 PHY and hardware TCP/IP stack, but this driver is limited to
41 the MAC and PHY functions only, onchip TCP/IP is unused.
42
43 To compile this driver as a module, choose M here: the module
44 will be called w5300.
45
46choice
47 prompt "WIZnet interface mode"
48 depends on WIZNET_W5100 || WIZNET_W5300
49 default WIZNET_BUS_ANY
50
51config WIZNET_BUS_DIRECT
52 bool "Direct address bus mode"
53 ---help---
54 In direct address mode host system can directly access all registers
55 after mapping to Memory-Mapped I/O space.
56
57config WIZNET_BUS_INDIRECT
58 bool "Indirect address bus mode"
59 ---help---
60 In indirect address mode host system indirectly accesses registers
61 using Indirect Mode Address Register and Indirect Mode Data Register,
62 which are directly mapped to Memory-Mapped I/O space.
63
64config WIZNET_BUS_ANY
65 bool "Select interface mode in runtime"
66 ---help---
67 If interface mode is unknown in compile time, it can be selected
68 in runtime from board/platform resources configuration.
69
70 Performance may decrease compared to explicitly selected bus mode.
71endchoice
72
73endif # NET_VENDOR_WIZNET
diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile
new file mode 100644
index 000000000000..c614535227e8
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_WIZNET_W5100) += w5100.o
2obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
new file mode 100644
index 000000000000..a75e9ef5a4ce
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -0,0 +1,808 @@
1/*
2 * Ethernet driver for the WIZnet W5100 chip.
3 *
4 * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
5 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
6 *
7 * Licensed under the GPL-2 or later.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/kconfig.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/platform_device.h>
16#include <linux/platform_data/wiznet.h>
17#include <linux/ethtool.h>
18#include <linux/skbuff.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/delay.h>
22#include <linux/slab.h>
23#include <linux/spinlock.h>
24#include <linux/io.h>
25#include <linux/ioport.h>
26#include <linux/interrupt.h>
27#include <linux/irq.h>
28#include <linux/gpio.h>
29
30#define DRV_NAME "w5100"
31#define DRV_VERSION "2012-04-04"
32
33MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
34MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
35MODULE_ALIAS("platform:"DRV_NAME);
36MODULE_LICENSE("GPL");
37
38/*
39 * Registers
40 */
41#define W5100_COMMON_REGS 0x0000
42#define W5100_MR 0x0000 /* Mode Register */
43#define MR_RST 0x80 /* S/W reset */
44#define MR_PB 0x10 /* Ping block */
45#define MR_AI 0x02 /* Address Auto-Increment */
46#define MR_IND 0x01 /* Indirect mode */
47#define W5100_SHAR 0x0009 /* Source MAC address */
48#define W5100_IR 0x0015 /* Interrupt Register */
49#define W5100_IMR 0x0016 /* Interrupt Mask Register */
50#define IR_S0 0x01 /* S0 interrupt */
51#define W5100_RTR 0x0017 /* Retry Time-value Register */
52#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
53#define W5100_RMSR 0x001a /* Receive Memory Size */
54#define W5100_TMSR 0x001b /* Transmit Memory Size */
55#define W5100_COMMON_REGS_LEN 0x0040
56
57#define W5100_S0_REGS 0x0400
58#define W5100_S0_MR 0x0400 /* S0 Mode Register */
59#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscous) */
60#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */
61#define W5100_S0_CR 0x0401 /* S0 Command Register */
62#define S0_CR_OPEN 0x01 /* OPEN command */
63#define S0_CR_CLOSE 0x10 /* CLOSE command */
64#define S0_CR_SEND 0x20 /* SEND command */
65#define S0_CR_RECV 0x40 /* RECV command */
66#define W5100_S0_IR 0x0402 /* S0 Interrupt Register */
67#define S0_IR_SENDOK 0x10 /* complete sending */
68#define S0_IR_RECV 0x04 /* receiving data */
69#define W5100_S0_SR 0x0403 /* S0 Status Register */
70#define S0_SR_MACRAW 0x42 /* mac raw mode */
71#define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */
72#define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */
73#define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */
74#define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */
75#define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */
76#define W5100_S0_REGS_LEN 0x0040
77
78#define W5100_TX_MEM_START 0x4000
79#define W5100_TX_MEM_END 0x5fff
80#define W5100_TX_MEM_MASK 0x1fff
81#define W5100_RX_MEM_START 0x6000
82#define W5100_RX_MEM_END 0x7fff
83#define W5100_RX_MEM_MASK 0x1fff
84
85/*
86 * Device driver private data structure
87 */
88struct w5100_priv {
89 void __iomem *base;
90 spinlock_t reg_lock;
91 bool indirect;
92 u8 (*read)(struct w5100_priv *priv, u16 addr);
93 void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
94 u16 (*read16)(struct w5100_priv *priv, u16 addr);
95 void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
96 void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
97 void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
98 int irq;
99 int link_irq;
100 int link_gpio;
101
102 struct napi_struct napi;
103 struct net_device *ndev;
104 bool promisc;
105 u32 msg_enable;
106};
107
108/************************************************************************
109 *
110 * Lowlevel I/O functions
111 *
112 ***********************************************************************/
113
114/*
115 * In direct address mode host system can directly access W5100 registers
116 * after mapping to Memory-Mapped I/O space.
117 *
118 * 0x8000 bytes are required for memory space.
119 */
120static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
121{
122 return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
123}
124
125static inline void w5100_write_direct(struct w5100_priv *priv,
126 u16 addr, u8 data)
127{
128 iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
129}
130
131static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
132{
133 u16 data;
134 data = w5100_read_direct(priv, addr) << 8;
135 data |= w5100_read_direct(priv, addr + 1);
136 return data;
137}
138
139static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
140{
141 w5100_write_direct(priv, addr, data >> 8);
142 w5100_write_direct(priv, addr + 1, data);
143}
144
145static void w5100_readbuf_direct(struct w5100_priv *priv,
146 u16 offset, u8 *buf, int len)
147{
148 u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
149 int i;
150
151 for (i = 0; i < len; i++, addr++) {
152 if (unlikely(addr > W5100_RX_MEM_END))
153 addr = W5100_RX_MEM_START;
154 *buf++ = w5100_read_direct(priv, addr);
155 }
156}
157
158static void w5100_writebuf_direct(struct w5100_priv *priv,
159 u16 offset, u8 *buf, int len)
160{
161 u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
162 int i;
163
164 for (i = 0; i < len; i++, addr++) {
165 if (unlikely(addr > W5100_TX_MEM_END))
166 addr = W5100_TX_MEM_START;
167 w5100_write_direct(priv, addr, *buf++);
168 }
169}
170
171/*
172 * In indirect address mode host system indirectly accesses registers by
173 * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
174 * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space.
175 * Mode Register (MR) is directly accessible.
176 *
177 * Only 0x04 bytes are required for memory space.
178 */
179#define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */
180#define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */
181
182static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
183{
184 unsigned long flags;
185 u8 data;
186
187 spin_lock_irqsave(&priv->reg_lock, flags);
188 w5100_write16_direct(priv, W5100_IDM_AR, addr);
189 mmiowb();
190 data = w5100_read_direct(priv, W5100_IDM_DR);
191 spin_unlock_irqrestore(&priv->reg_lock, flags);
192
193 return data;
194}
195
196static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
197{
198 unsigned long flags;
199
200 spin_lock_irqsave(&priv->reg_lock, flags);
201 w5100_write16_direct(priv, W5100_IDM_AR, addr);
202 mmiowb();
203 w5100_write_direct(priv, W5100_IDM_DR, data);
204 mmiowb();
205 spin_unlock_irqrestore(&priv->reg_lock, flags);
206}
207
208static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
209{
210 unsigned long flags;
211 u16 data;
212
213 spin_lock_irqsave(&priv->reg_lock, flags);
214 w5100_write16_direct(priv, W5100_IDM_AR, addr);
215 mmiowb();
216 data = w5100_read_direct(priv, W5100_IDM_DR) << 8;
217 data |= w5100_read_direct(priv, W5100_IDM_DR);
218 spin_unlock_irqrestore(&priv->reg_lock, flags);
219
220 return data;
221}
222
223static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
224{
225 unsigned long flags;
226
227 spin_lock_irqsave(&priv->reg_lock, flags);
228 w5100_write16_direct(priv, W5100_IDM_AR, addr);
229 mmiowb();
230 w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
231 w5100_write_direct(priv, W5100_IDM_DR, data);
232 mmiowb();
233 spin_unlock_irqrestore(&priv->reg_lock, flags);
234}
235
236static void w5100_readbuf_indirect(struct w5100_priv *priv,
237 u16 offset, u8 *buf, int len)
238{
239 u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
240 unsigned long flags;
241 int i;
242
243 spin_lock_irqsave(&priv->reg_lock, flags);
244 w5100_write16_direct(priv, W5100_IDM_AR, addr);
245 mmiowb();
246
247 for (i = 0; i < len; i++, addr++) {
248 if (unlikely(addr > W5100_RX_MEM_END)) {
249 addr = W5100_RX_MEM_START;
250 w5100_write16_direct(priv, W5100_IDM_AR, addr);
251 mmiowb();
252 }
253 *buf++ = w5100_read_direct(priv, W5100_IDM_DR);
254 }
255 mmiowb();
256 spin_unlock_irqrestore(&priv->reg_lock, flags);
257}
258
/* Copy @len bytes from @buf into the chip's TX memory at ring @offset
 * using the indirect interface.  Mirrors w5100_readbuf_indirect(): the
 * TX window wraps at W5100_TX_MEM_END and the address register is
 * re-latched on wrap; reg_lock covers the whole burst.
 */
static void w5100_writebuf_indirect(struct w5100_priv *priv,
				    u16 offset, u8 *buf, int len)
{
	u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5100_write16_direct(priv, W5100_IDM_AR, addr);
	mmiowb();

	for (i = 0; i < len; i++, addr++) {
		if (unlikely(addr > W5100_TX_MEM_END)) {
			/* ring wrapped: restart at the window base */
			addr = W5100_TX_MEM_START;
			w5100_write16_direct(priv, W5100_IDM_AR, addr);
			mmiowb();
		}
		w5100_write_direct(priv, W5100_IDM_DR, *buf++);
	}
	mmiowb();
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
281
/* Compile-time binding of the register accessors.  When the bus type is
 * fixed by Kconfig the accessors resolve directly (no indirection cost);
 * with CONFIG_WIZNET_BUS_ANY they dispatch through the function pointers
 * selected at probe time.  NOTE: the BUS_ANY variants expand the token
 * "priv", so every caller must have a local variable of that name.
 */
#if defined(CONFIG_WIZNET_BUS_DIRECT)
#define w5100_read	w5100_read_direct
#define w5100_write	w5100_write_direct
#define w5100_read16	w5100_read16_direct
#define w5100_write16	w5100_write16_direct
#define w5100_readbuf	w5100_readbuf_direct
#define w5100_writebuf	w5100_writebuf_direct

#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
#define w5100_read	w5100_read_indirect
#define w5100_write	w5100_write_indirect
#define w5100_read16	w5100_read16_indirect
#define w5100_write16	w5100_write16_indirect
#define w5100_readbuf	w5100_readbuf_indirect
#define w5100_writebuf	w5100_writebuf_indirect

#else /* CONFIG_WIZNET_BUS_ANY */
#define w5100_read	priv->read
#define w5100_write	priv->write
#define w5100_read16	priv->read16
#define w5100_write16	priv->write16
#define w5100_readbuf	priv->readbuf
#define w5100_writebuf	priv->writebuf
#endif
306
/* Issue a socket-0 command and busy-wait (up to 100 ms) until the chip
 * clears the command register to signal acceptance.
 *
 * Returns 0 on success, -EIO if the command is not accepted in time.
 */
static int w5100_command(struct w5100_priv *priv, u16 cmd)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	w5100_write(priv, W5100_S0_CR, cmd);
	mmiowb();

	/* S0_CR reads back non-zero until the chip has taken the command */
	while (w5100_read(priv, W5100_S0_CR) != 0) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}
322
/* Program the netdev's current MAC address into the chip's source
 * hardware address registers (SHAR), one byte per register.
 */
static void w5100_write_macaddr(struct w5100_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
	mmiowb();
}
332
/* Soft-reset the chip and restore baseline configuration: bus mode
 * (indirect mode needs MR_AI address auto-increment), interrupts masked,
 * MAC address, and the RX/TX memory split.  Uses the *_direct accessors
 * for MR because the mode register must be reachable before indirect
 * addressing is (re)enabled.
 */
static void w5100_hw_reset(struct w5100_priv *priv)
{
	w5100_write_direct(priv, W5100_MR, MR_RST);
	mmiowb();
	mdelay(5);	/* let the soft reset complete */
	w5100_write_direct(priv, W5100_MR, priv->indirect ?
				  MR_PB | MR_AI | MR_IND :
				  MR_PB);
	mmiowb();
	w5100_write(priv, W5100_IMR, 0);	/* mask all interrupts */
	w5100_write_macaddr(priv);

	/* Configure 16K of internal memory
	 * as 8K RX buffer and 8K TX buffer
	 */
	w5100_write(priv, W5100_RMSR, 0x03);
	w5100_write(priv, W5100_TMSR, 0x03);
	mmiowb();
}
352
/* Open socket 0 in MAC-RAW mode (filtered unless promiscuous mode is
 * requested) and unmask its interrupt.
 */
static void w5100_hw_start(struct w5100_priv *priv)
{
	w5100_write(priv, W5100_S0_MR, priv->promisc ?
			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
	mmiowb();
	w5100_command(priv, S0_CR_OPEN);
	w5100_write(priv, W5100_IMR, IR_S0);
	mmiowb();
}
362
/* Mask all chip interrupts, then close socket 0. */
static void w5100_hw_close(struct w5100_priv *priv)
{
	w5100_write(priv, W5100_IMR, 0);
	mmiowb();
	w5100_command(priv, S0_CR_CLOSE);
}
369
370/***********************************************************************
371 *
372 * Device driver functions / callbacks
373 *
374 ***********************************************************************/
375
376static void w5100_get_drvinfo(struct net_device *ndev,
377 struct ethtool_drvinfo *info)
378{
379 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
380 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
381 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
382 sizeof(info->bus_info));
383}
384
385static u32 w5100_get_link(struct net_device *ndev)
386{
387 struct w5100_priv *priv = netdev_priv(ndev);
388
389 if (gpio_is_valid(priv->link_gpio))
390 return !!gpio_get_value(priv->link_gpio);
391
392 return 1;
393}
394
395static u32 w5100_get_msglevel(struct net_device *ndev)
396{
397 struct w5100_priv *priv = netdev_priv(ndev);
398
399 return priv->msg_enable;
400}
401
402static void w5100_set_msglevel(struct net_device *ndev, u32 value)
403{
404 struct w5100_priv *priv = netdev_priv(ndev);
405
406 priv->msg_enable = value;
407}
408
409static int w5100_get_regs_len(struct net_device *ndev)
410{
411 return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
412}
413
/* ethtool: dump the common register block followed by the socket-0
 * block, one byte per register, into the caller-provided buffer
 * (sized by w5100_get_regs_len()).
 */
static void w5100_get_regs(struct net_device *ndev,
			   struct ethtool_regs *regs, void *_buf)
{
	struct w5100_priv *priv = netdev_priv(ndev);
	u8 *buf = _buf;
	u16 i;

	regs->version = 1;
	for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
		*buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
	for (i = 0; i < W5100_S0_REGS_LEN; i++)
		*buf++ = w5100_read(priv, W5100_S0_REGS + i);
}
427
/* netdev watchdog callback: recover from a stuck transmitter by fully
 * resetting and restarting the hardware, then re-enable the queue.
 */
static void w5100_tx_timeout(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	w5100_hw_reset(priv);
	w5100_hw_start(priv);
	ndev->stats.tx_errors++;
	ndev->trans_start = jiffies;	/* restart the watchdog timer */
	netif_wake_queue(ndev);
}
439
/* ndo_start_xmit: copy the frame into the chip's TX ring at the current
 * write pointer, advance the pointer, and issue SEND.  The queue is
 * stopped here and only woken from the SENDOK interrupt, so at most one
 * frame is in flight at a time.
 */
static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);
	u16 offset;

	netif_stop_queue(ndev);

	offset = w5100_read16(priv, W5100_S0_TX_WR);
	w5100_writebuf(priv, offset, skb->data, skb->len);
	w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
	mmiowb();
	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;
	dev_kfree_skb(skb);	/* data already copied to chip memory */

	w5100_command(priv, S0_CR_SEND);

	return NETDEV_TX_OK;
}
459
/* NAPI poll: drain up to @budget frames from the chip's RX ring.
 *
 * Each frame is prefixed by a 2-byte big-endian length header that
 * includes the header itself (hence the "- 2").  After consuming a
 * frame the read pointer is advanced and RECV is issued so the chip
 * can reclaim the space.  When fewer than @budget frames were read,
 * the socket interrupt is unmasked and NAPI is completed.
 *
 * NOTE(review): returning -ENOMEM from a NAPI poll handler is unusual
 * (callers expect the number of packets processed); the skb-alloc
 * failure path also leaves NAPI scheduled with IMR masked — confirm
 * intended behavior.
 */
static int w5100_napi_poll(struct napi_struct *napi, int budget)
{
	struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	int rx_count;
	u16 rx_len;
	u16 offset;
	u8 header[2];

	for (rx_count = 0; rx_count < budget; rx_count++) {
		u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
		if (rx_buf_len == 0)
			break;	/* ring empty */

		offset = w5100_read16(priv, W5100_S0_RX_RD);
		w5100_readbuf(priv, offset, header, 2);
		rx_len = get_unaligned_be16(header) - 2;

		skb = netdev_alloc_skb_ip_align(ndev, rx_len);
		if (unlikely(!skb)) {
			/* drop everything pending and bail out */
			w5100_write16(priv, W5100_S0_RX_RD,
					    offset + rx_buf_len);
			w5100_command(priv, S0_CR_RECV);
			ndev->stats.rx_dropped++;
			return -ENOMEM;
		}

		skb_put(skb, rx_len);
		w5100_readbuf(priv, offset + 2, skb->data, rx_len);
		w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
		mmiowb();
		w5100_command(priv, S0_CR_RECV);
		skb->protocol = eth_type_trans(skb, ndev);

		netif_receive_skb(skb);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += rx_len;
	}

	if (rx_count < budget) {
		/* all work done: re-enable the RX interrupt and stop polling */
		w5100_write(priv, W5100_IMR, IR_S0);
		mmiowb();
		napi_complete(napi);
	}

	return rx_count;
}
508
/* Main interrupt handler: acknowledge socket-0 events, wake the TX
 * queue on SENDOK, and hand RX work to NAPI (masking the chip's
 * interrupt until the poll loop has drained the ring).
 */
static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5100_priv *priv = netdev_priv(ndev);

	int ir = w5100_read(priv, W5100_S0_IR);
	if (!ir)
		return IRQ_NONE;	/* not ours (shared line) */
	w5100_write(priv, W5100_S0_IR, ir);	/* ack what we saw */
	mmiowb();

	if (ir & S0_IR_SENDOK) {
		netif_dbg(priv, tx_done, ndev, "tx done\n");
		netif_wake_queue(ndev);
	}

	if (ir & S0_IR_RECV) {
		if (napi_schedule_prep(&priv->napi)) {
			/* mask chip IRQs until w5100_napi_poll() finishes */
			w5100_write(priv, W5100_IMR, 0);
			mmiowb();
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}
535
/* Threaded/edge IRQ handler for the optional link-detect GPIO: mirror
 * the GPIO level into the netdev carrier state while the interface is
 * running.
 */
static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		if (gpio_get_value(priv->link_gpio) != 0) {
			netif_info(priv, link, ndev, "link is up\n");
			netif_carrier_on(ndev);
		} else {
			netif_info(priv, link, ndev, "link is down\n");
			netif_carrier_off(ndev);
		}
	}

	return IRQ_HANDLED;
}
553
554static void w5100_set_rx_mode(struct net_device *ndev)
555{
556 struct w5100_priv *priv = netdev_priv(ndev);
557 bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
558
559 if (priv->promisc != set_promisc) {
560 priv->promisc = set_promisc;
561 w5100_hw_start(priv);
562 }
563}
564
/* ndo_set_mac_address: validate and adopt the new MAC address, clear
 * the random-address flag, and program it into the chip.
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address.
 */
static int w5100_set_macaddr(struct net_device *ndev, void *addr)
{
	struct w5100_priv *priv = netdev_priv(ndev);
	struct sockaddr *sock_addr = addr;

	if (!is_valid_ether_addr(sock_addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
	ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
	w5100_write_macaddr(priv);
	return 0;
}
577
/* ndo_open: start the hardware, enable NAPI and the TX queue, and set
 * the initial carrier state (up if there is no link GPIO, otherwise
 * from the GPIO level).
 */
static int w5100_open(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifup, ndev, "enabling\n");
	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EINVAL;
	w5100_hw_start(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);
	if (!gpio_is_valid(priv->link_gpio) ||
	    gpio_get_value(priv->link_gpio) != 0)
		netif_carrier_on(ndev);
	return 0;
}
593
/* ndo_stop: close the hardware, drop carrier, and quiesce the TX queue
 * and NAPI (reverse of w5100_open()).
 */
static int w5100_stop(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifdown, ndev, "shutting down\n");
	w5100_hw_close(priv);
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	return 0;
}
605
/* ethtool operations exported by this driver */
static const struct ethtool_ops w5100_ethtool_ops = {
	.get_drvinfo		= w5100_get_drvinfo,
	.get_msglevel		= w5100_get_msglevel,
	.set_msglevel		= w5100_set_msglevel,
	.get_link		= w5100_get_link,
	.get_regs_len		= w5100_get_regs_len,
	.get_regs		= w5100_get_regs,
};
614
/* net_device operations; generic ethernet helpers fill the address
 * validation and MTU-change hooks.
 */
static const struct net_device_ops w5100_netdev_ops = {
	.ndo_open		= w5100_open,
	.ndo_stop		= w5100_stop,
	.ndo_start_xmit		= w5100_start_tx,
	.ndo_tx_timeout		= w5100_tx_timeout,
	.ndo_set_rx_mode	= w5100_set_rx_mode,
	.ndo_set_mac_address	= w5100_set_macaddr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
625
/* Hardware half of probe: pick a MAC address (platform data or random),
 * map the register window, choose direct vs. indirect accessors from
 * the mapping size, verify the chip by reading RTR, and wire up the
 * main and optional link-detect interrupts.
 *
 * Resources are devm-managed or released by w5100_remove(); returns 0
 * or a negative errno.
 */
static int __devinit w5100_hw_probe(struct platform_device *pdev)
{
	struct wiznet_platform_data *data = pdev->dev.platform_data;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);
	const char *name = netdev_name(ndev);
	struct resource *mem;
	int mem_size;
	int irq;
	int ret;

	if (data && is_valid_ether_addr(data->mac_addr)) {
		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
	} else {
		/* no (valid) platform MAC: generate a random one */
		random_ether_addr(ndev->dev_addr);
		ndev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENXIO;
	mem_size = resource_size(mem);
	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
		return -EBUSY;
	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
	if (!priv->base)
		return -EBUSY;

	spin_lock_init(&priv->reg_lock);
	/* a small register window implies the indirect bus interface */
	priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
	if (priv->indirect) {
		priv->read     = w5100_read_indirect;
		priv->write    = w5100_write_indirect;
		priv->read16   = w5100_read16_indirect;
		priv->write16  = w5100_write16_indirect;
		priv->readbuf  = w5100_readbuf_indirect;
		priv->writebuf = w5100_writebuf_indirect;
	} else {
		priv->read     = w5100_read_direct;
		priv->write    = w5100_write_direct;
		priv->read16   = w5100_read16_direct;
		priv->write16  = w5100_write16_direct;
		priv->readbuf  = w5100_readbuf_direct;
		priv->writebuf = w5100_writebuf_direct;
	}

	w5100_hw_reset(priv);
	/* sanity-check chip presence via the retry-time register default */
	if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	/* NOTE(review): IRQ_TYPE_LEVEL_LOW is an irq-chip type constant;
	 * request_irq() expects IRQF_TRIGGER_LOW (numerically equal) —
	 * confirm and use the IRQF_* flag for clarity.
	 */
	ret = request_irq(irq, w5100_interrupt,
			  IRQ_TYPE_LEVEL_LOW, name, ndev);
	if (ret < 0)
		return ret;
	priv->irq = irq;

	priv->link_gpio = data ? data->link_gpio : -EINVAL;
	if (gpio_is_valid(priv->link_gpio)) {
		char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
		if (!link_name)
			return -ENOMEM;
		snprintf(link_name, 16, "%s-link", name);
		priv->link_irq = gpio_to_irq(priv->link_gpio);
		/* on failure just fall back to "always up" link reporting */
		if (request_any_context_irq(priv->link_irq, w5100_detect_link,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				link_name, priv->ndev) < 0)
			priv->link_gpio = -EINVAL;
	}

	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
	return 0;
}
701
/* Platform probe: allocate and configure the net_device (ethernet
 * defaults, ops, NAPI with weight 16), register it, then bring up the
 * hardware via w5100_hw_probe().  On failure everything is unwound in
 * reverse order.
 */
static int __devinit w5100_probe(struct platform_device *pdev)
{
	struct w5100_priv *priv;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	priv->ndev = ndev;

	ether_setup(ndev);
	ndev->netdev_ops = &w5100_netdev_ops;
	ndev->ethtool_ops = &w5100_ethtool_ops;
	ndev->watchdog_timeo = HZ;	/* 1 s TX watchdog */
	netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);

	/* This chip doesn't support VLAN packets with normal MTU,
	 * so disable VLAN for this device.
	 */
	ndev->features |= NETIF_F_VLAN_CHALLENGED;

	err = register_netdev(ndev);
	if (err < 0)
		goto err_register;

	err = w5100_hw_probe(pdev);
	if (err < 0)
		goto err_hw_probe;

	return 0;

err_hw_probe:
	unregister_netdev(ndev);
err_register:
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return err;
}
744
/* Platform remove: quiesce the chip, release both interrupts, and tear
 * down the net_device.  The MMIO mapping and region are devm-managed
 * and released automatically.
 */
static int __devexit w5100_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);

	w5100_hw_reset(priv);	/* stop DMA-free but still-live hardware */
	free_irq(priv->irq, ndev);
	if (gpio_is_valid(priv->link_gpio))
		free_irq(priv->link_irq, ndev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
760
761#ifdef CONFIG_PM
/* PM suspend: if the interface is up, detach it from the stack and
 * close the hardware; w5100_resume() restores it.
 */
static int w5100_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_carrier_off(ndev);
		netif_device_detach(ndev);

		w5100_hw_close(priv);
	}
	return 0;
}
776
/* PM resume: if the interface was running at suspend, re-initialise
 * and restart the hardware, reattach the device, and restore carrier
 * from the link GPIO (or unconditionally when no GPIO is configured).
 */
static int w5100_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		w5100_hw_reset(priv);
		w5100_hw_start(priv);

		netif_device_attach(ndev);
		if (!gpio_is_valid(priv->link_gpio) ||
		    gpio_get_value(priv->link_gpio) != 0)
			netif_carrier_on(ndev);
	}
	return 0;
}
794#endif /* CONFIG_PM */
795
/* PM callbacks (no-ops unless CONFIG_PM) and platform-driver glue. */
static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);

static struct platform_driver w5100_driver = {
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.pm	= &w5100_pm_ops,
	},
	.probe		= w5100_probe,
	.remove		= __devexit_p(w5100_remove),
};

module_platform_driver(w5100_driver);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
new file mode 100644
index 000000000000..3306a20ec211
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -0,0 +1,720 @@
1/*
2 * Ethernet driver for the WIZnet W5300 chip.
3 *
4 * Copyright (C) 2008-2009 WIZnet Co.,Ltd.
5 * Copyright (C) 2011 Taehun Kim <kth3321 <at> gmail.com>
6 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/kconfig.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <linux/platform_data/wiznet.h>
18#include <linux/ethtool.h>
19#include <linux/skbuff.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25#include <linux/io.h>
26#include <linux/ioport.h>
27#include <linux/interrupt.h>
28#include <linux/irq.h>
29#include <linux/gpio.h>
30
31#define DRV_NAME "w5300"
32#define DRV_VERSION "2012-04-04"
33
34MODULE_DESCRIPTION("WIZnet W5300 Ethernet driver v"DRV_VERSION);
35MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
36MODULE_ALIAS("platform:"DRV_NAME);
37MODULE_LICENSE("GPL");
38
39/*
40 * Registers
41 */
42#define W5300_MR 0x0000 /* Mode Register */
43#define MR_DBW (1 << 15) /* Data bus width */
44#define MR_MPF (1 << 14) /* Mac layer pause frame */
45#define MR_WDF(n) (((n)&7)<<11) /* Write data fetch time */
46#define MR_RDH (1 << 10) /* Read data hold time */
47#define MR_FS (1 << 8) /* FIFO swap */
48#define MR_RST (1 << 7) /* S/W reset */
49#define MR_PB (1 << 4) /* Ping block */
50#define MR_DBS (1 << 2) /* Data bus swap */
51#define MR_IND (1 << 0) /* Indirect mode */
52#define W5300_IR 0x0002 /* Interrupt Register */
53#define W5300_IMR 0x0004 /* Interrupt Mask Register */
54#define IR_S0 0x0001 /* S0 interrupt */
55#define W5300_SHARL 0x0008 /* Source MAC address (0123) */
56#define W5300_SHARH 0x000c /* Source MAC address (45) */
57#define W5300_TMSRL 0x0020 /* Transmit Memory Size (0123) */
58#define W5300_TMSRH 0x0024 /* Transmit Memory Size (4567) */
59#define W5300_RMSRL 0x0028 /* Receive Memory Size (0123) */
60#define W5300_RMSRH 0x002c /* Receive Memory Size (4567) */
61#define W5300_MTYPE 0x0030 /* Memory Type */
62#define W5300_IDR 0x00fe /* Chip ID register */
63#define IDR_W5300 0x5300 /* =0x5300 for WIZnet W5300 */
64#define W5300_S0_MR 0x0200 /* S0 Mode Register */
65#define S0_MR_CLOSED 0x0000 /* Close mode */
66#define S0_MR_MACRAW 0x0004 /* MAC RAW mode (promiscous) */
67#define S0_MR_MACRAW_MF 0x0044 /* MAC RAW mode (filtered) */
68#define W5300_S0_CR 0x0202 /* S0 Command Register */
69#define S0_CR_OPEN 0x0001 /* OPEN command */
70#define S0_CR_CLOSE 0x0010 /* CLOSE command */
71#define S0_CR_SEND 0x0020 /* SEND command */
72#define S0_CR_RECV 0x0040 /* RECV command */
73#define W5300_S0_IMR 0x0204 /* S0 Interrupt Mask Register */
74#define W5300_S0_IR 0x0206 /* S0 Interrupt Register */
75#define S0_IR_RECV 0x0004 /* Receive interrupt */
76#define S0_IR_SENDOK 0x0010 /* Send OK interrupt */
77#define W5300_S0_SSR 0x0208 /* S0 Socket Status Register */
78#define W5300_S0_TX_WRSR 0x0220 /* S0 TX Write Size Register */
79#define W5300_S0_TX_FSR 0x0224 /* S0 TX Free Size Register */
80#define W5300_S0_RX_RSR 0x0228 /* S0 Received data Size */
81#define W5300_S0_TX_FIFO 0x022e /* S0 Transmit FIFO */
82#define W5300_S0_RX_FIFO 0x0230 /* S0 Receive FIFO */
83#define W5300_REGS_LEN 0x0400
84
85/*
86 * Device driver private data structure
87 */
struct w5300_priv {
	void __iomem *base;	/* mapped register window */
	spinlock_t reg_lock;	/* serialises indirect IDM_AR/IDM_DR access */
	bool indirect;		/* true when using the indirect bus mode */
	/* accessors selected at probe time (used with CONFIG_WIZNET_BUS_ANY) */
	u16  (*read) (struct w5300_priv *priv, u16 addr);
	void (*write)(struct w5300_priv *priv, u16 addr, u16 data);
	int irq;		/* main chip interrupt */
	int link_irq;		/* IRQ derived from link_gpio (if valid) */
	int link_gpio;		/* optional link-detect GPIO, -EINVAL if absent */

	struct napi_struct napi;
	struct net_device *ndev;
	bool promisc;		/* socket 0 in unfiltered MAC-RAW mode */
	u32 msg_enable;		/* netif message-level bitmap (ethtool) */
};
103
104/************************************************************************
105 *
106 * Lowlevel I/O functions
107 *
108 ***********************************************************************/
109
110/*
111 * In direct address mode host system can directly access W5300 registers
112 * after mapping to Memory-Mapped I/O space.
113 *
114 * 0x400 bytes are required for memory space.
115 */
/* Direct-mode 16-bit register read; the register offset is scaled by
 * the board-specific bus shift.
 */
static inline u16 w5300_read_direct(struct w5300_priv *priv, u16 addr)
{
	return ioread16(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
}
120
/* Direct-mode 16-bit register write (see w5300_read_direct()). */
static inline void w5300_write_direct(struct w5300_priv *priv,
				      u16 addr, u16 data)
{
	iowrite16(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
}
126
127/*
128 * In indirect address mode host system indirectly accesses registers by
129 * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
130 * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space.
131 * Mode Register (MR) is directly accessible.
132 *
133 * Only 0x06 bytes are required for memory space.
134 */
135#define W5300_IDM_AR 0x0002 /* Indirect Mode Address */
136#define W5300_IDM_DR 0x0004 /* Indirect Mode Data */
137
/* Indirect-mode register read: latch the address into IDM_AR, then read
 * the value from IDM_DR.  reg_lock keeps the AR/DR pair atomic against
 * concurrent indirect accesses.
 */
static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr)
{
	unsigned long flags;
	u16 data;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5300_write_direct(priv, W5300_IDM_AR, addr);
	mmiowb();	/* address must reach the chip before the data read */
	data = w5300_read_direct(priv, W5300_IDM_DR);
	spin_unlock_irqrestore(&priv->reg_lock, flags);

	return data;
}
151
/* Indirect-mode register write via the IDM_AR/IDM_DR pair, under
 * reg_lock (see w5300_read_indirect()).
 */
static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5300_write_direct(priv, W5300_IDM_AR, addr);
	mmiowb();	/* address before data */
	w5300_write_direct(priv, W5300_IDM_DR, data);
	mmiowb();
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
163
/* Compile-time binding of the register accessors, mirroring w5100.c:
 * fixed Kconfig bus types resolve directly; CONFIG_WIZNET_BUS_ANY
 * dispatches through probe-selected function pointers.  The BUS_ANY
 * macros expand the token "priv" and need a local of that name.
 */
#if defined(CONFIG_WIZNET_BUS_DIRECT)
#define w5300_read	w5300_read_direct
#define w5300_write	w5300_write_direct

#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
#define w5300_read	w5300_read_indirect
#define w5300_write	w5300_write_indirect

#else /* CONFIG_WIZNET_BUS_ANY */
#define w5300_read	priv->read
#define w5300_write	priv->write
#endif
176
177static u32 w5300_read32(struct w5300_priv *priv, u16 addr)
178{
179 u32 data;
180 data = w5300_read(priv, addr) << 16;
181 data |= w5300_read(priv, addr + 2);
182 return data;
183}
184
/* Write a 32-bit value as two 16-bit register writes, high half first. */
static void w5300_write32(struct w5300_priv *priv, u16 addr, u32 data)
{
	w5300_write(priv, addr, data >> 16);
	w5300_write(priv, addr + 2, data);
}
190
/* Issue a socket-0 command and busy-wait (up to 100 ms) until the chip
 * clears S0_CR to acknowledge it.  Returns 0 or -EIO on timeout.
 */
static int w5300_command(struct w5300_priv *priv, u16 cmd)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	w5300_write(priv, W5300_S0_CR, cmd);
	mmiowb();

	while (w5300_read(priv, W5300_S0_CR) != 0) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}
206
/* Copy @len bytes of a received frame out of the socket-0 RX FIFO,
 * unpacking each 16-bit FIFO word big-endian into @buf (rounding the
 * read count up to a word boundary).
 *
 * NOTE(review): two extra FIFO words are read and discarded after the
 * payload — presumably trailing frame data (e.g. the FCS) that the
 * chip appends; confirm against the W5300 datasheet.
 */
static void w5300_read_frame(struct w5300_priv *priv, u8 *buf, int len)
{
	u16 fifo;
	int i;

	for (i = 0; i < len; i += 2) {
		fifo = w5300_read(priv, W5300_S0_RX_FIFO);
		*buf++ = fifo >> 8;
		*buf++ = fifo;
	}
	fifo = w5300_read(priv, W5300_S0_RX_FIFO);
	fifo = w5300_read(priv, W5300_S0_RX_FIFO);
}
220
/* Copy @len bytes from @buf into the socket-0 TX FIFO, packing byte
 * pairs big-endian into 16-bit FIFO words, then latch the frame length
 * into TX_WRSR.  For odd @len the final word reads one byte past the
 * payload; the chip sends only @len bytes per TX_WRSR.
 */
static void w5300_write_frame(struct w5300_priv *priv, u8 *buf, int len)
{
	u16 fifo;
	int i;

	for (i = 0; i < len; i += 2) {
		fifo = *buf++ << 8;
		fifo |= *buf++;
		w5300_write(priv, W5300_S0_TX_FIFO, fifo);
	}
	w5300_write32(priv, W5300_S0_TX_WRSR, len);
}
233
/* Program the netdev's MAC address into the chip: the first four bytes
 * packed into SHARL, the last two into SHARH.
 */
static void w5300_write_macaddr(struct w5300_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	w5300_write32(priv, W5300_SHARL,
		      ndev->dev_addr[0] << 24 |
		      ndev->dev_addr[1] << 16 |
		      ndev->dev_addr[2] << 8 |
		      ndev->dev_addr[3]);
	w5300_write(priv, W5300_SHARH,
		    ndev->dev_addr[4] << 8 |
		    ndev->dev_addr[5]);
	mmiowb();
}
247
/* Soft-reset the chip and restore baseline configuration: bus mode,
 * interrupts masked, MAC address, and the RX/TX FIFO memory split.
 * MR is accessed via the *_direct accessor because it must be reachable
 * before indirect addressing is (re)enabled.
 */
static void w5300_hw_reset(struct w5300_priv *priv)
{
	w5300_write_direct(priv, W5300_MR, MR_RST);
	mmiowb();
	mdelay(5);	/* let the soft reset complete */
	w5300_write_direct(priv, W5300_MR, priv->indirect ?
				 MR_WDF(7) | MR_PB | MR_IND :
				 MR_WDF(7) | MR_PB);
	mmiowb();
	w5300_write(priv, W5300_IMR, 0);	/* mask all interrupts */
	w5300_write_macaddr(priv);

	/* Configure 128K of internal memory
	 * as 64K RX fifo and 64K TX fifo
	 */
	w5300_write32(priv, W5300_RMSRL, 64 << 24);
	w5300_write32(priv, W5300_RMSRH, 0);
	w5300_write32(priv, W5300_TMSRL, 64 << 24);
	w5300_write32(priv, W5300_TMSRH, 0);
	w5300_write(priv, W5300_MTYPE, 0x00ff);
	mmiowb();
}
270
/* Open socket 0 in MAC-RAW mode (filtered unless promiscuous) and
 * unmask its RECV/SENDOK interrupts.
 */
static void w5300_hw_start(struct w5300_priv *priv)
{
	w5300_write(priv, W5300_S0_MR, priv->promisc ?
			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
	mmiowb();
	w5300_command(priv, S0_CR_OPEN);
	w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
	w5300_write(priv, W5300_IMR, IR_S0);
	mmiowb();
}
281
/* Mask all chip interrupts, then close socket 0. */
static void w5300_hw_close(struct w5300_priv *priv)
{
	w5300_write(priv, W5300_IMR, 0);
	mmiowb();
	w5300_command(priv, S0_CR_CLOSE);
}
288
289/***********************************************************************
290 *
291 * Device driver functions / callbacks
292 *
293 ***********************************************************************/
294
295static void w5300_get_drvinfo(struct net_device *ndev,
296 struct ethtool_drvinfo *info)
297{
298 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
299 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
300 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
301 sizeof(info->bus_info));
302}
303
304static u32 w5300_get_link(struct net_device *ndev)
305{
306 struct w5300_priv *priv = netdev_priv(ndev);
307
308 if (gpio_is_valid(priv->link_gpio))
309 return !!gpio_get_value(priv->link_gpio);
310
311 return 1;
312}
313
314static u32 w5300_get_msglevel(struct net_device *ndev)
315{
316 struct w5300_priv *priv = netdev_priv(ndev);
317
318 return priv->msg_enable;
319}
320
321static void w5300_set_msglevel(struct net_device *ndev, u32 value)
322{
323 struct w5300_priv *priv = netdev_priv(ndev);
324
325 priv->msg_enable = value;
326}
327
328static int w5300_get_regs_len(struct net_device *ndev)
329{
330 return W5300_REGS_LEN;
331}
332
/* ethtool: dump the whole register window big-endian, two bytes per
 * register.  The write-only TX/RX FIFO registers (matched by masking
 * the per-socket offset) are reported as 0xffff instead of being read,
 * since reading them would consume FIFO data.
 */
static void w5300_get_regs(struct net_device *ndev,
			   struct ethtool_regs *regs, void *_buf)
{
	struct w5300_priv *priv = netdev_priv(ndev);
	u8 *buf = _buf;
	u16 addr;
	u16 data;

	regs->version = 1;
	for (addr = 0; addr < W5300_REGS_LEN; addr += 2) {
		switch (addr & 0x23f) {
		case W5300_S0_TX_FIFO: /* cannot read TX_FIFO */
		case W5300_S0_RX_FIFO: /* cannot read RX_FIFO */
			data = 0xffff;
			break;
		default:
			data = w5300_read(priv, addr);
			break;
		}
		*buf++ = data >> 8;
		*buf++ = data;
	}
}
356
/* netdev watchdog callback: recover from a stuck transmitter by fully
 * resetting and restarting the hardware, then re-enable the queue.
 */
static void w5300_tx_timeout(struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	w5300_hw_reset(priv);
	w5300_hw_start(priv);
	ndev->stats.tx_errors++;
	ndev->trans_start = jiffies;	/* restart the watchdog timer */
	netif_wake_queue(ndev);
}
368
/* ndo_start_xmit: copy the frame into the TX FIFO and issue SEND.  The
 * queue is stopped here and only woken from the SENDOK interrupt, so at
 * most one frame is in flight at a time.
 */
static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);

	w5300_write_frame(priv, skb->data, skb->len);
	mmiowb();
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);	/* data already copied into the FIFO */
	netif_dbg(priv, tx_queued, ndev, "tx queued\n");

	w5300_command(priv, S0_CR_SEND);

	return NETDEV_TX_OK;
}
386
/* NAPI poll: drain up to @budget frames from the socket-0 RX FIFO.
 * The first FIFO word of each frame is its length; the skb is sized
 * rounded up to a word boundary because w5300_read_frame() copies in
 * 16-bit units.  When fewer than @budget frames were read, the chip
 * interrupt is unmasked and NAPI is completed.
 *
 * NOTE(review): returning -ENOMEM from a NAPI poll handler is unusual
 * (callers expect the number of packets processed); the alloc-failure
 * path also leaves NAPI scheduled with IMR masked — confirm intended
 * behavior.
 */
static int w5300_napi_poll(struct napi_struct *napi, int budget)
{
	struct w5300_priv *priv = container_of(napi, struct w5300_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	int rx_count;
	u16 rx_len;

	for (rx_count = 0; rx_count < budget; rx_count++) {
		u32 rx_fifo_len = w5300_read32(priv, W5300_S0_RX_RSR);
		if (rx_fifo_len == 0)
			break;	/* nothing pending */

		rx_len = w5300_read(priv, W5300_S0_RX_FIFO);

		skb = netdev_alloc_skb_ip_align(ndev, roundup(rx_len, 2));
		if (unlikely(!skb)) {
			u32 i;
			/* drain and drop everything pending, then bail out */
			for (i = 0; i < rx_fifo_len; i += 2)
				w5300_read(priv, W5300_S0_RX_FIFO);
			ndev->stats.rx_dropped++;
			return -ENOMEM;
		}

		skb_put(skb, rx_len);
		w5300_read_frame(priv, skb->data, rx_len);
		skb->protocol = eth_type_trans(skb, ndev);

		netif_receive_skb(skb);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += rx_len;
	}

	if (rx_count < budget) {
		/* all work done: re-enable the RX interrupt and stop polling */
		w5300_write(priv, W5300_IMR, IR_S0);
		mmiowb();
		napi_complete(napi);
	}

	return rx_count;
}
428
/* Main interrupt handler: acknowledge socket-0 events, wake the TX
 * queue on SENDOK, and hand RX work to NAPI (masking the chip's
 * interrupt until the poll loop has drained the FIFO).
 */
static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5300_priv *priv = netdev_priv(ndev);

	int ir = w5300_read(priv, W5300_S0_IR);
	if (!ir)
		return IRQ_NONE;	/* not ours (shared line) */
	w5300_write(priv, W5300_S0_IR, ir);	/* ack what we saw */
	mmiowb();

	if (ir & S0_IR_SENDOK) {
		netif_dbg(priv, tx_done, ndev, "tx done\n");
		netif_wake_queue(ndev);
	}

	if (ir & S0_IR_RECV) {
		if (napi_schedule_prep(&priv->napi)) {
			/* mask chip IRQs until w5300_napi_poll() finishes */
			w5300_write(priv, W5300_IMR, 0);
			mmiowb();
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}
455
/* IRQ handler for the optional link-detect GPIO: mirror the GPIO level
 * into the netdev carrier state while the interface is running.
 */
static irqreturn_t w5300_detect_link(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5300_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		if (gpio_get_value(priv->link_gpio) != 0) {
			netif_info(priv, link, ndev, "link is up\n");
			netif_carrier_on(ndev);
		} else {
			netif_info(priv, link, ndev, "link is down\n");
			netif_carrier_off(ndev);
		}
	}

	return IRQ_HANDLED;
}
473
474static void w5300_set_rx_mode(struct net_device *ndev)
475{
476 struct w5300_priv *priv = netdev_priv(ndev);
477 bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
478
479 if (priv->promisc != set_promisc) {
480 priv->promisc = set_promisc;
481 w5300_hw_start(priv);
482 }
483}
484
/* ndo_set_mac_address: validate and adopt the new MAC address, clear
 * the random-address flag, and program it into the chip.
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address.
 */
static int w5300_set_macaddr(struct net_device *ndev, void *addr)
{
	struct w5300_priv *priv = netdev_priv(ndev);
	struct sockaddr *sock_addr = addr;

	if (!is_valid_ether_addr(sock_addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
	ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
	w5300_write_macaddr(priv);
	return 0;
}
497
/* ndo_open: start the hardware, enable NAPI and the TX queue, and set
 * the initial carrier state (up if there is no link GPIO, otherwise
 * from the GPIO level).
 */
static int w5300_open(struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifup, ndev, "enabling\n");
	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EINVAL;
	w5300_hw_start(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);
	if (!gpio_is_valid(priv->link_gpio) ||
	    gpio_get_value(priv->link_gpio) != 0)
		netif_carrier_on(ndev);
	return 0;
}
513
/* ndo_stop: close the hardware, drop carrier, and quiesce the TX queue
 * and NAPI (reverse of w5300_open()).
 */
static int w5300_stop(struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifdown, ndev, "shutting down\n");
	w5300_hw_close(priv);
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	return 0;
}
525
/* ethtool operations exported by this driver */
static const struct ethtool_ops w5300_ethtool_ops = {
	.get_drvinfo		= w5300_get_drvinfo,
	.get_msglevel		= w5300_get_msglevel,
	.set_msglevel		= w5300_set_msglevel,
	.get_link		= w5300_get_link,
	.get_regs_len		= w5300_get_regs_len,
	.get_regs		= w5300_get_regs,
};
534
/* net_device operations; generic ethernet helpers fill the address
 * validation and MTU-change hooks.
 */
static const struct net_device_ops w5300_netdev_ops = {
	.ndo_open		= w5300_open,
	.ndo_stop		= w5300_stop,
	.ndo_start_xmit		= w5300_start_tx,
	.ndo_tx_timeout		= w5300_tx_timeout,
	.ndo_set_rx_mode	= w5300_set_rx_mode,
	.ndo_set_mac_address	= w5300_set_macaddr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
545
/* Hardware half of probe: pick a MAC address (platform data or random),
 * map the register window, choose direct vs. indirect accessors from
 * the mapping size, verify the chip by reading the ID register, and
 * wire up the main and optional link-detect interrupts.
 *
 * Resources are devm-managed or released by w5300_remove(); returns 0
 * or a negative errno.
 */
static int __devinit w5300_hw_probe(struct platform_device *pdev)
{
	struct wiznet_platform_data *data = pdev->dev.platform_data;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5300_priv *priv = netdev_priv(ndev);
	const char *name = netdev_name(ndev);
	struct resource *mem;
	int mem_size;
	int irq;
	int ret;

	if (data && is_valid_ether_addr(data->mac_addr)) {
		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
	} else {
		/* no (valid) platform MAC: generate a random one */
		random_ether_addr(ndev->dev_addr);
		ndev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENXIO;
	mem_size = resource_size(mem);
	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
		return -EBUSY;
	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
	if (!priv->base)
		return -EBUSY;

	spin_lock_init(&priv->reg_lock);
	/* a small register window implies the indirect bus interface */
	priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
	if (priv->indirect) {
		priv->read  = w5300_read_indirect;
		priv->write = w5300_write_indirect;
	} else {
		priv->read  = w5300_read_direct;
		priv->write = w5300_write_direct;
	}

	w5300_hw_reset(priv);
	/* sanity-check chip presence via the fixed ID register */
	if (w5300_read(priv, W5300_IDR) != IDR_W5300)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	/* NOTE(review): IRQ_TYPE_LEVEL_LOW is an irq-chip type constant;
	 * request_irq() expects IRQF_TRIGGER_LOW (numerically equal) —
	 * confirm and use the IRQF_* flag for clarity.
	 */
	ret = request_irq(irq, w5300_interrupt,
			  IRQ_TYPE_LEVEL_LOW, name, ndev);
	if (ret < 0)
		return ret;
	priv->irq = irq;

	priv->link_gpio = data ? data->link_gpio : -EINVAL;
	if (gpio_is_valid(priv->link_gpio)) {
		char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
		if (!link_name)
			return -ENOMEM;
		snprintf(link_name, 16, "%s-link", name);
		priv->link_irq = gpio_to_irq(priv->link_gpio);
		/* on failure just fall back to "always up" link reporting */
		if (request_any_context_irq(priv->link_irq, w5300_detect_link,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				link_name, priv->ndev) < 0)
			priv->link_gpio = -EINVAL;
	}

	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
	return 0;
}
613
/* Platform probe: allocate and configure the net_device (ethernet
 * defaults, ops, NAPI with weight 16), register it, then bring up the
 * hardware via w5300_hw_probe().  On failure everything is unwound in
 * reverse order.
 */
static int __devinit w5300_probe(struct platform_device *pdev)
{
	struct w5300_priv *priv;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	priv->ndev = ndev;

	ether_setup(ndev);
	ndev->netdev_ops = &w5300_netdev_ops;
	ndev->ethtool_ops = &w5300_ethtool_ops;
	ndev->watchdog_timeo = HZ;	/* 1 s TX watchdog */
	netif_napi_add(ndev, &priv->napi, w5300_napi_poll, 16);

	/* This chip doesn't support VLAN packets with normal MTU,
	 * so disable VLAN for this device.
	 */
	ndev->features |= NETIF_F_VLAN_CHALLENGED;

	err = register_netdev(ndev);
	if (err < 0)
		goto err_register;

	err = w5300_hw_probe(pdev);
	if (err < 0)
		goto err_hw_probe;

	return 0;

err_hw_probe:
	unregister_netdev(ndev);
err_register:
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return err;
}
656
/* Platform remove: quiesce the chip, release both interrupts, and tear
 * down the net_device.  The MMIO mapping and region are devm-managed
 * and released automatically.
 */
static int __devexit w5300_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5300_priv *priv = netdev_priv(ndev);

	w5300_hw_reset(priv);	/* make sure the chip is quiescent */
	free_irq(priv->irq, ndev);
	if (gpio_is_valid(priv->link_gpio))
		free_irq(priv->link_irq, ndev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
672
673#ifdef CONFIG_PM
674static int w5300_suspend(struct device *dev)
675{
676 struct platform_device *pdev = to_platform_device(dev);
677 struct net_device *ndev = platform_get_drvdata(pdev);
678 struct w5300_priv *priv = netdev_priv(ndev);
679
680 if (netif_running(ndev)) {
681 netif_carrier_off(ndev);
682 netif_device_detach(ndev);
683
684 w5300_hw_close(priv);
685 }
686 return 0;
687}
688
689static int w5300_resume(struct device *dev)
690{
691 struct platform_device *pdev = to_platform_device(dev);
692 struct net_device *ndev = platform_get_drvdata(pdev);
693 struct w5300_priv *priv = netdev_priv(ndev);
694
695 if (!netif_running(ndev)) {
696 w5300_hw_reset(priv);
697 w5300_hw_start(priv);
698
699 netif_device_attach(ndev);
700 if (!gpio_is_valid(priv->link_gpio) ||
701 gpio_get_value(priv->link_gpio) != 0)
702 netif_carrier_on(ndev);
703 }
704 return 0;
705}
706#endif /* CONFIG_PM */
707
/* PM callbacks (no-ops unless CONFIG_PM) and platform-driver glue. */
static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);

static struct platform_driver w5300_driver = {
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.pm	= &w5300_pm_ops,
	},
	.probe		= w5300_probe,
	.remove		= __devexit_p(w5300_remove),
};
719
720module_platform_driver(w5300_driver);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index d21591a2c593..1eaf7128afee 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1000,6 +1000,7 @@ static const struct ethtool_ops temac_ethtool_ops = {
1000 .set_settings = temac_set_settings, 1000 .set_settings = temac_set_settings,
1001 .nway_reset = temac_nway_reset, 1001 .nway_reset = temac_nway_reset,
1002 .get_link = ethtool_op_get_link, 1002 .get_link = ethtool_op_get_link,
1003 .get_ts_info = ethtool_op_get_ts_info,
1003}; 1004};
1004 1005
1005static int __devinit temac_of_probe(struct platform_device *op) 1006static int __devinit temac_of_probe(struct platform_device *op)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index cc83af083fd7..44b8d2bad8c3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -2,9 +2,7 @@
2 * Definitions for Xilinx Axi Ethernet device driver. 2 * Definitions for Xilinx Axi Ethernet device driver.
3 * 3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd. 4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 5 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
6 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
7 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
8 */ 6 */
9 7
10#ifndef XILINX_AXIENET_H 8#ifndef XILINX_AXIENET_H
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 2fcbeba6814b..9c365e192a31 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -4,9 +4,9 @@
4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
7 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 7 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
8 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 8 * Copyright (c) 2010 - 2011 PetaLogix
9 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 9 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
10 * 10 *
11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
12 * and Spartan6. 12 * and Spartan6.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index d70b6e79f6c0..e90e1f46121e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -2,9 +2,9 @@
2 * MDIO bus driver for the Xilinx Axi Ethernet device 2 * MDIO bus driver for the Xilinx Axi Ethernet device
3 * 3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd. 4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 5 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
6 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 6 * Copyright (c) 2010 - 2011 PetaLogix
7 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 7 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
8 */ 8 */
9 9
10#include <linux/of_address.h> 10#include <linux/of_address.h>
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index cf67352cea14..3f431019e615 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -5,8 +5,8 @@
5config NET_VENDOR_XSCALE 5config NET_VENDOR_XSCALE
6 bool "Intel XScale IXP devices" 6 bool "Intel XScale IXP devices"
7 default y 7 default y
8 depends on NET_VENDOR_INTEL && ((ARM && ARCH_IXP4XX && \ 8 depends on NET_VENDOR_INTEL && (ARM && ARCH_IXP4XX && \
9 IXP4XX_NPE && IXP4XX_QMGR) || ARCH_ENP2611) 9 IXP4XX_NPE && IXP4XX_QMGR)
10 ---help--- 10 ---help---
11 If you have a network (Ethernet) card belonging to this class, say Y 11 If you have a network (Ethernet) card belonging to this class, say Y
12 and read the Ethernet-HOWTO, available from 12 and read the Ethernet-HOWTO, available from
@@ -27,6 +27,4 @@ config IXP4XX_ETH
27 Say Y here if you want to use built-in Ethernet ports 27 Say Y here if you want to use built-in Ethernet ports
28 on IXP4xx processor. 28 on IXP4xx processor.
29 29
30source "drivers/net/ethernet/xscale/ixp2000/Kconfig"
31
32endif # NET_VENDOR_XSCALE 30endif # NET_VENDOR_XSCALE
diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile
index b195b9d7fe81..abc3b031fba7 100644
--- a/drivers/net/ethernet/xscale/Makefile
+++ b/drivers/net/ethernet/xscale/Makefile
@@ -2,5 +2,4 @@
2# Makefile for the Intel XScale IXP device drivers. 2# Makefile for the Intel XScale IXP device drivers.
3# 3#
4 4
5obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
6obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o 5obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/Kconfig b/drivers/net/ethernet/xscale/ixp2000/Kconfig
deleted file mode 100644
index 58dbc5b876bc..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
1config ENP2611_MSF_NET
2 tristate "Radisys ENP2611 MSF network interface support"
3 depends on ARCH_ENP2611
4 ---help---
5 This is a driver for the MSF network interface unit in
6 the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/ethernet/xscale/ixp2000/Makefile b/drivers/net/ethernet/xscale/ixp2000/Makefile
deleted file mode 100644
index fd38351ceaa7..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_ENP2611_MSF_NET) += enp2611_mod.o
2
3enp2611_mod-objs := caleb.o enp2611.o ixp2400-msf.o ixpdev.o pm3386.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.c b/drivers/net/ethernet/xscale/ixp2000/caleb.c
deleted file mode 100644
index 7dea5b95012c..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/caleb.c
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/delay.h>
14#include <asm/io.h>
15#include "caleb.h"
16
17#define CALEB_IDLO 0x00
18#define CALEB_IDHI 0x01
19#define CALEB_RID 0x02
20#define CALEB_RESET 0x03
21#define CALEB_INTREN0 0x04
22#define CALEB_INTREN1 0x05
23#define CALEB_INTRSTAT0 0x06
24#define CALEB_INTRSTAT1 0x07
25#define CALEB_PORTEN 0x08
26#define CALEB_BURST 0x09
27#define CALEB_PORTPAUS 0x0A
28#define CALEB_PORTPAUSD 0x0B
29#define CALEB_PHY0RX 0x10
30#define CALEB_PHY1RX 0x11
31#define CALEB_PHY0TX 0x12
32#define CALEB_PHY1TX 0x13
33#define CALEB_IXPRX_HI_CNTR 0x15
34#define CALEB_PHY0RX_HI_CNTR 0x16
35#define CALEB_PHY1RX_HI_CNTR 0x17
36#define CALEB_IXPRX_CNTR 0x18
37#define CALEB_PHY0RX_CNTR 0x19
38#define CALEB_PHY1RX_CNTR 0x1A
39#define CALEB_IXPTX_CNTR 0x1B
40#define CALEB_PHY0TX_CNTR 0x1C
41#define CALEB_PHY1TX_CNTR 0x1D
42#define CALEB_DEBUG0 0x1E
43#define CALEB_DEBUG1 0x1F
44
45
46static u8 caleb_reg_read(int reg)
47{
48 u8 value;
49
50 value = *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg));
51
52// printk(KERN_INFO "caleb_reg_read(%d) = %.2x\n", reg, value);
53
54 return value;
55}
56
57static void caleb_reg_write(int reg, u8 value)
58{
59 u8 dummy;
60
61// printk(KERN_INFO "caleb_reg_write(%d, %.2x)\n", reg, value);
62
63 *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)) = value;
64
65 dummy = *((volatile u8 *)ENP2611_CALEB_VIRT_BASE);
66 __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
67}
68
69
70void caleb_reset(void)
71{
72 /*
73 * Perform a chip reset.
74 */
75 caleb_reg_write(CALEB_RESET, 0x02);
76 udelay(1);
77
78 /*
79 * Enable all interrupt sources. This is needed to get
80 * meaningful results out of the status bits (register 6
81 * and 7.)
82 */
83 caleb_reg_write(CALEB_INTREN0, 0xff);
84 caleb_reg_write(CALEB_INTREN1, 0x07);
85
86 /*
87 * Set RX and TX FIFO thresholds to 1.5kb.
88 */
89 caleb_reg_write(CALEB_PHY0RX, 0x11);
90 caleb_reg_write(CALEB_PHY1RX, 0x11);
91 caleb_reg_write(CALEB_PHY0TX, 0x11);
92 caleb_reg_write(CALEB_PHY1TX, 0x11);
93
94 /*
95 * Program SPI-3 burst size.
96 */
97 caleb_reg_write(CALEB_BURST, 0); // 64-byte RBUF mpackets
98// caleb_reg_write(CALEB_BURST, 1); // 128-byte RBUF mpackets
99// caleb_reg_write(CALEB_BURST, 2); // 256-byte RBUF mpackets
100}
101
102void caleb_enable_rx(int port)
103{
104 u8 temp;
105
106 temp = caleb_reg_read(CALEB_PORTEN);
107 temp |= 1 << port;
108 caleb_reg_write(CALEB_PORTEN, temp);
109}
110
111void caleb_disable_rx(int port)
112{
113 u8 temp;
114
115 temp = caleb_reg_read(CALEB_PORTEN);
116 temp &= ~(1 << port);
117 caleb_reg_write(CALEB_PORTEN, temp);
118}
119
120void caleb_enable_tx(int port)
121{
122 u8 temp;
123
124 temp = caleb_reg_read(CALEB_PORTEN);
125 temp |= 1 << (port + 4);
126 caleb_reg_write(CALEB_PORTEN, temp);
127}
128
129void caleb_disable_tx(int port)
130{
131 u8 temp;
132
133 temp = caleb_reg_read(CALEB_PORTEN);
134 temp &= ~(1 << (port + 4));
135 caleb_reg_write(CALEB_PORTEN, temp);
136}
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.h b/drivers/net/ethernet/xscale/ixp2000/caleb.h
deleted file mode 100644
index e93a1ef5b8a3..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/caleb.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __CALEB_H
13#define __CALEB_H
14
15void caleb_reset(void);
16void caleb_enable_rx(int port);
17void caleb_disable_rx(int port);
18void caleb_enable_tx(int port);
19void caleb_disable_tx(int port);
20
21
22#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/enp2611.c b/drivers/net/ethernet/xscale/ixp2000/enp2611.c
deleted file mode 100644
index 34a6cfd17930..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/enp2611.c
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 * IXP2400 MSF network device driver for the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/init.h>
17#include <linux/moduleparam.h>
18#include <asm/hardware/uengine.h>
19#include <asm/mach-types.h>
20#include <asm/io.h>
21#include "ixpdev.h"
22#include "caleb.h"
23#include "ixp2400-msf.h"
24#include "pm3386.h"
25
26/***********************************************************************
27 * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
28 * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
29 * to the IXP2400.
30 *
31 * +-------------+
32 * SFP GBIC #0 ---+ | +---------+
33 * | PM3386 #0 +-------+ |
34 * SFP GBIC #1 ---+ | | "Caleb" | +---------+
35 * +-------------+ | | | |
36 * | SPI-3 +---------+ IXP2400 |
37 * +-------------+ | bridge | | |
38 * SFP GBIC #2 ---+ | | FPGA | +---------+
39 * | PM3386 #1 +-------+ |
40 * | | +---------+
41 * +-------------+
42 * ^ ^ ^
43 * | 1.25Gbaud | 104MHz | 104MHz
44 * | SERDES ea. | SPI-3 ea. | SPI-3
45 *
46 ***********************************************************************/
47static struct ixp2400_msf_parameters enp2611_msf_parameters =
48{
49 .rx_mode = IXP2400_RX_MODE_UTOPIA_POS |
50 IXP2400_RX_MODE_1x32 |
51 IXP2400_RX_MODE_MPHY |
52 IXP2400_RX_MODE_MPHY_32 |
53 IXP2400_RX_MODE_MPHY_POLLED_STATUS |
54 IXP2400_RX_MODE_MPHY_LEVEL3 |
55 IXP2400_RX_MODE_RBUF_SIZE_64,
56
57 .rxclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
58
59 .rx_poll_ports = 3,
60
61 .rx_channel_mode = {
62 IXP2400_PORT_RX_MODE_MASTER |
63 IXP2400_PORT_RX_MODE_POS_PHY |
64 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
65 IXP2400_PORT_RX_MODE_ODD_PARITY |
66 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
67
68 IXP2400_PORT_RX_MODE_MASTER |
69 IXP2400_PORT_RX_MODE_POS_PHY |
70 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
71 IXP2400_PORT_RX_MODE_ODD_PARITY |
72 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
73
74 IXP2400_PORT_RX_MODE_MASTER |
75 IXP2400_PORT_RX_MODE_POS_PHY |
76 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
77 IXP2400_PORT_RX_MODE_ODD_PARITY |
78 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
79
80 IXP2400_PORT_RX_MODE_MASTER |
81 IXP2400_PORT_RX_MODE_POS_PHY |
82 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
83 IXP2400_PORT_RX_MODE_ODD_PARITY |
84 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
85 },
86
87 .tx_mode = IXP2400_TX_MODE_UTOPIA_POS |
88 IXP2400_TX_MODE_1x32 |
89 IXP2400_TX_MODE_MPHY |
90 IXP2400_TX_MODE_MPHY_32 |
91 IXP2400_TX_MODE_MPHY_POLLED_STATUS |
92 IXP2400_TX_MODE_MPHY_LEVEL3 |
93 IXP2400_TX_MODE_TBUF_SIZE_64,
94
95 .txclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
96
97 .tx_poll_ports = 3,
98
99 .tx_channel_mode = {
100 IXP2400_PORT_TX_MODE_MASTER |
101 IXP2400_PORT_TX_MODE_POS_PHY |
102 IXP2400_PORT_TX_MODE_ODD_PARITY |
103 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
104
105 IXP2400_PORT_TX_MODE_MASTER |
106 IXP2400_PORT_TX_MODE_POS_PHY |
107 IXP2400_PORT_TX_MODE_ODD_PARITY |
108 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
109
110 IXP2400_PORT_TX_MODE_MASTER |
111 IXP2400_PORT_TX_MODE_POS_PHY |
112 IXP2400_PORT_TX_MODE_ODD_PARITY |
113 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
114
115 IXP2400_PORT_TX_MODE_MASTER |
116 IXP2400_PORT_TX_MODE_POS_PHY |
117 IXP2400_PORT_TX_MODE_ODD_PARITY |
118 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
119 }
120};
121
122static struct net_device *nds[3];
123static struct timer_list link_check_timer;
124
125/* @@@ Poll the SFP moddef0 line too. */
126/* @@@ Try to use the pm3386 DOOL interrupt as well. */
127static void enp2611_check_link_status(unsigned long __dummy)
128{
129 int i;
130
131 for (i = 0; i < 3; i++) {
132 struct net_device *dev;
133 int status;
134
135 dev = nds[i];
136 if (dev == NULL)
137 continue;
138
139 status = pm3386_is_link_up(i);
140 if (status && !netif_carrier_ok(dev)) {
141 /* @@@ Should report autonegotiation status. */
142 printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);
143
144 pm3386_enable_tx(i);
145 caleb_enable_tx(i);
146 netif_carrier_on(dev);
147 } else if (!status && netif_carrier_ok(dev)) {
148 printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);
149
150 netif_carrier_off(dev);
151 caleb_disable_tx(i);
152 pm3386_disable_tx(i);
153 }
154 }
155
156 link_check_timer.expires = jiffies + HZ / 10;
157 add_timer(&link_check_timer);
158}
159
160static void enp2611_set_port_admin_status(int port, int up)
161{
162 if (up) {
163 caleb_enable_rx(port);
164
165 pm3386_set_carrier(port, 1);
166 pm3386_enable_rx(port);
167 } else {
168 caleb_disable_tx(port);
169 pm3386_disable_tx(port);
170 /* @@@ Flush out pending packets. */
171 pm3386_set_carrier(port, 0);
172
173 pm3386_disable_rx(port);
174 caleb_disable_rx(port);
175 }
176}
177
178static int __init enp2611_init_module(void)
179{
180 int ports;
181 int i;
182
183 if (!machine_is_enp2611())
184 return -ENODEV;
185
186 caleb_reset();
187 pm3386_reset();
188
189 ports = pm3386_port_count();
190 for (i = 0; i < ports; i++) {
191 nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
192 if (nds[i] == NULL) {
193 while (--i >= 0)
194 free_netdev(nds[i]);
195 return -ENOMEM;
196 }
197
198 pm3386_init_port(i);
199 pm3386_get_mac(i, nds[i]->dev_addr);
200 }
201
202 ixp2400_msf_init(&enp2611_msf_parameters);
203
204 if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
205 for (i = 0; i < ports; i++)
206 if (nds[i])
207 free_netdev(nds[i]);
208 return -EINVAL;
209 }
210
211 init_timer(&link_check_timer);
212 link_check_timer.function = enp2611_check_link_status;
213 link_check_timer.expires = jiffies;
214 add_timer(&link_check_timer);
215
216 return 0;
217}
218
219static void __exit enp2611_cleanup_module(void)
220{
221 int i;
222
223 del_timer_sync(&link_check_timer);
224
225 ixpdev_deinit();
226 for (i = 0; i < 3; i++)
227 free_netdev(nds[i]);
228}
229
230module_init(enp2611_init_module);
231module_exit(enp2611_cleanup_module);
232MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
deleted file mode 100644
index f5ffd7e05d26..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
+++ /dev/null
@@ -1,212 +0,0 @@
1/*
2 * Generic library functions for the MSF (Media and Switch Fabric) unit
3 * found on the Intel IXP2400 network processor.
4 *
5 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Dedicated to Marija Kulikova.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as
10 * published by the Free Software Foundation; either version 2.1 of the
11 * License, or (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <mach/hardware.h>
17#include <mach/ixp2000-regs.h>
18#include <asm/delay.h>
19#include <asm/io.h>
20#include "ixp2400-msf.h"
21
22/*
23 * This is the Intel recommended PLL init procedure as described on
24 * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual.
25 */
26static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp)
27{
28 int rx_dual_clock;
29 int tx_dual_clock;
30 u32 value;
31
32 /*
33 * If the RX mode is not 1x32, we have to enable both RX PLLs
34 * (#0 and #1.) The same thing for the TX direction.
35 */
36 rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK);
37 tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK);
38
39 /*
40 * Read initial value.
41 */
42 value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL);
43
44 /*
45 * Put PLLs in powerdown and bypass mode.
46 */
47 value |= 0x0000f0f0;
48 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
49
50 /*
51 * Set single or dual clock mode bits.
52 */
53 value &= ~0x03000000;
54 value |= (rx_dual_clock << 24) | (tx_dual_clock << 25);
55
56 /*
57 * Set multipliers.
58 */
59 value &= ~0x00ff0000;
60 value |= mp->rxclk01_multiplier << 16;
61 value |= mp->rxclk23_multiplier << 18;
62 value |= mp->txclk01_multiplier << 20;
63 value |= mp->txclk23_multiplier << 22;
64
65 /*
66 * And write value.
67 */
68 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
69
70 /*
71 * Disable PLL bypass mode.
72 */
73 value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15);
74 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
75
76 /*
77 * Turn on PLLs.
78 */
79 value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7);
80 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
81
82 /*
83 * Wait for PLLs to lock. There are lock status bits, but IXP2400
84 * erratum #65 says that these lock bits should not be relied upon
85 * as they might not accurately reflect the true state of the PLLs.
86 */
87 udelay(100);
88}
89
90/*
91 * Needed according to p480 of Programmer's Reference Manual.
92 */
93static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp)
94{
95 int size_bits;
96 int i;
97
98 /*
99 * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer
100 * corruption) in the Intel-recommended way: do not add the RBUF
101 * elements susceptible to corruption to the freelist.
102 */
103 size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK;
104 if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) {
105 for (i = 1; i < 128; i++) {
106 if (i == 9 || i == 18 || i == 27)
107 continue;
108 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
109 }
110 } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) {
111 for (i = 1; i < 64; i++) {
112 if (i == 4 || i == 9 || i == 13)
113 continue;
114 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
115 }
116 } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) {
117 for (i = 1; i < 32; i++) {
118 if (i == 2 || i == 4 || i == 6)
119 continue;
120 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
121 }
122 }
123}
124
125static u32 ixp2400_msf_valid_channels(u32 reg)
126{
127 u32 channels;
128
129 channels = 0;
130 switch (reg & IXP2400_RX_MODE_WIDTH_MASK) {
131 case IXP2400_RX_MODE_1x32:
132 channels = 0x1;
133 if (reg & IXP2400_RX_MODE_MPHY &&
134 !(reg & IXP2400_RX_MODE_MPHY_32))
135 channels = 0xf;
136 break;
137
138 case IXP2400_RX_MODE_2x16:
139 channels = 0x5;
140 break;
141
142 case IXP2400_RX_MODE_4x8:
143 channels = 0xf;
144 break;
145
146 case IXP2400_RX_MODE_1x16_2x8:
147 channels = 0xd;
148 break;
149 }
150
151 return channels;
152}
153
154static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp)
155{
156 u32 value;
157
158 value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff;
159 value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28;
160 ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value);
161}
162
163static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp)
164{
165 u32 value;
166
167 value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff;
168 value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28;
169 ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value);
170}
171
172
173void ixp2400_msf_init(struct ixp2400_msf_parameters *mp)
174{
175 u32 value;
176 int i;
177
178 /*
179 * Init the RX/TX PLLs based on the passed parameter block.
180 */
181 ixp2400_pll_init(mp);
182
183 /*
184 * Reset MSF. Bit 7 in IXP_RESET_0 resets the MSF.
185 */
186 value = ixp2000_reg_read(IXP2000_RESET0);
187 ixp2000_reg_write(IXP2000_RESET0, value | 0x80);
188 ixp2000_reg_write(IXP2000_RESET0, value & ~0x80);
189
190 /*
191 * Initialise the RX section.
192 */
193 ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1);
194 ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode);
195 for (i = 0; i < 4; i++) {
196 ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i,
197 mp->rx_channel_mode[i]);
198 }
199 ixp2400_msf_free_rbuf_entries(mp);
200 ixp2400_msf_enable_rx(mp);
201
202 /*
203 * Initialise the TX section.
204 */
205 ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1);
206 ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode);
207 for (i = 0; i < 4; i++) {
208 ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i,
209 mp->tx_channel_mode[i]);
210 }
211 ixp2400_msf_enable_tx(mp);
212}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
deleted file mode 100644
index 3ac1af2771da..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Generic library functions for the MSF (Media and Switch Fabric) unit
3 * found on the Intel IXP2400 network processor.
4 *
5 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Dedicated to Marija Kulikova.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as
10 * published by the Free Software Foundation; either version 2.1 of the
11 * License, or (at your option) any later version.
12 */
13
14#ifndef __IXP2400_MSF_H
15#define __IXP2400_MSF_H
16
17struct ixp2400_msf_parameters
18{
19 u32 rx_mode;
20 unsigned rxclk01_multiplier:2;
21 unsigned rxclk23_multiplier:2;
22 unsigned rx_poll_ports:6;
23 u32 rx_channel_mode[4];
24
25 u32 tx_mode;
26 unsigned txclk01_multiplier:2;
27 unsigned txclk23_multiplier:2;
28 unsigned tx_poll_ports:6;
29 u32 tx_channel_mode[4];
30};
31
32void ixp2400_msf_init(struct ixp2400_msf_parameters *mp);
33
34#define IXP2400_PLL_MULTIPLIER_48 0x00
35#define IXP2400_PLL_MULTIPLIER_24 0x01
36#define IXP2400_PLL_MULTIPLIER_16 0x02
37#define IXP2400_PLL_MULTIPLIER_12 0x03
38
39#define IXP2400_RX_MODE_CSIX 0x00400000
40#define IXP2400_RX_MODE_UTOPIA_POS 0x00000000
41#define IXP2400_RX_MODE_WIDTH_MASK 0x00300000
42#define IXP2400_RX_MODE_1x16_2x8 0x00300000
43#define IXP2400_RX_MODE_4x8 0x00200000
44#define IXP2400_RX_MODE_2x16 0x00100000
45#define IXP2400_RX_MODE_1x32 0x00000000
46#define IXP2400_RX_MODE_MPHY 0x00080000
47#define IXP2400_RX_MODE_SPHY 0x00000000
48#define IXP2400_RX_MODE_MPHY_32 0x00040000
49#define IXP2400_RX_MODE_MPHY_4 0x00000000
50#define IXP2400_RX_MODE_MPHY_POLLED_STATUS 0x00020000
51#define IXP2400_RX_MODE_MPHY_DIRECT_STATUS 0x00000000
52#define IXP2400_RX_MODE_CBUS_FULL_DUPLEX 0x00010000
53#define IXP2400_RX_MODE_CBUS_SIMPLEX 0x00000000
54#define IXP2400_RX_MODE_MPHY_LEVEL2 0x00004000
55#define IXP2400_RX_MODE_MPHY_LEVEL3 0x00000000
56#define IXP2400_RX_MODE_CBUS_8BIT 0x00002000
57#define IXP2400_RX_MODE_CBUS_4BIT 0x00000000
58#define IXP2400_RX_MODE_CSIX_SINGLE_FREELIST 0x00000200
59#define IXP2400_RX_MODE_CSIX_SPLIT_FREELISTS 0x00000000
60#define IXP2400_RX_MODE_RBUF_SIZE_MASK 0x0000000c
61#define IXP2400_RX_MODE_RBUF_SIZE_256 0x00000008
62#define IXP2400_RX_MODE_RBUF_SIZE_128 0x00000004
63#define IXP2400_RX_MODE_RBUF_SIZE_64 0x00000000
64
65#define IXP2400_PORT_RX_MODE_SLAVE 0x00000040
66#define IXP2400_PORT_RX_MODE_MASTER 0x00000000
67#define IXP2400_PORT_RX_MODE_POS_PHY_L3 0x00000020
68#define IXP2400_PORT_RX_MODE_POS_PHY_L2 0x00000000
69#define IXP2400_PORT_RX_MODE_POS_PHY 0x00000010
70#define IXP2400_PORT_RX_MODE_UTOPIA 0x00000000
71#define IXP2400_PORT_RX_MODE_EVEN_PARITY 0x0000000c
72#define IXP2400_PORT_RX_MODE_ODD_PARITY 0x00000008
73#define IXP2400_PORT_RX_MODE_NO_PARITY 0x00000000
74#define IXP2400_PORT_RX_MODE_UTOPIA_BIG_CELLS 0x00000002
75#define IXP2400_PORT_RX_MODE_UTOPIA_NORMAL_CELLS 0x00000000
76#define IXP2400_PORT_RX_MODE_2_CYCLE_DECODE 0x00000001
77#define IXP2400_PORT_RX_MODE_1_CYCLE_DECODE 0x00000000
78
79#define IXP2400_TX_MODE_CSIX 0x00400000
80#define IXP2400_TX_MODE_UTOPIA_POS 0x00000000
81#define IXP2400_TX_MODE_WIDTH_MASK 0x00300000
82#define IXP2400_TX_MODE_1x16_2x8 0x00300000
83#define IXP2400_TX_MODE_4x8 0x00200000
84#define IXP2400_TX_MODE_2x16 0x00100000
85#define IXP2400_TX_MODE_1x32 0x00000000
86#define IXP2400_TX_MODE_MPHY 0x00080000
87#define IXP2400_TX_MODE_SPHY 0x00000000
88#define IXP2400_TX_MODE_MPHY_32 0x00040000
89#define IXP2400_TX_MODE_MPHY_4 0x00000000
90#define IXP2400_TX_MODE_MPHY_POLLED_STATUS 0x00020000
91#define IXP2400_TX_MODE_MPHY_DIRECT_STATUS 0x00000000
92#define IXP2400_TX_MODE_CBUS_FULL_DUPLEX 0x00010000
93#define IXP2400_TX_MODE_CBUS_SIMPLEX 0x00000000
94#define IXP2400_TX_MODE_MPHY_LEVEL2 0x00004000
95#define IXP2400_TX_MODE_MPHY_LEVEL3 0x00000000
96#define IXP2400_TX_MODE_CBUS_8BIT 0x00002000
97#define IXP2400_TX_MODE_CBUS_4BIT 0x00000000
98#define IXP2400_TX_MODE_TBUF_SIZE_MASK 0x0000000c
99#define IXP2400_TX_MODE_TBUF_SIZE_256 0x00000008
100#define IXP2400_TX_MODE_TBUF_SIZE_128 0x00000004
101#define IXP2400_TX_MODE_TBUF_SIZE_64 0x00000000
102
103#define IXP2400_PORT_TX_MODE_SLAVE 0x00000040
104#define IXP2400_PORT_TX_MODE_MASTER 0x00000000
105#define IXP2400_PORT_TX_MODE_POS_PHY 0x00000010
106#define IXP2400_PORT_TX_MODE_UTOPIA 0x00000000
107#define IXP2400_PORT_TX_MODE_EVEN_PARITY 0x0000000c
108#define IXP2400_PORT_TX_MODE_ODD_PARITY 0x00000008
109#define IXP2400_PORT_TX_MODE_NO_PARITY 0x00000000
110#define IXP2400_PORT_TX_MODE_UTOPIA_BIG_CELLS 0x00000002
111#define IXP2400_PORT_TX_MODE_2_CYCLE_DECODE 0x00000001
112#define IXP2400_PORT_TX_MODE_1_CYCLE_DECODE 0x00000000
113
114
115#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
deleted file mode 100644
index 42a73e357afa..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
+++ /dev/null
@@ -1,408 +0,0 @@
1/*
2 * RX ucode for the Intel IXP2400 in POS-PHY mode.
3 * Copyright (C) 2004, 2005 Lennert Buytenhek
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Assumptions made in this code:
12 * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
13 * only one full element list is used. This includes, for example,
14 * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
15 * is not an exhaustive list.)
16 * - The RBUF uses 64-byte mpackets.
17 * - RX descriptors reside in SRAM, and have the following format:
18 * struct rx_desc
19 * {
20 * // to uengine
21 * u32 buf_phys_addr;
22 * u32 buf_length;
23 *
24 * // from uengine
25 * u32 channel;
26 * u32 pkt_length;
27 * };
28 * - Packet data resides in DRAM.
29 * - Packet buffer addresses are 8-byte aligned.
30 * - Scratch ring 0 is rx_pending.
31 * - Scratch ring 1 is rx_done, and has status condition 'full'.
32 * - The host triggers rx_done flush and rx_pending refill on seeing INTA.
33 * - This code is run on all eight threads of the microengine it runs on.
34 *
35 * Local memory is used for per-channel RX state.
36 */
37
38#define RX_THREAD_FREELIST_0 0x0030
39#define RBUF_ELEMENT_DONE 0x0044
40
41#define CHANNEL_FLAGS *l$index0[0]
42#define CHANNEL_FLAG_RECEIVING 1
43#define PACKET_LENGTH *l$index0[1]
44#define PACKET_CHECKSUM *l$index0[2]
45#define BUFFER_HANDLE *l$index0[3]
46#define BUFFER_START *l$index0[4]
47#define BUFFER_LENGTH *l$index0[5]
48
49#define CHANNEL_STATE_SIZE 24 // in bytes
50#define CHANNEL_STATE_SHIFT 5 // ceil(log2(state size))
51
52
53 .sig volatile sig1
54 .sig volatile sig2
55 .sig volatile sig3
56
57 .sig mpacket_arrived
58 .reg add_to_rx_freelist
59 .reg read $rsw0, $rsw1
60 .xfer_order $rsw0 $rsw1
61
62 .reg zero
63
64 /*
65 * Initialise add_to_rx_freelist.
66 */
67 .begin
68 .reg temp
69 .reg temp2
70
71 immed[add_to_rx_freelist, RX_THREAD_FREELIST_0]
72 immed_w1[add_to_rx_freelist, (&$rsw0 | (&mpacket_arrived << 12))]
73
74 local_csr_rd[ACTIVE_CTX_STS]
75 immed[temp, 0]
76 alu[temp2, temp, and, 0x1f]
77 alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<20]
78 alu[temp2, temp, and, 0x80]
79 alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<18]
80 .end
81
82 immed[zero, 0]
83
84 /*
85 * Skip context 0 initialisation?
86 */
87 .begin
88 br!=ctx[0, mpacket_receive_loop#]
89 .end
90
91 /*
92 * Initialise local memory.
93 */
94 .begin
95 .reg addr
96 .reg temp
97
98 immed[temp, 0]
99 init_local_mem_loop#:
100 alu_shf[addr, --, b, temp, <<CHANNEL_STATE_SHIFT]
101 local_csr_wr[ACTIVE_LM_ADDR_0, addr]
102 nop
103 nop
104 nop
105
106 immed[CHANNEL_FLAGS, 0]
107
108 alu[temp, temp, +, 1]
109 alu[--, temp, and, 0x20]
110 beq[init_local_mem_loop#]
111 .end
112
113 /*
114 * Initialise signal pipeline.
115 */
116 .begin
117 local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
118 .set_sig sig1
119
120 local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
121 .set_sig sig2
122
123 local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
124 .set_sig sig3
125 .end
126
127mpacket_receive_loop#:
128 /*
129 * Synchronise and wait for mpacket.
130 */
131 .begin
132 ctx_arb[sig1]
133 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
134
135 msf[fast_wr, --, add_to_rx_freelist, 0]
136 .set_sig mpacket_arrived
137 ctx_arb[mpacket_arrived]
138 .set $rsw0 $rsw1
139 .end
140
141 /*
142 * We halt if we see {inbparerr,parerr,null,soperror}.
143 */
144 .begin
145 alu_shf[--, 0x1b, and, $rsw0, >>8]
146 bne[abort_rswerr#]
147 .end
148
149 /*
150 * Point local memory pointer to this channel's state area.
151 */
152 .begin
153 .reg chanaddr
154
155 alu[chanaddr, $rsw0, and, 0x1f]
156 alu_shf[chanaddr, --, b, chanaddr, <<CHANNEL_STATE_SHIFT]
157 local_csr_wr[ACTIVE_LM_ADDR_0, chanaddr]
158 nop
159 nop
160 nop
161 .end
162
163 /*
164 * Check whether we received a SOP mpacket while we were already
165 * working on a packet, or a non-SOP mpacket while there was no
166 * packet pending. (SOP == RECEIVING -> abort) If everything's
167 * okay, update the RECEIVING flag to reflect our new state.
168 */
169 .begin
170 .reg temp
171 .reg eop
172
173 #if CHANNEL_FLAG_RECEIVING != 1
174 #error CHANNEL_FLAG_RECEIVING is not 1
175 #endif
176
177 alu_shf[temp, 1, and, $rsw0, >>15]
178 alu[temp, temp, xor, CHANNEL_FLAGS]
179 alu[--, temp, and, CHANNEL_FLAG_RECEIVING]
180 beq[abort_proterr#]
181
182 alu_shf[eop, 1, and, $rsw0, >>14]
183 alu[CHANNEL_FLAGS, temp, xor, eop]
184 .end
185
186 /*
187 * Copy the mpacket into the right spot, and in case of EOP,
188 * write back the descriptor and pass the packet on.
189 */
190 .begin
191 .reg buffer_offset
192 .reg _packet_length
193 .reg _packet_checksum
194 .reg _buffer_handle
195 .reg _buffer_start
196 .reg _buffer_length
197
198 /*
199 * Determine buffer_offset, _packet_length and
200 * _packet_checksum.
201 */
202 .begin
203 .reg temp
204
205 alu[--, 1, and, $rsw0, >>15]
206 beq[not_sop#]
207
208 immed[PACKET_LENGTH, 0]
209 immed[PACKET_CHECKSUM, 0]
210
211 not_sop#:
212 alu[buffer_offset, --, b, PACKET_LENGTH]
213 alu_shf[temp, 0xff, and, $rsw0, >>16]
214 alu[_packet_length, buffer_offset, +, temp]
215 alu[PACKET_LENGTH, --, b, _packet_length]
216
217 immed[temp, 0xffff]
218 alu[temp, $rsw1, and, temp]
219 alu[_packet_checksum, PACKET_CHECKSUM, +, temp]
220 alu[PACKET_CHECKSUM, --, b, _packet_checksum]
221 .end
222
223 /*
224 * Allocate buffer in case of SOP.
225 */
226 .begin
227 .reg temp
228
229 alu[temp, 1, and, $rsw0, >>15]
230 beq[skip_buffer_alloc#]
231
232 .begin
233 .sig zzz
234 .reg read $stemp $stemp2
235 .xfer_order $stemp $stemp2
236
237 rx_nobufs#:
238 scratch[get, $stemp, zero, 0, 1], ctx_swap[zzz]
239 alu[_buffer_handle, --, b, $stemp]
240 beq[rx_nobufs#]
241
242 sram[read, $stemp, _buffer_handle, 0, 2],
243 ctx_swap[zzz]
244 alu[_buffer_start, --, b, $stemp]
245 alu[_buffer_length, --, b, $stemp2]
246 .end
247
248 skip_buffer_alloc#:
249 .end
250
251 /*
252 * Resynchronise.
253 */
254 .begin
255 ctx_arb[sig2]
256 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
257 .end
258
259 /*
260 * Synchronise buffer state.
261 */
262 .begin
263 .reg temp
264
265 alu[temp, 1, and, $rsw0, >>15]
266 beq[copy_from_local_mem#]
267
268 alu[BUFFER_HANDLE, --, b, _buffer_handle]
269 alu[BUFFER_START, --, b, _buffer_start]
270 alu[BUFFER_LENGTH, --, b, _buffer_length]
271 br[sync_state_done#]
272
273 copy_from_local_mem#:
274 alu[_buffer_handle, --, b, BUFFER_HANDLE]
275 alu[_buffer_start, --, b, BUFFER_START]
276 alu[_buffer_length, --, b, BUFFER_LENGTH]
277
278 sync_state_done#:
279 .end
280
281#if 0
282 /*
283 * Debug buffer state management.
284 */
285 .begin
286 .reg temp
287
288 alu[temp, 1, and, $rsw0, >>14]
289 beq[no_poison#]
290 immed[BUFFER_HANDLE, 0xdead]
291 immed[BUFFER_START, 0xdead]
292 immed[BUFFER_LENGTH, 0xdead]
293 no_poison#:
294
295 immed[temp, 0xdead]
296 alu[--, _buffer_handle, -, temp]
297 beq[state_corrupted#]
298 alu[--, _buffer_start, -, temp]
299 beq[state_corrupted#]
300 alu[--, _buffer_length, -, temp]
301 beq[state_corrupted#]
302 .end
303#endif
304
305 /*
306 * Check buffer length.
307 */
308 .begin
309 alu[--, _buffer_length, -, _packet_length]
310 blo[buffer_overflow#]
311 .end
312
313 /*
314 * Copy the mpacket and give back the RBUF element.
315 */
316 .begin
317 .reg element
318 .reg xfer_size
319 .reg temp
320 .sig copy_sig
321
322 alu_shf[element, 0x7f, and, $rsw0, >>24]
323 alu_shf[xfer_size, 0xff, and, $rsw0, >>16]
324
325 alu[xfer_size, xfer_size, -, 1]
326 alu_shf[xfer_size, 0x10, or, xfer_size, >>3]
327 alu_shf[temp, 0x10, or, xfer_size, <<21]
328 alu_shf[temp, temp, or, element, <<11]
329 alu_shf[--, temp, or, 1, <<18]
330
331 dram[rbuf_rd, --, _buffer_start, buffer_offset, max_8],
332 indirect_ref, sig_done[copy_sig]
333 ctx_arb[copy_sig]
334
335 alu[temp, RBUF_ELEMENT_DONE, or, element, <<16]
336 msf[fast_wr, --, temp, 0]
337 .end
338
339 /*
340 * If EOP, write back the packet descriptor.
341 */
342 .begin
343 .reg write $stemp $stemp2
344 .xfer_order $stemp $stemp2
345 .sig zzz
346
347 alu_shf[--, 1, and, $rsw0, >>14]
348 beq[no_writeback#]
349
350 alu[$stemp, $rsw0, and, 0x1f]
351 alu[$stemp2, --, b, _packet_length]
352 sram[write, $stemp, _buffer_handle, 8, 2], ctx_swap[zzz]
353
354 no_writeback#:
355 .end
356
357 /*
358 * Resynchronise.
359 */
360 .begin
361 ctx_arb[sig3]
362 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
363 .end
364
365 /*
366 * If EOP, put the buffer back onto the scratch ring.
367 */
368 .begin
369 .reg write $stemp
370 .sig zzz
371
372 br_inp_state[SCR_Ring1_Status, rx_done_ring_overflow#]
373
374 alu_shf[--, 1, and, $rsw0, >>14]
375 beq[mpacket_receive_loop#]
376
377 alu[--, 1, and, $rsw0, >>10]
378 bne[rxerr#]
379
380 alu[$stemp, --, b, _buffer_handle]
381 scratch[put, $stemp, zero, 4, 1], ctx_swap[zzz]
382 cap[fast_wr, 0, XSCALE_INT_A]
383 br[mpacket_receive_loop#]
384
385 rxerr#:
386 alu[$stemp, --, b, _buffer_handle]
387 scratch[put, $stemp, zero, 0, 1], ctx_swap[zzz]
388 br[mpacket_receive_loop#]
389 .end
390 .end
391
392
393abort_rswerr#:
394 halt
395
396abort_proterr#:
397 halt
398
399state_corrupted#:
400 halt
401
402buffer_overflow#:
403 halt
404
405rx_done_ring_overflow#:
406 halt
407
408
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
deleted file mode 100644
index e8aee2f81aad..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
+++ /dev/null
@@ -1,130 +0,0 @@
1static struct ixp2000_uengine_code ixp2400_rx =
2{
3 .cpu_model_bitmask = 0x000003fe,
4 .cpu_min_revision = 0,
5 .cpu_max_revision = 255,
6
7 .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
8 IXP2000_UENGINE_PRN_UPDATE_EVERY |
9 IXP2000_UENGINE_NN_FROM_PREVIOUS |
10 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
11 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
12 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
13
14 .initial_reg_values = (struct ixp2000_reg_value []) {
15 { -1, -1 }
16 },
17
18 .num_insns = 109,
19 .insns = (u8 []) {
20 0xf0, 0x00, 0x0c, 0xc0, 0x05,
21 0xf4, 0x44, 0x0c, 0x00, 0x05,
22 0xfc, 0x04, 0x4c, 0x00, 0x00,
23 0xf0, 0x00, 0x00, 0x3b, 0x00,
24 0xb4, 0x40, 0xf0, 0x3b, 0x1f,
25 0x8a, 0xc0, 0x50, 0x3e, 0x05,
26 0xb4, 0x40, 0xf0, 0x3b, 0x80,
27 0x9a, 0xe0, 0x00, 0x3e, 0x05,
28 0xf0, 0x00, 0x00, 0x07, 0x00,
29 0xd8, 0x05, 0xc0, 0x00, 0x11,
30 0xf0, 0x00, 0x00, 0x0f, 0x00,
31 0x91, 0xb0, 0x20, 0x0e, 0x00,
32 0xfc, 0x06, 0x60, 0x0b, 0x00,
33 0xf0, 0x00, 0x0c, 0x03, 0x00,
34 0xf0, 0x00, 0x0c, 0x03, 0x00,
35 0xf0, 0x00, 0x0c, 0x03, 0x00,
36 0xf0, 0x00, 0x0c, 0x02, 0x00,
37 0xb0, 0xc0, 0x30, 0x0f, 0x01,
38 0xa4, 0x70, 0x00, 0x0f, 0x20,
39 0xd8, 0x02, 0xc0, 0x01, 0x00,
40 0xfc, 0x10, 0xac, 0x23, 0x08,
41 0xfc, 0x10, 0xac, 0x43, 0x10,
42 0xfc, 0x10, 0xac, 0x63, 0x18,
43 0xe0, 0x00, 0x00, 0x00, 0x02,
44 0xfc, 0x10, 0xae, 0x23, 0x88,
45 0x3d, 0x00, 0x04, 0x03, 0x20,
46 0xe0, 0x00, 0x00, 0x00, 0x10,
47 0x84, 0x82, 0x02, 0x01, 0x3b,
48 0xd8, 0x1a, 0x00, 0x01, 0x01,
49 0xb4, 0x00, 0x8c, 0x7d, 0x80,
50 0x91, 0xb0, 0x80, 0x22, 0x00,
51 0xfc, 0x06, 0x60, 0x23, 0x00,
52 0xf0, 0x00, 0x0c, 0x03, 0x00,
53 0xf0, 0x00, 0x0c, 0x03, 0x00,
54 0xf0, 0x00, 0x0c, 0x03, 0x00,
55 0x94, 0xf0, 0x92, 0x01, 0x21,
56 0xac, 0x40, 0x60, 0x26, 0x00,
57 0xa4, 0x30, 0x0c, 0x04, 0x06,
58 0xd8, 0x1a, 0x40, 0x01, 0x00,
59 0x94, 0xe0, 0xa2, 0x01, 0x21,
60 0xac, 0x20, 0x00, 0x28, 0x06,
61 0x84, 0xf2, 0x02, 0x01, 0x21,
62 0xd8, 0x0b, 0x40, 0x01, 0x00,
63 0xf0, 0x00, 0x0c, 0x02, 0x01,
64 0xf0, 0x00, 0x0c, 0x02, 0x02,
65 0xa0, 0x00, 0x08, 0x04, 0x00,
66 0x95, 0x00, 0xc6, 0x01, 0xff,
67 0xa0, 0x80, 0x10, 0x30, 0x00,
68 0xa0, 0x60, 0x1c, 0x00, 0x01,
69 0xf0, 0x0f, 0xf0, 0x33, 0xff,
70 0xb4, 0x00, 0xc0, 0x31, 0x81,
71 0xb0, 0x80, 0xb0, 0x32, 0x02,
72 0xa0, 0x20, 0x20, 0x2c, 0x00,
73 0x94, 0xf0, 0xd2, 0x01, 0x21,
74 0xd8, 0x0f, 0x40, 0x01, 0x00,
75 0x19, 0x40, 0x10, 0x04, 0x20,
76 0xa0, 0x00, 0x26, 0x04, 0x00,
77 0xd8, 0x0d, 0xc0, 0x01, 0x00,
78 0x00, 0x42, 0x10, 0x80, 0x02,
79 0xb0, 0x00, 0x46, 0x04, 0x00,
80 0xb0, 0x00, 0x56, 0x08, 0x00,
81 0xe0, 0x00, 0x00, 0x00, 0x04,
82 0xfc, 0x10, 0xae, 0x43, 0x90,
83 0x84, 0xf0, 0x32, 0x01, 0x21,
84 0xd8, 0x11, 0x40, 0x01, 0x00,
85 0xa0, 0x60, 0x3c, 0x00, 0x02,
86 0xa0, 0x20, 0x40, 0x10, 0x00,
87 0xa0, 0x20, 0x50, 0x14, 0x00,
88 0xd8, 0x12, 0x00, 0x00, 0x18,
89 0xa0, 0x00, 0x28, 0x0c, 0x00,
90 0xb0, 0x00, 0x48, 0x10, 0x00,
91 0xb0, 0x00, 0x58, 0x14, 0x00,
92 0xaa, 0xf0, 0x00, 0x14, 0x01,
93 0xd8, 0x1a, 0xc0, 0x01, 0x05,
94 0x85, 0x80, 0x42, 0x01, 0xff,
95 0x95, 0x00, 0x66, 0x01, 0xff,
96 0xba, 0xc0, 0x60, 0x1b, 0x01,
97 0x9a, 0x30, 0x60, 0x19, 0x30,
98 0x9a, 0xb0, 0x70, 0x1a, 0x30,
99 0x9b, 0x50, 0x78, 0x1e, 0x04,
100 0x8a, 0xe2, 0x08, 0x1e, 0x21,
101 0x6a, 0x4e, 0x00, 0x13, 0x00,
102 0xe0, 0x00, 0x00, 0x00, 0x30,
103 0x9b, 0x00, 0x7a, 0x92, 0x04,
104 0x3d, 0x00, 0x04, 0x1f, 0x20,
105 0x84, 0xe2, 0x02, 0x01, 0x21,
106 0xd8, 0x16, 0x80, 0x01, 0x00,
107 0xa4, 0x18, 0x0c, 0x7d, 0x80,
108 0xa0, 0x58, 0x1c, 0x00, 0x01,
109 0x01, 0x42, 0x00, 0xa0, 0x02,
110 0xe0, 0x00, 0x00, 0x00, 0x08,
111 0xfc, 0x10, 0xae, 0x63, 0x98,
112 0xd8, 0x1b, 0x00, 0xc2, 0x14,
113 0x84, 0xe2, 0x02, 0x01, 0x21,
114 0xd8, 0x05, 0xc0, 0x01, 0x00,
115 0x84, 0xa2, 0x02, 0x01, 0x21,
116 0xd8, 0x19, 0x40, 0x01, 0x01,
117 0xa0, 0x58, 0x0c, 0x00, 0x02,
118 0x1a, 0x40, 0x00, 0x04, 0x24,
119 0x33, 0x00, 0x01, 0x2f, 0x20,
120 0xd8, 0x05, 0xc0, 0x00, 0x18,
121 0xa0, 0x58, 0x0c, 0x00, 0x02,
122 0x1a, 0x40, 0x00, 0x04, 0x20,
123 0xd8, 0x05, 0xc0, 0x00, 0x18,
124 0xe0, 0x00, 0x02, 0x00, 0x00,
125 0xe0, 0x00, 0x02, 0x00, 0x00,
126 0xe0, 0x00, 0x02, 0x00, 0x00,
127 0xe0, 0x00, 0x02, 0x00, 0x00,
128 0xe0, 0x00, 0x02, 0x00, 0x00,
129 }
130};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
deleted file mode 100644
index d090d1884fb7..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
+++ /dev/null
@@ -1,272 +0,0 @@
1/*
2 * TX ucode for the Intel IXP2400 in POS-PHY mode.
3 * Copyright (C) 2004, 2005 Lennert Buytenhek
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Assumptions made in this code:
12 * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
13 * only one TBUF partition is used. This includes, for example,
14 * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
15 * is not an exhaustive list.)
16 * - The TBUF uses 64-byte mpackets.
17 * - TX descriptors reside in SRAM, and have the following format:
18 * struct tx_desc
19 * {
20 * // to uengine
21 * u32 buf_phys_addr;
22 * u32 pkt_length;
23 * u32 channel;
24 * };
25 * - Packet data resides in DRAM.
26 * - Packet buffer addresses are 8-byte aligned.
27 * - Scratch ring 2 is tx_pending.
28 * - Scratch ring 3 is tx_done, and has status condition 'full'.
29 * - This code is run on all eight threads of the microengine it runs on.
30 */
31
32#define TX_SEQUENCE_0 0x0060
33#define TBUF_CTRL 0x1800
34
35#define PARTITION_SIZE 128
36#define PARTITION_THRESH 96
37
38
39 .sig volatile sig1
40 .sig volatile sig2
41 .sig volatile sig3
42
43 .reg @old_tx_seq_0
44 .reg @mpkts_in_flight
45 .reg @next_tbuf_mpacket
46
47 .reg @buffer_handle
48 .reg @buffer_start
49 .reg @packet_length
50 .reg @channel
51 .reg @packet_offset
52
53 .reg zero
54
55 immed[zero, 0]
56
57 /*
58 * Skip context 0 initialisation?
59 */
60 .begin
61 br!=ctx[0, mpacket_tx_loop#]
62 .end
63
64 /*
65 * Wait until all pending TBUF elements have been transmitted.
66 */
67 .begin
68 .reg read $tx
69 .sig zzz
70
71 loop_empty#:
72 msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
73 alu_shf[--, --, b, $tx, >>31]
74 beq[loop_empty#]
75
76 alu[@old_tx_seq_0, --, b, $tx]
77 .end
78
79 immed[@mpkts_in_flight, 0]
80 alu[@next_tbuf_mpacket, @old_tx_seq_0, and, (PARTITION_SIZE - 1)]
81
82 immed[@buffer_handle, 0]
83
84 /*
85 * Initialise signal pipeline.
86 */
87 .begin
88 local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
89 .set_sig sig1
90
91 local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
92 .set_sig sig2
93
94 local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
95 .set_sig sig3
96 .end
97
98mpacket_tx_loop#:
99 .begin
100 .reg tbuf_element_index
101 .reg buffer_handle
102 .reg sop_eop
103 .reg packet_data
104 .reg channel
105 .reg mpacket_size
106
107 /*
108 * If there is no packet currently being transmitted,
109 * dequeue the next TX descriptor, and fetch the buffer
110 * address, packet length and destination channel number.
111 */
112 .begin
113 .reg read $stemp $stemp2 $stemp3
114 .xfer_order $stemp $stemp2 $stemp3
115 .sig zzz
116
117 ctx_arb[sig1]
118
119 alu[--, --, b, @buffer_handle]
120 bne[already_got_packet#]
121
122 tx_nobufs#:
123 scratch[get, $stemp, zero, 8, 1], ctx_swap[zzz]
124 alu[@buffer_handle, --, b, $stemp]
125 beq[tx_nobufs#]
126
127 sram[read, $stemp, $stemp, 0, 3], ctx_swap[zzz]
128 alu[@buffer_start, --, b, $stemp]
129 alu[@packet_length, --, b, $stemp2]
130 beq[zero_byte_packet#]
131 alu[@channel, --, b, $stemp3]
132 immed[@packet_offset, 0]
133
134 already_got_packet#:
135 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
136 .end
137
138 /*
139 * Determine tbuf element index, SOP/EOP flags, mpacket
140 * offset and mpacket size and cache buffer_handle and
141 * channel number.
142 */
143 .begin
144 alu[tbuf_element_index, --, b, @next_tbuf_mpacket]
145 alu[@next_tbuf_mpacket, @next_tbuf_mpacket, +, 1]
146 alu[@next_tbuf_mpacket, @next_tbuf_mpacket, and,
147 (PARTITION_SIZE - 1)]
148
149 alu[buffer_handle, --, b, @buffer_handle]
150 immed[@buffer_handle, 0]
151
152 immed[sop_eop, 1]
153
154 alu[packet_data, --, b, @packet_offset]
155 bne[no_sop#]
156 alu[sop_eop, sop_eop, or, 2]
157 no_sop#:
158 alu[packet_data, packet_data, +, @buffer_start]
159
160 alu[channel, --, b, @channel]
161
162 alu[mpacket_size, @packet_length, -, @packet_offset]
163 alu[--, 64, -, mpacket_size]
164 bhs[eop#]
165 alu[@buffer_handle, --, b, buffer_handle]
166 immed[mpacket_size, 64]
167 alu[sop_eop, sop_eop, and, 2]
168 eop#:
169
170 alu[@packet_offset, @packet_offset, +, mpacket_size]
171 .end
172
173 /*
174 * Wait until there's enough space in the TBUF.
175 */
176 .begin
177 .reg read $tx
178 .reg temp
179 .sig zzz
180
181 ctx_arb[sig2]
182
183 br[test_space#]
184
185 loop_space#:
186 msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
187
188 alu[temp, $tx, -, @old_tx_seq_0]
189 alu[temp, temp, and, 0xff]
190 alu[@mpkts_in_flight, @mpkts_in_flight, -, temp]
191
192 alu[@old_tx_seq_0, --, b, $tx]
193
194 test_space#:
195 alu[--, PARTITION_THRESH, -, @mpkts_in_flight]
196 blo[loop_space#]
197
198 alu[@mpkts_in_flight, @mpkts_in_flight, +, 1]
199
200 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
201 .end
202
203 /*
204 * Copy the packet data to the TBUF.
205 */
206 .begin
207 .reg temp
208 .sig copy_sig
209
210 alu[temp, mpacket_size, -, 1]
211 alu_shf[temp, 0x10, or, temp, >>3]
212 alu_shf[temp, 0x10, or, temp, <<21]
213 alu_shf[temp, temp, or, tbuf_element_index, <<11]
214 alu_shf[--, temp, or, 1, <<18]
215
216 dram[tbuf_wr, --, packet_data, 0, max_8],
217 indirect_ref, sig_done[copy_sig]
218 ctx_arb[copy_sig]
219 .end
220
221 /*
222 * Mark TBUF element as ready-to-be-transmitted.
223 */
224 .begin
225 .reg write $tsw $tsw2
226 .xfer_order $tsw $tsw2
227 .reg temp
228 .sig zzz
229
230 alu_shf[temp, channel, or, mpacket_size, <<24]
231 alu_shf[$tsw, temp, or, sop_eop, <<8]
232 immed[$tsw2, 0]
233
234 immed[temp, TBUF_CTRL]
235 alu_shf[temp, temp, or, tbuf_element_index, <<3]
236 msf[write, $tsw, temp, 0, 2], ctx_swap[zzz]
237 .end
238
239 /*
240 * Resynchronise.
241 */
242 .begin
243 ctx_arb[sig3]
244 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
245 .end
246
247 /*
248 * If this was an EOP mpacket, recycle the TX buffer
249 * and signal the host.
250 */
251 .begin
252 .reg write $stemp
253 .sig zzz
254
255 alu[--, sop_eop, and, 1]
256 beq[mpacket_tx_loop#]
257
258 tx_done_ring_full#:
259 br_inp_state[SCR_Ring3_Status, tx_done_ring_full#]
260
261 alu[$stemp, --, b, buffer_handle]
262 scratch[put, $stemp, zero, 12, 1], ctx_swap[zzz]
263 cap[fast_wr, 0, XSCALE_INT_A]
264 br[mpacket_tx_loop#]
265 .end
266 .end
267
268
269zero_byte_packet#:
270 halt
271
272
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
deleted file mode 100644
index a433e24b0a51..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
+++ /dev/null
@@ -1,98 +0,0 @@
1static struct ixp2000_uengine_code ixp2400_tx =
2{
3 .cpu_model_bitmask = 0x000003fe,
4 .cpu_min_revision = 0,
5 .cpu_max_revision = 255,
6
7 .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
8 IXP2000_UENGINE_PRN_UPDATE_EVERY |
9 IXP2000_UENGINE_NN_FROM_PREVIOUS |
10 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
11 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
12 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
13
14 .initial_reg_values = (struct ixp2000_reg_value []) {
15 { -1, -1 }
16 },
17
18 .num_insns = 77,
19 .insns = (u8 []) {
20 0xf0, 0x00, 0x00, 0x07, 0x00,
21 0xd8, 0x03, 0x00, 0x00, 0x11,
22 0x3c, 0x40, 0x00, 0x04, 0xe0,
23 0x81, 0xf2, 0x02, 0x01, 0x00,
24 0xd8, 0x00, 0x80, 0x01, 0x00,
25 0xb0, 0x08, 0x06, 0x00, 0x00,
26 0xf0, 0x00, 0x0c, 0x00, 0x80,
27 0xb4, 0x49, 0x02, 0x03, 0x7f,
28 0xf0, 0x00, 0x02, 0x83, 0x00,
29 0xfc, 0x10, 0xac, 0x23, 0x08,
30 0xfc, 0x10, 0xac, 0x43, 0x10,
31 0xfc, 0x10, 0xac, 0x63, 0x18,
32 0xe0, 0x00, 0x00, 0x00, 0x02,
33 0xa0, 0x30, 0x02, 0x80, 0x00,
34 0xd8, 0x06, 0x00, 0x01, 0x01,
35 0x19, 0x40, 0x00, 0x04, 0x28,
36 0xb0, 0x0a, 0x06, 0x00, 0x00,
37 0xd8, 0x03, 0xc0, 0x01, 0x00,
38 0x00, 0x44, 0x00, 0x80, 0x80,
39 0xa0, 0x09, 0x06, 0x00, 0x00,
40 0xb0, 0x0b, 0x06, 0x04, 0x00,
41 0xd8, 0x13, 0x00, 0x01, 0x00,
42 0xb0, 0x0c, 0x06, 0x08, 0x00,
43 0xf0, 0x00, 0x0c, 0x00, 0xa0,
44 0xfc, 0x10, 0xae, 0x23, 0x88,
45 0xa0, 0x00, 0x12, 0x40, 0x00,
46 0xb0, 0xc9, 0x02, 0x43, 0x01,
47 0xb4, 0x49, 0x02, 0x43, 0x7f,
48 0xb0, 0x00, 0x22, 0x80, 0x00,
49 0xf0, 0x00, 0x02, 0x83, 0x00,
50 0xf0, 0x00, 0x0c, 0x04, 0x02,
51 0xb0, 0x40, 0x6c, 0x00, 0xa0,
52 0xd8, 0x08, 0x80, 0x01, 0x01,
53 0xaa, 0x00, 0x2c, 0x08, 0x02,
54 0xa0, 0xc0, 0x30, 0x18, 0x90,
55 0xa0, 0x00, 0x43, 0x00, 0x00,
56 0xba, 0xc0, 0x32, 0xc0, 0xa0,
57 0xaa, 0xb0, 0x00, 0x0f, 0x40,
58 0xd8, 0x0a, 0x80, 0x01, 0x04,
59 0xb0, 0x0a, 0x00, 0x08, 0x00,
60 0xf0, 0x00, 0x00, 0x0f, 0x40,
61 0xa4, 0x00, 0x2c, 0x08, 0x02,
62 0xa0, 0x8a, 0x00, 0x0c, 0xa0,
63 0xe0, 0x00, 0x00, 0x00, 0x04,
64 0xd8, 0x0c, 0x80, 0x00, 0x18,
65 0x3c, 0x40, 0x00, 0x04, 0xe0,
66 0xba, 0x80, 0x42, 0x01, 0x80,
67 0xb4, 0x40, 0x40, 0x13, 0xff,
68 0xaa, 0x88, 0x00, 0x10, 0x80,
69 0xb0, 0x08, 0x06, 0x00, 0x00,
70 0xaa, 0xf0, 0x0d, 0x80, 0x80,
71 0xd8, 0x0b, 0x40, 0x01, 0x05,
72 0xa0, 0x88, 0x0c, 0x04, 0x80,
73 0xfc, 0x10, 0xae, 0x43, 0x90,
74 0xba, 0xc0, 0x50, 0x0f, 0x01,
75 0x9a, 0x30, 0x50, 0x15, 0x30,
76 0x9a, 0xb0, 0x50, 0x16, 0x30,
77 0x9b, 0x50, 0x58, 0x16, 0x01,
78 0x8a, 0xe2, 0x08, 0x16, 0x21,
79 0x6b, 0x4e, 0x00, 0x83, 0x03,
80 0xe0, 0x00, 0x00, 0x00, 0x30,
81 0x9a, 0x80, 0x70, 0x0e, 0x04,
82 0x8b, 0x88, 0x08, 0x1e, 0x02,
83 0xf0, 0x00, 0x0c, 0x01, 0x81,
84 0xf0, 0x01, 0x80, 0x1f, 0x00,
85 0x9b, 0xd0, 0x78, 0x1e, 0x01,
86 0x3d, 0x42, 0x00, 0x1c, 0x20,
87 0xe0, 0x00, 0x00, 0x00, 0x08,
88 0xfc, 0x10, 0xae, 0x63, 0x98,
89 0xa4, 0x30, 0x0c, 0x04, 0x02,
90 0xd8, 0x03, 0x00, 0x01, 0x00,
91 0xd8, 0x11, 0xc1, 0x42, 0x14,
92 0xa0, 0x18, 0x00, 0x08, 0x00,
93 0x1a, 0x40, 0x00, 0x04, 0x2c,
94 0x33, 0x00, 0x01, 0x2f, 0x20,
95 0xd8, 0x03, 0x00, 0x00, 0x18,
96 0xe0, 0x00, 0x02, 0x00, 0x00,
97 }
98};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
deleted file mode 100644
index 45008377c8bf..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
+++ /dev/null
@@ -1,437 +0,0 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/moduleparam.h>
19#include <linux/gfp.h>
20#include <asm/hardware/uengine.h>
21#include <asm/io.h>
22#include "ixp2400_rx.ucode"
23#include "ixp2400_tx.ucode"
24#include "ixpdev_priv.h"
25#include "ixpdev.h"
26#include "pm3386.h"
27
28#define DRV_MODULE_VERSION "0.2"
29
30static int nds_count;
31static struct net_device **nds;
32static int nds_open;
33static void (*set_port_admin_status)(int port, int up);
34
35static struct ixpdev_rx_desc * const rx_desc =
36 (struct ixpdev_rx_desc *)(IXP2000_SRAM0_VIRT_BASE + RX_BUF_DESC_BASE);
37static struct ixpdev_tx_desc * const tx_desc =
38 (struct ixpdev_tx_desc *)(IXP2000_SRAM0_VIRT_BASE + TX_BUF_DESC_BASE);
39static int tx_pointer;
40
41
42static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
43{
44 struct ixpdev_priv *ip = netdev_priv(dev);
45 struct ixpdev_tx_desc *desc;
46 int entry;
47 unsigned long flags;
48
49 if (unlikely(skb->len > PAGE_SIZE)) {
50 /* @@@ Count drops. */
51 dev_kfree_skb(skb);
52 return NETDEV_TX_OK;
53 }
54
55 entry = tx_pointer;
56 tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT;
57
58 desc = tx_desc + entry;
59 desc->pkt_length = skb->len;
60 desc->channel = ip->channel;
61
62 skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr));
63 dev_kfree_skb(skb);
64
65 ixp2000_reg_write(RING_TX_PENDING,
66 TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
67
68 local_irq_save(flags);
69 ip->tx_queue_entries++;
70 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
71 netif_stop_queue(dev);
72 local_irq_restore(flags);
73
74 return NETDEV_TX_OK;
75}
76
77
78static int ixpdev_rx(struct net_device *dev, int processed, int budget)
79{
80 while (processed < budget) {
81 struct ixpdev_rx_desc *desc;
82 struct sk_buff *skb;
83 void *buf;
84 u32 _desc;
85
86 _desc = ixp2000_reg_read(RING_RX_DONE);
87 if (_desc == 0)
88 return 0;
89
90 desc = rx_desc +
91 ((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc));
92 buf = phys_to_virt(desc->buf_addr);
93
94 if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) {
95 printk(KERN_ERR "ixp2000: rx err, length %d\n",
96 desc->pkt_length);
97 goto err;
98 }
99
100 if (desc->channel < 0 || desc->channel >= nds_count) {
101 printk(KERN_ERR "ixp2000: rx err, channel %d\n",
102 desc->channel);
103 goto err;
104 }
105
106 /* @@@ Make FCS stripping configurable. */
107 desc->pkt_length -= 4;
108
109 if (unlikely(!netif_running(nds[desc->channel])))
110 goto err;
111
112 skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
113 if (likely(skb != NULL)) {
114 skb_copy_to_linear_data(skb, buf, desc->pkt_length);
115 skb_put(skb, desc->pkt_length);
116 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
117
118 netif_receive_skb(skb);
119 }
120
121err:
122 ixp2000_reg_write(RING_RX_PENDING, _desc);
123 processed++;
124 }
125
126 return processed;
127}
128
129/* dev always points to nds[0]. */
130static int ixpdev_poll(struct napi_struct *napi, int budget)
131{
132 struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi);
133 struct net_device *dev = ip->dev;
134 int rx;
135
136 rx = 0;
137 do {
138 ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);
139
140 rx = ixpdev_rx(dev, rx, budget);
141 if (rx >= budget)
142 break;
143 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
144
145 napi_complete(napi);
146 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
147
148 return rx;
149}
150
151static void ixpdev_tx_complete(void)
152{
153 int channel;
154 u32 wake;
155
156 wake = 0;
157 while (1) {
158 struct ixpdev_priv *ip;
159 u32 desc;
160 int entry;
161
162 desc = ixp2000_reg_read(RING_TX_DONE);
163 if (desc == 0)
164 break;
165
166 /* @@@ Check whether entries come back in order. */
167 entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc);
168 channel = tx_desc[entry].channel;
169
170 if (channel < 0 || channel >= nds_count) {
171 printk(KERN_ERR "ixp2000: txcomp channel index "
172 "out of bounds (%d, %.8i, %d)\n",
173 channel, (unsigned int)desc, entry);
174 continue;
175 }
176
177 ip = netdev_priv(nds[channel]);
178 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
179 wake |= 1 << channel;
180 ip->tx_queue_entries--;
181 }
182
183 for (channel = 0; wake != 0; channel++) {
184 if (wake & (1 << channel)) {
185 netif_wake_queue(nds[channel]);
186 wake &= ~(1 << channel);
187 }
188 }
189}
190
191static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
192{
193 u32 status;
194
195 status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
196 if (status == 0)
197 return IRQ_NONE;
198
199 /*
200 * Any of the eight receive units signaled RX?
201 */
202 if (status & 0x00ff) {
203 struct net_device *dev = nds[0];
204 struct ixpdev_priv *ip = netdev_priv(dev);
205
206 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
207 if (likely(napi_schedule_prep(&ip->napi))) {
208 __napi_schedule(&ip->napi);
209 } else {
210 printk(KERN_CRIT "ixp2000: irq while polling!!\n");
211 }
212 }
213
214 /*
215 * Any of the eight transmit units signaled TXdone?
216 */
217 if (status & 0xff00) {
218 ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
219 ixpdev_tx_complete();
220 }
221
222 return IRQ_HANDLED;
223}
224
225#ifdef CONFIG_NET_POLL_CONTROLLER
226static void ixpdev_poll_controller(struct net_device *dev)
227{
228 disable_irq(IRQ_IXP2000_THDA0);
229 ixpdev_interrupt(IRQ_IXP2000_THDA0, dev);
230 enable_irq(IRQ_IXP2000_THDA0);
231}
232#endif
233
234static int ixpdev_open(struct net_device *dev)
235{
236 struct ixpdev_priv *ip = netdev_priv(dev);
237 int err;
238
239 napi_enable(&ip->napi);
240 if (!nds_open++) {
241 err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt,
242 IRQF_SHARED, "ixp2000_eth", nds);
243 if (err) {
244 nds_open--;
245 napi_disable(&ip->napi);
246 return err;
247 }
248
249 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff);
250 }
251
252 set_port_admin_status(ip->channel, 1);
253 netif_start_queue(dev);
254
255 return 0;
256}
257
258static int ixpdev_close(struct net_device *dev)
259{
260 struct ixpdev_priv *ip = netdev_priv(dev);
261
262 netif_stop_queue(dev);
263 napi_disable(&ip->napi);
264 set_port_admin_status(ip->channel, 0);
265
266 if (!--nds_open) {
267 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0xffff);
268 free_irq(IRQ_IXP2000_THDA0, nds);
269 }
270
271 return 0;
272}
273
274static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
275{
276 struct ixpdev_priv *ip = netdev_priv(dev);
277
278 pm3386_get_stats(ip->channel, &(dev->stats));
279
280 return &(dev->stats);
281}
282
283static const struct net_device_ops ixpdev_netdev_ops = {
284 .ndo_open = ixpdev_open,
285 .ndo_stop = ixpdev_close,
286 .ndo_start_xmit = ixpdev_xmit,
287 .ndo_change_mtu = eth_change_mtu,
288 .ndo_validate_addr = eth_validate_addr,
289 .ndo_set_mac_address = eth_mac_addr,
290 .ndo_get_stats = ixpdev_get_stats,
291#ifdef CONFIG_NET_POLL_CONTROLLER
292 .ndo_poll_controller = ixpdev_poll_controller,
293#endif
294};
295
/*
 * Allocate and pre-initialise an ethernet net_device for MSF channel
 * 'channel'.  sizeof_priv is the caller's private-area size (it must be
 * at least sizeof(struct ixpdev_priv), whose fields are set up here).
 * Advertises scatter/gather and hardware checksum, and registers the
 * NAPI poll handler with a weight of 64.  Returns the device, or NULL
 * if allocation fails.  The caller is responsible for registration and
 * for freeing the device.
 */
296struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
297{
298 struct net_device *dev;
299 struct ixpdev_priv *ip;
300
301 dev = alloc_etherdev(sizeof_priv);
302 if (dev == NULL)
303 return NULL;
304
305 dev->netdev_ops = &ixpdev_netdev_ops;
306
307 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
308
309 ip = netdev_priv(dev);
310 ip->dev = dev;
311 netif_napi_add(dev, &ip->napi, ixpdev_poll, 64);
312 ip->channel = channel;
313 ip->tx_queue_entries = 0;
314
315 return dev;
316}
317
/*
 * One-time driver initialisation.
 *
 * Records the port array and the board callback in the file-scope
 * globals, allocates one zeroed page per RX and TX descriptor buffer
 * (descriptor buf_addr fields hold physical addresses), programs the
 * scratch-ring base/head/tail registers for the four rings, pre-fills
 * the RX pending ring with all RX descriptor addresses, loads and
 * starts the RX microengine (uengine 0) and TX microengine (uengine 1),
 * and finally registers every net_device.
 *
 * Error handling uses the classic goto-cleanup ladder: a failure at any
 * stage unwinds the partially-completed loop, then falls through the
 * labels to free TX pages, RX pages, and return the negative errno.
 * Returns 0 on success.
 */
318int ixpdev_init(int __nds_count, struct net_device **__nds,
319 void (*__set_port_admin_status)(int port, int up))
320{
321 int i;
322 int err;
323
 /* Rings hold 256 entries; the descriptor counts must fit. */
324 BUILD_BUG_ON(RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192);
325
326 printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION);
327
328 nds_count = __nds_count;
329 nds = __nds;
330 set_port_admin_status = __set_port_admin_status;
331
 /* Allocate one page per RX descriptor buffer. */
332 for (i = 0; i < RX_BUF_COUNT; i++) {
333 void *buf;
334
335 buf = (void *)get_zeroed_page(GFP_KERNEL);
336 if (buf == NULL) {
337 err = -ENOMEM;
338 while (--i >= 0)
339 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
340 goto err_out;
341 }
342 rx_desc[i].buf_addr = virt_to_phys(buf);
343 rx_desc[i].buf_length = PAGE_SIZE;
344 }
345
346 /* @@@ Maybe we shouldn't be preallocating TX buffers. */
347 for (i = 0; i < TX_BUF_COUNT; i++) {
348 void *buf;
349
350 buf = (void *)get_zeroed_page(GFP_KERNEL);
351 if (buf == NULL) {
352 err = -ENOMEM;
353 while (--i >= 0)
354 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
355 goto err_free_rx;
356 }
357 tx_desc[i].buf_addr = virt_to_phys(buf);
358 }
359
360 /* 256 entries, ring status set means 'empty', base address 0x0000. */
361 ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
362 ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
363 ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);
364
365 /* 256 entries, ring status set means 'full', base address 0x0400. */
366 ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
367 ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
368 ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);
369
 /* Hand every RX descriptor to the pending ring before starting RX. */
370 for (i = 0; i < RX_BUF_COUNT; i++) {
371 ixp2000_reg_write(RING_RX_PENDING,
372 RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
373 }
374
375 ixp2000_uengine_load(0, &ixp2400_rx);
376 ixp2000_uengine_start_contexts(0, 0xff);
377
378 /* 256 entries, ring status set means 'empty', base address 0x0800. */
379 ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
380 ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
381 ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);
382
383 /* 256 entries, ring status set means 'full', base address 0x0c00. */
384 ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
385 ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
386 ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);
387
388 ixp2000_uengine_load(1, &ixp2400_tx);
389 ixp2000_uengine_start_contexts(1, 0xff);
390
 /* Register all ports; unwind already-registered ones on failure. */
391 for (i = 0; i < nds_count; i++) {
392 err = register_netdev(nds[i]);
393 if (err) {
394 while (--i >= 0)
395 unregister_netdev(nds[i]);
396 goto err_free_tx;
397 }
398 }
399
400 for (i = 0; i < nds_count; i++) {
401 printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), %pM.\n",
402 nds[i]->name, i, nds[i]->dev_addr);
403 }
404
405 return 0;
406
407err_free_tx:
408 for (i = 0; i < TX_BUF_COUNT; i++)
409 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
410
411err_free_rx:
412 for (i = 0; i < RX_BUF_COUNT; i++)
413 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
414
415err_out:
416 return err;
417}
418
/*
 * Driver teardown, reverse of ixpdev_init(): unregister every port,
 * stop and reset both microengines (TX first, then RX; mask 0x3 resets
 * uengines 0 and 1), then free the preallocated TX and RX buffer pages.
 */
419void ixpdev_deinit(void)
420{
421 int i;
422
423 /* @@@ Flush out pending packets. */
424
425 for (i = 0; i < nds_count; i++)
426 unregister_netdev(nds[i]);
427
428 ixp2000_uengine_stop_contexts(1, 0xff);
429 ixp2000_uengine_stop_contexts(0, 0xff);
430 ixp2000_uengine_reset(0x3);
431
432 for (i = 0; i < TX_BUF_COUNT; i++)
433 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
434
435 for (i = 0; i < RX_BUF_COUNT; i++)
436 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
437}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
deleted file mode 100644
index 391ece623243..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __IXPDEV_H
13#define __IXPDEV_H
14
15struct ixpdev_priv
16{
17 struct net_device *dev;
18 struct napi_struct napi;
19 int channel;
20 int tx_queue_entries;
21};
22
23struct net_device *ixpdev_alloc(int channel, int sizeof_priv);
24int ixpdev_init(int num_ports, struct net_device **nds,
25 void (*set_port_admin_status)(int port, int up));
26void ixpdev_deinit(void);
27
28
29#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
deleted file mode 100644
index 86aa08ea0c33..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __IXPDEV_PRIV_H
13#define __IXPDEV_PRIV_H
14
15#define RX_BUF_DESC_BASE 0x00001000
16#define RX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc)))
17#define TX_BUF_DESC_BASE 0x00002000
18#define TX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc)))
19#define TX_BUF_COUNT_PER_CHAN (TX_BUF_COUNT / 4)
20
21#define RING_RX_PENDING ((u32 *)IXP2000_SCRATCH_RING_VIRT_BASE)
22#define RING_RX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 4))
23#define RING_TX_PENDING ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 8))
24#define RING_TX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 12))
25
26#define SCRATCH_REG(x) ((u32 *)(IXP2000_GLOBAL_REG_VIRT_BASE | 0x0800 | (x)))
27#define RING_RX_PENDING_BASE SCRATCH_REG(0x00)
28#define RING_RX_PENDING_HEAD SCRATCH_REG(0x04)
29#define RING_RX_PENDING_TAIL SCRATCH_REG(0x08)
30#define RING_RX_DONE_BASE SCRATCH_REG(0x10)
31#define RING_RX_DONE_HEAD SCRATCH_REG(0x14)
32#define RING_RX_DONE_TAIL SCRATCH_REG(0x18)
33#define RING_TX_PENDING_BASE SCRATCH_REG(0x20)
34#define RING_TX_PENDING_HEAD SCRATCH_REG(0x24)
35#define RING_TX_PENDING_TAIL SCRATCH_REG(0x28)
36#define RING_TX_DONE_BASE SCRATCH_REG(0x30)
37#define RING_TX_DONE_HEAD SCRATCH_REG(0x34)
38#define RING_TX_DONE_TAIL SCRATCH_REG(0x38)
39
40struct ixpdev_rx_desc
41{
42 u32 buf_addr;
43 u32 buf_length;
44 u32 channel;
45 u32 pkt_length;
46};
47
48struct ixpdev_tx_desc
49{
50 u32 buf_addr;
51 u32 pkt_length;
52 u32 channel;
53 u32 unused;
54};
55
56
57#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.c b/drivers/net/ethernet/xscale/ixp2000/pm3386.c
deleted file mode 100644
index e08d3f9863b8..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/pm3386.c
+++ /dev/null
@@ -1,351 +0,0 @@
1/*
2 * Helper functions for the PM3386s on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/delay.h>
14#include <linux/netdevice.h>
15#include <asm/io.h>
16#include "pm3386.h"
17
18/*
19 * Read from register 'reg' of PM3386 device 'pm'.
20 */
21static u16 pm3386_reg_read(int pm, int reg)
22{
23 void *_reg;
24 u16 value;
25
26 _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
27 if (pm == 1)
28 _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
29
30 value = *((volatile u16 *)(_reg + (reg << 1)));
31
32// printk(KERN_INFO "pm3386_reg_read(%d, %.3x) = %.8x\n", pm, reg, value);
33
34 return value;
35}
36
37/*
38 * Write to register 'reg' of PM3386 device 'pm', and perform
39 * a readback from the identification register.
40 */
41static void pm3386_reg_write(int pm, int reg, u16 value)
42{
43 void *_reg;
44 u16 dummy;
45
46// printk(KERN_INFO "pm3386_reg_write(%d, %.3x, %.8x)\n", pm, reg, value);
47
48 _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
49 if (pm == 1)
50 _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
51
52 *((volatile u16 *)(_reg + (reg << 1))) = value;
53
54 dummy = *((volatile u16 *)_reg);
55 __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
56}
57
58/*
59 * Read from port 'port' register 'reg', where the registers
60 * for the different ports are 'spacing' registers apart.
61 */
62static u16 pm3386_port_reg_read(int port, int _reg, int spacing)
63{
64 int reg;
65
66 reg = _reg;
67 if (port & 1)
68 reg += spacing;
69
70 return pm3386_reg_read(port >> 1, reg);
71}
72
73/*
74 * Write to port 'port' register 'reg', where the registers
75 * for the different ports are 'spacing' registers apart.
76 */
77static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
78{
79 int reg;
80
81 reg = _reg;
82 if (port & 1)
83 reg += spacing;
84
85 pm3386_reg_write(port >> 1, reg, value);
86}
87
88int pm3386_secondary_present(void)
89{
90 return pm3386_reg_read(1, 0) == 0x3386;
91}
92
93void pm3386_reset(void)
94{
95 u8 mac[3][6];
96 int secondary;
97
98 secondary = pm3386_secondary_present();
99
100 /* Save programmed MAC addresses. */
101 pm3386_get_mac(0, mac[0]);
102 pm3386_get_mac(1, mac[1]);
103 if (secondary)
104 pm3386_get_mac(2, mac[2]);
105
106 /* Assert analog and digital reset. */
107 pm3386_reg_write(0, 0x002, 0x0060);
108 if (secondary)
109 pm3386_reg_write(1, 0x002, 0x0060);
110 mdelay(1);
111
112 /* Deassert analog reset. */
113 pm3386_reg_write(0, 0x002, 0x0062);
114 if (secondary)
115 pm3386_reg_write(1, 0x002, 0x0062);
116 mdelay(10);
117
118 /* Deassert digital reset. */
119 pm3386_reg_write(0, 0x002, 0x0063);
120 if (secondary)
121 pm3386_reg_write(1, 0x002, 0x0063);
122 mdelay(10);
123
124 /* Restore programmed MAC addresses. */
125 pm3386_set_mac(0, mac[0]);
126 pm3386_set_mac(1, mac[1]);
127 if (secondary)
128 pm3386_set_mac(2, mac[2]);
129
130 /* Disable carrier on all ports. */
131 pm3386_set_carrier(0, 0);
132 pm3386_set_carrier(1, 0);
133 if (secondary)
134 pm3386_set_carrier(2, 0);
135}
136
137static u16 swaph(u16 x)
138{
139 return ((x << 8) | (x >> 8)) & 0xffff;
140}
141
142int pm3386_port_count(void)
143{
144 return 2 + pm3386_secondary_present();
145}
146
147void pm3386_init_port(int port)
148{
149 int pm = port >> 1;
150
151 /*
152 * Work around ENP2611 bootloader programming MAC address
153 * in reverse.
154 */
155 if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 &&
156 (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) {
157 u16 temp[3];
158
159 temp[0] = pm3386_port_reg_read(port, 0x308, 0x100);
160 temp[1] = pm3386_port_reg_read(port, 0x309, 0x100);
161 temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100);
162 pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2]));
163 pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1]));
164 pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0]));
165 }
166
167 /*
168 * Initialise narrowbanding mode. See application note 2010486
169 * for more information. (@@@ We also need to issue a reset
170 * when ROOL or DOOL are detected.)
171 */
172 pm3386_port_reg_write(port, 0x708, 0x10, 0xd055);
173 udelay(500);
174 pm3386_port_reg_write(port, 0x708, 0x10, 0x5055);
175
176 /*
177 * SPI-3 ingress block. Set 64 bytes SPI-3 burst size
178 * towards SPI-3 bridge.
179 */
180 pm3386_port_reg_write(port, 0x122, 0x20, 0x0002);
181
182 /*
183 * Enable ingress protocol checking, and soft reset the
184 * SPI-3 ingress block.
185 */
186 pm3386_reg_write(pm, 0x103, 0x0003);
187 while (!(pm3386_reg_read(pm, 0x103) & 0x80))
188 ;
189
190 /*
191 * SPI-3 egress block. Gather 12288 bytes of the current
192 * packet in the TX fifo before initiating transmit on the
193 * SERDES interface. (Prevents TX underflows.)
194 */
195 pm3386_port_reg_write(port, 0x221, 0x20, 0x0007);
196
197 /*
198 * Enforce odd parity from the SPI-3 bridge, and soft reset
199 * the SPI-3 egress block.
200 */
201 pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1)));
202 while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c)
203 ;
204
205 /*
206 * EGMAC block. Set this channels to reject long preambles,
207 * not send or transmit PAUSE frames, enable preamble checking,
208 * disable frame length checking, enable FCS appending, enable
209 * TX frame padding.
210 */
211 pm3386_port_reg_write(port, 0x302, 0x100, 0x0113);
212
213 /*
214 * Soft reset the EGMAC block.
215 */
216 pm3386_port_reg_write(port, 0x301, 0x100, 0x8000);
217 pm3386_port_reg_write(port, 0x301, 0x100, 0x0000);
218
219 /*
220 * Auto-sense autonegotiation status.
221 */
222 pm3386_port_reg_write(port, 0x306, 0x100, 0x0100);
223
224 /*
225 * Allow reception of jumbo frames.
226 */
227 pm3386_port_reg_write(port, 0x310, 0x100, 9018);
228
229 /*
230 * Allow transmission of jumbo frames.
231 */
232 pm3386_port_reg_write(port, 0x336, 0x100, 9018);
233
234 /* @@@ Should set 0x337/0x437 (RX forwarding threshold.) */
235
236 /*
237 * Set autonegotiation parameters to 'no PAUSE, full duplex.'
238 */
239 pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020);
240
241 /*
242 * Enable and restart autonegotiation.
243 */
244 pm3386_port_reg_write(port, 0x318, 0x100, 0x0003);
245 pm3386_port_reg_write(port, 0x318, 0x100, 0x0002);
246}
247
248void pm3386_get_mac(int port, u8 *mac)
249{
250 u16 temp;
251
252 temp = pm3386_port_reg_read(port, 0x308, 0x100);
253 mac[0] = temp & 0xff;
254 mac[1] = (temp >> 8) & 0xff;
255
256 temp = pm3386_port_reg_read(port, 0x309, 0x100);
257 mac[2] = temp & 0xff;
258 mac[3] = (temp >> 8) & 0xff;
259
260 temp = pm3386_port_reg_read(port, 0x30a, 0x100);
261 mac[4] = temp & 0xff;
262 mac[5] = (temp >> 8) & 0xff;
263}
264
265void pm3386_set_mac(int port, u8 *mac)
266{
267 pm3386_port_reg_write(port, 0x308, 0x100, (mac[1] << 8) | mac[0]);
268 pm3386_port_reg_write(port, 0x309, 0x100, (mac[3] << 8) | mac[2]);
269 pm3386_port_reg_write(port, 0x30a, 0x100, (mac[5] << 8) | mac[4]);
270}
271
272static u32 pm3386_get_stat(int port, u16 base)
273{
274 u32 value;
275
276 value = pm3386_port_reg_read(port, base, 0x100);
277 value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16;
278
279 return value;
280}
281
282void pm3386_get_stats(int port, struct net_device_stats *stats)
283{
284 /*
285 * Snapshot statistics counters.
286 */
287 pm3386_port_reg_write(port, 0x500, 0x100, 0x0001);
288 while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001)
289 ;
290
291 memset(stats, 0, sizeof(*stats));
292
293 stats->rx_packets = pm3386_get_stat(port, 0x510);
294 stats->tx_packets = pm3386_get_stat(port, 0x590);
295 stats->rx_bytes = pm3386_get_stat(port, 0x514);
296 stats->tx_bytes = pm3386_get_stat(port, 0x594);
297 /* @@@ Add other stats. */
298}
299
300void pm3386_set_carrier(int port, int state)
301{
302 pm3386_port_reg_write(port, 0x703, 0x10, state ? 0x1001 : 0x0000);
303}
304
305int pm3386_is_link_up(int port)
306{
307 u16 temp;
308
309 temp = pm3386_port_reg_read(port, 0x31a, 0x100);
310 temp = pm3386_port_reg_read(port, 0x31a, 0x100);
311
312 return !!(temp & 0x0002);
313}
314
315void pm3386_enable_rx(int port)
316{
317 u16 temp;
318
319 temp = pm3386_port_reg_read(port, 0x303, 0x100);
320 temp |= 0x1000;
321 pm3386_port_reg_write(port, 0x303, 0x100, temp);
322}
323
324void pm3386_disable_rx(int port)
325{
326 u16 temp;
327
328 temp = pm3386_port_reg_read(port, 0x303, 0x100);
329 temp &= 0xefff;
330 pm3386_port_reg_write(port, 0x303, 0x100, temp);
331}
332
333void pm3386_enable_tx(int port)
334{
335 u16 temp;
336
337 temp = pm3386_port_reg_read(port, 0x303, 0x100);
338 temp |= 0x4000;
339 pm3386_port_reg_write(port, 0x303, 0x100, temp);
340}
341
342void pm3386_disable_tx(int port)
343{
344 u16 temp;
345
346 temp = pm3386_port_reg_read(port, 0x303, 0x100);
347 temp &= 0xbfff;
348 pm3386_port_reg_write(port, 0x303, 0x100, temp);
349}
350
351MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.h b/drivers/net/ethernet/xscale/ixp2000/pm3386.h
deleted file mode 100644
index cc4183dca911..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/pm3386.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Helper functions for the PM3386s on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __PM3386_H
13#define __PM3386_H
14
15void pm3386_reset(void);
16int pm3386_port_count(void);
17void pm3386_init_port(int port);
18void pm3386_get_mac(int port, u8 *mac);
19void pm3386_set_mac(int port, u8 *mac);
20void pm3386_get_stats(int port, struct net_device_stats *stats);
21void pm3386_set_carrier(int port, int state);
22int pm3386_is_link_up(int port);
23void pm3386_enable_rx(int port);
24void pm3386_disable_rx(int port);
25void pm3386_enable_tx(int port);
26void pm3386_disable_tx(int port);
27
28
29#endif
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 41a8b5a9849e..482648fcf0b6 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1002,12 +1002,41 @@ static int ixp4xx_nway_reset(struct net_device *dev)
1002 return phy_start_aneg(port->phydev); 1002 return phy_start_aneg(port->phydev);
1003} 1003}
1004 1004
1005int ixp46x_phc_index = -1;
1006
1007static int ixp4xx_get_ts_info(struct net_device *dev,
1008 struct ethtool_ts_info *info)
1009{
1010 if (!cpu_is_ixp46x()) {
1011 info->so_timestamping =
1012 SOF_TIMESTAMPING_TX_SOFTWARE |
1013 SOF_TIMESTAMPING_RX_SOFTWARE |
1014 SOF_TIMESTAMPING_SOFTWARE;
1015 info->phc_index = -1;
1016 return 0;
1017 }
1018 info->so_timestamping =
1019 SOF_TIMESTAMPING_TX_HARDWARE |
1020 SOF_TIMESTAMPING_RX_HARDWARE |
1021 SOF_TIMESTAMPING_RAW_HARDWARE;
1022 info->phc_index = ixp46x_phc_index;
1023 info->tx_types =
1024 (1 << HWTSTAMP_TX_OFF) |
1025 (1 << HWTSTAMP_TX_ON);
1026 info->rx_filters =
1027 (1 << HWTSTAMP_FILTER_NONE) |
1028 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1029 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
1030 return 0;
1031}
1032
1005static const struct ethtool_ops ixp4xx_ethtool_ops = { 1033static const struct ethtool_ops ixp4xx_ethtool_ops = {
1006 .get_drvinfo = ixp4xx_get_drvinfo, 1034 .get_drvinfo = ixp4xx_get_drvinfo,
1007 .get_settings = ixp4xx_get_settings, 1035 .get_settings = ixp4xx_get_settings,
1008 .set_settings = ixp4xx_set_settings, 1036 .set_settings = ixp4xx_set_settings,
1009 .nway_reset = ixp4xx_nway_reset, 1037 .nway_reset = ixp4xx_nway_reset,
1010 .get_link = ethtool_op_get_link, 1038 .get_link = ethtool_op_get_link,
1039 .get_ts_info = ixp4xx_get_ts_info,
1011}; 1040};
1012 1041
1013 1042
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 168c8f41d09f..d4719632ffc6 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -113,10 +113,9 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
113 113
114 SET_NETDEV_DEV(dev, &pdev->dev); 114 SET_NETDEV_DEV(dev, &pdev->dev);
115 115
116 if (pci_request_regions(pdev, "rrunner")) { 116 ret = pci_request_regions(pdev, "rrunner");
117 ret = -EIO; 117 if (ret < 0)
118 goto out; 118 goto out;
119 }
120 119
121 pci_set_drvdata(pdev, dev); 120 pci_set_drvdata(pdev, dev);
122 121
@@ -124,11 +123,8 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
124 123
125 spin_lock_init(&rrpriv->lock); 124 spin_lock_init(&rrpriv->lock);
126 125
127 dev->irq = pdev->irq;
128 dev->netdev_ops = &rr_netdev_ops; 126 dev->netdev_ops = &rr_netdev_ops;
129 127
130 dev->base_addr = pci_resource_start(pdev, 0);
131
132 /* display version info if adapter is found */ 128 /* display version info if adapter is found */
133 if (!version_disp) { 129 if (!version_disp) {
134 /* set display flag to TRUE so that */ 130 /* set display flag to TRUE so that */
@@ -146,16 +142,15 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
146 pci_set_master(pdev); 142 pci_set_master(pdev);
147 143
148 printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI " 144 printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
149 "at 0x%08lx, irq %i, PCI latency %i\n", dev->name, 145 "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
150 dev->base_addr, dev->irq, pci_latency); 146 (unsigned long long)pci_resource_start(pdev, 0),
147 pdev->irq, pci_latency);
151 148
152 /* 149 /*
153 * Remap the regs into kernel space. 150 * Remap the MMIO regs into kernel space.
154 */ 151 */
155 152 rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
156 rrpriv->regs = ioremap(dev->base_addr, 0x1000); 153 if (!rrpriv->regs) {
157
158 if (!rrpriv->regs){
159 printk(KERN_ERR "%s: Unable to map I/O register, " 154 printk(KERN_ERR "%s: Unable to map I/O register, "
160 "RoadRunner will be disabled.\n", dev->name); 155 "RoadRunner will be disabled.\n", dev->name);
161 ret = -EIO; 156 ret = -EIO;
@@ -202,8 +197,6 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
202 197
203 rr_init(dev); 198 rr_init(dev);
204 199
205 dev->base_addr = 0;
206
207 ret = register_netdev(dev); 200 ret = register_netdev(dev);
208 if (ret) 201 if (ret)
209 goto out; 202 goto out;
@@ -217,7 +210,7 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
217 pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring, 210 pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
218 rrpriv->tx_ring_dma); 211 rrpriv->tx_ring_dma);
219 if (rrpriv->regs) 212 if (rrpriv->regs)
220 iounmap(rrpriv->regs); 213 pci_iounmap(pdev, rrpriv->regs);
221 if (pdev) { 214 if (pdev) {
222 pci_release_regions(pdev); 215 pci_release_regions(pdev);
223 pci_set_drvdata(pdev, NULL); 216 pci_set_drvdata(pdev, NULL);
@@ -231,29 +224,26 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
231static void __devexit rr_remove_one (struct pci_dev *pdev) 224static void __devexit rr_remove_one (struct pci_dev *pdev)
232{ 225{
233 struct net_device *dev = pci_get_drvdata(pdev); 226 struct net_device *dev = pci_get_drvdata(pdev);
227 struct rr_private *rr = netdev_priv(dev);
234 228
235 if (dev) { 229 if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
236 struct rr_private *rr = netdev_priv(dev); 230 printk(KERN_ERR "%s: trying to unload running NIC\n",
237 231 dev->name);
238 if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){ 232 writel(HALT_NIC, &rr->regs->HostCtrl);
239 printk(KERN_ERR "%s: trying to unload running NIC\n",
240 dev->name);
241 writel(HALT_NIC, &rr->regs->HostCtrl);
242 }
243
244 pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
245 rr->evt_ring_dma);
246 pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
247 rr->rx_ring_dma);
248 pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
249 rr->tx_ring_dma);
250 unregister_netdev(dev);
251 iounmap(rr->regs);
252 free_netdev(dev);
253 pci_release_regions(pdev);
254 pci_disable_device(pdev);
255 pci_set_drvdata(pdev, NULL);
256 } 233 }
234
235 unregister_netdev(dev);
236 pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
237 rr->evt_ring_dma);
238 pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
239 rr->rx_ring_dma);
240 pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
241 rr->tx_ring_dma);
242 pci_iounmap(pdev, rr->regs);
243 pci_release_regions(pdev);
244 pci_disable_device(pdev);
245 pci_set_drvdata(pdev, NULL);
246 free_netdev(dev);
257} 247}
258 248
259 249
@@ -1229,9 +1219,9 @@ static int rr_open(struct net_device *dev)
1229 readl(&regs->HostCtrl); 1219 readl(&regs->HostCtrl);
1230 spin_unlock_irqrestore(&rrpriv->lock, flags); 1220 spin_unlock_irqrestore(&rrpriv->lock, flags);
1231 1221
1232 if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) { 1222 if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
1233 printk(KERN_WARNING "%s: Requested IRQ %d is busy\n", 1223 printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
1234 dev->name, dev->irq); 1224 dev->name, pdev->irq);
1235 ecode = -EAGAIN; 1225 ecode = -EAGAIN;
1236 goto error; 1226 goto error;
1237 } 1227 }
@@ -1338,16 +1328,15 @@ static void rr_dump(struct net_device *dev)
1338 1328
1339static int rr_close(struct net_device *dev) 1329static int rr_close(struct net_device *dev)
1340{ 1330{
1341 struct rr_private *rrpriv; 1331 struct rr_private *rrpriv = netdev_priv(dev);
1342 struct rr_regs __iomem *regs; 1332 struct rr_regs __iomem *regs = rrpriv->regs;
1333 struct pci_dev *pdev = rrpriv->pci_dev;
1343 unsigned long flags; 1334 unsigned long flags;
1344 u32 tmp; 1335 u32 tmp;
1345 short i; 1336 short i;
1346 1337
1347 netif_stop_queue(dev); 1338 netif_stop_queue(dev);
1348 1339
1349 rrpriv = netdev_priv(dev);
1350 regs = rrpriv->regs;
1351 1340
1352 /* 1341 /*
1353 * Lock to make sure we are not cleaning up while another CPU 1342 * Lock to make sure we are not cleaning up while another CPU
@@ -1386,15 +1375,15 @@ static int rr_close(struct net_device *dev)
1386 rr_raz_tx(rrpriv, dev); 1375 rr_raz_tx(rrpriv, dev);
1387 rr_raz_rx(rrpriv, dev); 1376 rr_raz_rx(rrpriv, dev);
1388 1377
1389 pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl), 1378 pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
1390 rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); 1379 rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
1391 rrpriv->rx_ctrl = NULL; 1380 rrpriv->rx_ctrl = NULL;
1392 1381
1393 pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info), 1382 pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
1394 rrpriv->info, rrpriv->info_dma); 1383 rrpriv->info_dma);
1395 rrpriv->info = NULL; 1384 rrpriv->info = NULL;
1396 1385
1397 free_irq(dev->irq, dev); 1386 free_irq(pdev->irq, dev);
1398 spin_unlock_irqrestore(&rrpriv->lock, flags); 1387 spin_unlock_irqrestore(&rrpriv->lock, flags);
1399 1388
1400 return 0; 1389 return 0;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index c35824552792..4ffcd57b011b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -27,6 +27,7 @@
27 27
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/hyperv.h> 29#include <linux/hyperv.h>
30#include <linux/rndis.h>
30 31
31/* Fwd declaration */ 32/* Fwd declaration */
32struct hv_netvsc_packet; 33struct hv_netvsc_packet;
@@ -506,295 +507,6 @@ struct netvsc_device {
506 void *extension; 507 void *extension;
507}; 508};
508 509
509
510/* Status codes */
511
512
513#ifndef STATUS_SUCCESS
514#define STATUS_SUCCESS (0x00000000L)
515#endif
516
517#ifndef STATUS_UNSUCCESSFUL
518#define STATUS_UNSUCCESSFUL (0xC0000001L)
519#endif
520
521#ifndef STATUS_PENDING
522#define STATUS_PENDING (0x00000103L)
523#endif
524
525#ifndef STATUS_INSUFFICIENT_RESOURCES
526#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
527#endif
528
529#ifndef STATUS_BUFFER_OVERFLOW
530#define STATUS_BUFFER_OVERFLOW (0x80000005L)
531#endif
532
533#ifndef STATUS_NOT_SUPPORTED
534#define STATUS_NOT_SUPPORTED (0xC00000BBL)
535#endif
536
537#define RNDIS_STATUS_SUCCESS (STATUS_SUCCESS)
538#define RNDIS_STATUS_PENDING (STATUS_PENDING)
539#define RNDIS_STATUS_NOT_RECOGNIZED (0x00010001L)
540#define RNDIS_STATUS_NOT_COPIED (0x00010002L)
541#define RNDIS_STATUS_NOT_ACCEPTED (0x00010003L)
542#define RNDIS_STATUS_CALL_ACTIVE (0x00010007L)
543
544#define RNDIS_STATUS_ONLINE (0x40010003L)
545#define RNDIS_STATUS_RESET_START (0x40010004L)
546#define RNDIS_STATUS_RESET_END (0x40010005L)
547#define RNDIS_STATUS_RING_STATUS (0x40010006L)
548#define RNDIS_STATUS_CLOSED (0x40010007L)
549#define RNDIS_STATUS_WAN_LINE_UP (0x40010008L)
550#define RNDIS_STATUS_WAN_LINE_DOWN (0x40010009L)
551#define RNDIS_STATUS_WAN_FRAGMENT (0x4001000AL)
552#define RNDIS_STATUS_MEDIA_CONNECT (0x4001000BL)
553#define RNDIS_STATUS_MEDIA_DISCONNECT (0x4001000CL)
554#define RNDIS_STATUS_HARDWARE_LINE_UP (0x4001000DL)
555#define RNDIS_STATUS_HARDWARE_LINE_DOWN (0x4001000EL)
556#define RNDIS_STATUS_INTERFACE_UP (0x4001000FL)
557#define RNDIS_STATUS_INTERFACE_DOWN (0x40010010L)
558#define RNDIS_STATUS_MEDIA_BUSY (0x40010011L)
559#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION (0x40010012L)
560#define RNDIS_STATUS_WW_INDICATION RDIA_SPECIFIC_INDICATION
561#define RNDIS_STATUS_LINK_SPEED_CHANGE (0x40010013L)
562
563#define RNDIS_STATUS_NOT_RESETTABLE (0x80010001L)
564#define RNDIS_STATUS_SOFT_ERRORS (0x80010003L)
565#define RNDIS_STATUS_HARD_ERRORS (0x80010004L)
566#define RNDIS_STATUS_BUFFER_OVERFLOW (STATUS_BUFFER_OVERFLOW)
567
568#define RNDIS_STATUS_FAILURE (STATUS_UNSUCCESSFUL)
569#define RNDIS_STATUS_RESOURCES (STATUS_INSUFFICIENT_RESOURCES)
570#define RNDIS_STATUS_CLOSING (0xC0010002L)
571#define RNDIS_STATUS_BAD_VERSION (0xC0010004L)
572#define RNDIS_STATUS_BAD_CHARACTERISTICS (0xC0010005L)
573#define RNDIS_STATUS_ADAPTER_NOT_FOUND (0xC0010006L)
574#define RNDIS_STATUS_OPEN_FAILED (0xC0010007L)
575#define RNDIS_STATUS_DEVICE_FAILED (0xC0010008L)
576#define RNDIS_STATUS_MULTICAST_FULL (0xC0010009L)
577#define RNDIS_STATUS_MULTICAST_EXISTS (0xC001000AL)
578#define RNDIS_STATUS_MULTICAST_NOT_FOUND (0xC001000BL)
579#define RNDIS_STATUS_REQUEST_ABORTED (0xC001000CL)
580#define RNDIS_STATUS_RESET_IN_PROGRESS (0xC001000DL)
581#define RNDIS_STATUS_CLOSING_INDICATING (0xC001000EL)
582#define RNDIS_STATUS_NOT_SUPPORTED (STATUS_NOT_SUPPORTED)
583#define RNDIS_STATUS_INVALID_PACKET (0xC001000FL)
584#define RNDIS_STATUS_OPEN_LIST_FULL (0xC0010010L)
585#define RNDIS_STATUS_ADAPTER_NOT_READY (0xC0010011L)
586#define RNDIS_STATUS_ADAPTER_NOT_OPEN (0xC0010012L)
587#define RNDIS_STATUS_NOT_INDICATING (0xC0010013L)
588#define RNDIS_STATUS_INVALID_LENGTH (0xC0010014L)
589#define RNDIS_STATUS_INVALID_DATA (0xC0010015L)
590#define RNDIS_STATUS_BUFFER_TOO_SHORT (0xC0010016L)
591#define RNDIS_STATUS_INVALID_OID (0xC0010017L)
592#define RNDIS_STATUS_ADAPTER_REMOVED (0xC0010018L)
593#define RNDIS_STATUS_UNSUPPORTED_MEDIA (0xC0010019L)
594#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE (0xC001001AL)
595#define RNDIS_STATUS_FILE_NOT_FOUND (0xC001001BL)
596#define RNDIS_STATUS_ERROR_READING_FILE (0xC001001CL)
597#define RNDIS_STATUS_ALREADY_MAPPED (0xC001001DL)
598#define RNDIS_STATUS_RESOURCE_CONFLICT (0xC001001EL)
599#define RNDIS_STATUS_NO_CABLE (0xC001001FL)
600
601#define RNDIS_STATUS_INVALID_SAP (0xC0010020L)
602#define RNDIS_STATUS_SAP_IN_USE (0xC0010021L)
603#define RNDIS_STATUS_INVALID_ADDRESS (0xC0010022L)
604#define RNDIS_STATUS_VC_NOT_ACTIVATED (0xC0010023L)
605#define RNDIS_STATUS_DEST_OUT_OF_ORDER (0xC0010024L)
606#define RNDIS_STATUS_VC_NOT_AVAILABLE (0xC0010025L)
607#define RNDIS_STATUS_CELLRATE_NOT_AVAILABLE (0xC0010026L)
608#define RNDIS_STATUS_INCOMPATABLE_QOS (0xC0010027L)
609#define RNDIS_STATUS_AAL_PARAMS_UNSUPPORTED (0xC0010028L)
610#define RNDIS_STATUS_NO_ROUTE_TO_DESTINATION (0xC0010029L)
611
612#define RNDIS_STATUS_TOKEN_RING_OPEN_ERROR (0xC0011000L)
613
614/* Object Identifiers used by NdisRequest Query/Set Information */
615/* General Objects */
616#define RNDIS_OID_GEN_SUPPORTED_LIST 0x00010101
617#define RNDIS_OID_GEN_HARDWARE_STATUS 0x00010102
618#define RNDIS_OID_GEN_MEDIA_SUPPORTED 0x00010103
619#define RNDIS_OID_GEN_MEDIA_IN_USE 0x00010104
620#define RNDIS_OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
621#define RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
622#define RNDIS_OID_GEN_LINK_SPEED 0x00010107
623#define RNDIS_OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
624#define RNDIS_OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
625#define RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
626#define RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
627#define RNDIS_OID_GEN_VENDOR_ID 0x0001010C
628#define RNDIS_OID_GEN_VENDOR_DESCRIPTION 0x0001010D
629#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
630#define RNDIS_OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
631#define RNDIS_OID_GEN_DRIVER_VERSION 0x00010110
632#define RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
633#define RNDIS_OID_GEN_PROTOCOL_OPTIONS 0x00010112
634#define RNDIS_OID_GEN_MAC_OPTIONS 0x00010113
635#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
636#define RNDIS_OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
637#define RNDIS_OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
638#define RNDIS_OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
639#define RNDIS_OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
640#define RNDIS_OID_GEN_MACHINE_NAME 0x0001021A
641#define RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
642
643#define RNDIS_OID_GEN_XMIT_OK 0x00020101
644#define RNDIS_OID_GEN_RCV_OK 0x00020102
645#define RNDIS_OID_GEN_XMIT_ERROR 0x00020103
646#define RNDIS_OID_GEN_RCV_ERROR 0x00020104
647#define RNDIS_OID_GEN_RCV_NO_BUFFER 0x00020105
648
649#define RNDIS_OID_GEN_DIRECTED_BYTES_XMIT 0x00020201
650#define RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202
651#define RNDIS_OID_GEN_MULTICAST_BYTES_XMIT 0x00020203
652#define RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204
653#define RNDIS_OID_GEN_BROADCAST_BYTES_XMIT 0x00020205
654#define RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206
655#define RNDIS_OID_GEN_DIRECTED_BYTES_RCV 0x00020207
656#define RNDIS_OID_GEN_DIRECTED_FRAMES_RCV 0x00020208
657#define RNDIS_OID_GEN_MULTICAST_BYTES_RCV 0x00020209
658#define RNDIS_OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A
659#define RNDIS_OID_GEN_BROADCAST_BYTES_RCV 0x0002020B
660#define RNDIS_OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C
661
662#define RNDIS_OID_GEN_RCV_CRC_ERROR 0x0002020D
663#define RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E
664
665#define RNDIS_OID_GEN_GET_TIME_CAPS 0x0002020F
666#define RNDIS_OID_GEN_GET_NETCARD_TIME 0x00020210
667
668/* These are connection-oriented general OIDs. */
669/* These replace the above OIDs for connection-oriented media. */
670#define RNDIS_OID_GEN_CO_SUPPORTED_LIST 0x00010101
671#define RNDIS_OID_GEN_CO_HARDWARE_STATUS 0x00010102
672#define RNDIS_OID_GEN_CO_MEDIA_SUPPORTED 0x00010103
673#define RNDIS_OID_GEN_CO_MEDIA_IN_USE 0x00010104
674#define RNDIS_OID_GEN_CO_LINK_SPEED 0x00010105
675#define RNDIS_OID_GEN_CO_VENDOR_ID 0x00010106
676#define RNDIS_OID_GEN_CO_VENDOR_DESCRIPTION 0x00010107
677#define RNDIS_OID_GEN_CO_DRIVER_VERSION 0x00010108
678#define RNDIS_OID_GEN_CO_PROTOCOL_OPTIONS 0x00010109
679#define RNDIS_OID_GEN_CO_MAC_OPTIONS 0x0001010A
680#define RNDIS_OID_GEN_CO_MEDIA_CONNECT_STATUS 0x0001010B
681#define RNDIS_OID_GEN_CO_VENDOR_DRIVER_VERSION 0x0001010C
682#define RNDIS_OID_GEN_CO_MINIMUM_LINK_SPEED 0x0001010D
683
684#define RNDIS_OID_GEN_CO_GET_TIME_CAPS 0x00010201
685#define RNDIS_OID_GEN_CO_GET_NETCARD_TIME 0x00010202
686
687/* These are connection-oriented statistics OIDs. */
688#define RNDIS_OID_GEN_CO_XMIT_PDUS_OK 0x00020101
689#define RNDIS_OID_GEN_CO_RCV_PDUS_OK 0x00020102
690#define RNDIS_OID_GEN_CO_XMIT_PDUS_ERROR 0x00020103
691#define RNDIS_OID_GEN_CO_RCV_PDUS_ERROR 0x00020104
692#define RNDIS_OID_GEN_CO_RCV_PDUS_NO_BUFFER 0x00020105
693
694
695#define RNDIS_OID_GEN_CO_RCV_CRC_ERROR 0x00020201
696#define RNDIS_OID_GEN_CO_TRANSMIT_QUEUE_LENGTH 0x00020202
697#define RNDIS_OID_GEN_CO_BYTES_XMIT 0x00020203
698#define RNDIS_OID_GEN_CO_BYTES_RCV 0x00020204
699#define RNDIS_OID_GEN_CO_BYTES_XMIT_OUTSTANDING 0x00020205
700#define RNDIS_OID_GEN_CO_NETCARD_LOAD 0x00020206
701
702/* These are objects for Connection-oriented media call-managers. */
703#define RNDIS_OID_CO_ADD_PVC 0xFF000001
704#define RNDIS_OID_CO_DELETE_PVC 0xFF000002
705#define RNDIS_OID_CO_GET_CALL_INFORMATION 0xFF000003
706#define RNDIS_OID_CO_ADD_ADDRESS 0xFF000004
707#define RNDIS_OID_CO_DELETE_ADDRESS 0xFF000005
708#define RNDIS_OID_CO_GET_ADDRESSES 0xFF000006
709#define RNDIS_OID_CO_ADDRESS_CHANGE 0xFF000007
710#define RNDIS_OID_CO_SIGNALING_ENABLED 0xFF000008
711#define RNDIS_OID_CO_SIGNALING_DISABLED 0xFF000009
712
713/* 802.3 Objects (Ethernet) */
714#define RNDIS_OID_802_3_PERMANENT_ADDRESS 0x01010101
715#define RNDIS_OID_802_3_CURRENT_ADDRESS 0x01010102
716#define RNDIS_OID_802_3_MULTICAST_LIST 0x01010103
717#define RNDIS_OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
718#define RNDIS_OID_802_3_MAC_OPTIONS 0x01010105
719
720#define NDIS_802_3_MAC_OPTION_PRIORITY 0x00000001
721
722#define RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
723#define RNDIS_OID_802_3_XMIT_ONE_COLLISION 0x01020102
724#define RNDIS_OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
725
726#define RNDIS_OID_802_3_XMIT_DEFERRED 0x01020201
727#define RNDIS_OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
728#define RNDIS_OID_802_3_RCV_OVERRUN 0x01020203
729#define RNDIS_OID_802_3_XMIT_UNDERRUN 0x01020204
730#define RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
731#define RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
732#define RNDIS_OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
733
734/* Remote NDIS message types */
735#define REMOTE_NDIS_PACKET_MSG 0x00000001
736#define REMOTE_NDIS_INITIALIZE_MSG 0x00000002
737#define REMOTE_NDIS_HALT_MSG 0x00000003
738#define REMOTE_NDIS_QUERY_MSG 0x00000004
739#define REMOTE_NDIS_SET_MSG 0x00000005
740#define REMOTE_NDIS_RESET_MSG 0x00000006
741#define REMOTE_NDIS_INDICATE_STATUS_MSG 0x00000007
742#define REMOTE_NDIS_KEEPALIVE_MSG 0x00000008
743
744#define REMOTE_CONDIS_MP_CREATE_VC_MSG 0x00008001
745#define REMOTE_CONDIS_MP_DELETE_VC_MSG 0x00008002
746#define REMOTE_CONDIS_MP_ACTIVATE_VC_MSG 0x00008005
747#define REMOTE_CONDIS_MP_DEACTIVATE_VC_MSG 0x00008006
748#define REMOTE_CONDIS_INDICATE_STATUS_MSG 0x00008007
749
750/* Remote NDIS message completion types */
751#define REMOTE_NDIS_INITIALIZE_CMPLT 0x80000002
752#define REMOTE_NDIS_QUERY_CMPLT 0x80000004
753#define REMOTE_NDIS_SET_CMPLT 0x80000005
754#define REMOTE_NDIS_RESET_CMPLT 0x80000006
755#define REMOTE_NDIS_KEEPALIVE_CMPLT 0x80000008
756
757#define REMOTE_CONDIS_MP_CREATE_VC_CMPLT 0x80008001
758#define REMOTE_CONDIS_MP_DELETE_VC_CMPLT 0x80008002
759#define REMOTE_CONDIS_MP_ACTIVATE_VC_CMPLT 0x80008005
760#define REMOTE_CONDIS_MP_DEACTIVATE_VC_CMPLT 0x80008006
761
762/*
763 * Reserved message type for private communication between lower-layer host
764 * driver and remote device, if necessary.
765 */
766#define REMOTE_NDIS_BUS_MSG 0xff000001
767
768/* Defines for DeviceFlags in struct rndis_initialize_complete */
769#define RNDIS_DF_CONNECTIONLESS 0x00000001
770#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002
771#define RNDIS_DF_RAW_DATA 0x00000004
772
773/* Remote NDIS medium types. */
774#define RNDIS_MEDIUM_802_3 0x00000000
775#define RNDIS_MEDIUM_802_5 0x00000001
776#define RNDIS_MEDIUM_FDDI 0x00000002
777#define RNDIS_MEDIUM_WAN 0x00000003
778#define RNDIS_MEDIUM_LOCAL_TALK 0x00000004
779#define RNDIS_MEDIUM_ARCNET_RAW 0x00000006
780#define RNDIS_MEDIUM_ARCNET_878_2 0x00000007
781#define RNDIS_MEDIUM_ATM 0x00000008
782#define RNDIS_MEDIUM_WIRELESS_WAN 0x00000009
783#define RNDIS_MEDIUM_IRDA 0x0000000a
784#define RNDIS_MEDIUM_CO_WAN 0x0000000b
785/* Not a real medium, defined as an upper-bound */
786#define RNDIS_MEDIUM_MAX 0x0000000d
787
788
789/* Remote NDIS medium connection states. */
790#define RNDIS_MEDIA_STATE_CONNECTED 0x00000000
791#define RNDIS_MEDIA_STATE_DISCONNECTED 0x00000001
792
793/* Remote NDIS version numbers */
794#define RNDIS_MAJOR_VERSION 0x00000001
795#define RNDIS_MINOR_VERSION 0x00000000
796
797
798/* NdisInitialize message */ 510/* NdisInitialize message */
799struct rndis_initialize_request { 511struct rndis_initialize_request {
800 u32 req_id; 512 u32 req_id;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d025c83cd12a..8b919471472f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -428,6 +428,24 @@ int netvsc_device_remove(struct hv_device *device)
428 return 0; 428 return 0;
429} 429}
430 430
431
432#define RING_AVAIL_PERCENT_HIWATER 20
433#define RING_AVAIL_PERCENT_LOWATER 10
434
435/*
436 * Get the percentage of available bytes to write in the ring.
437 * The return value is in range from 0 to 100.
438 */
439static inline u32 hv_ringbuf_avail_percent(
440 struct hv_ring_buffer_info *ring_info)
441{
442 u32 avail_read, avail_write;
443
444 hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
445
446 return avail_write * 100 / ring_info->ring_datasize;
447}
448
431static void netvsc_send_completion(struct hv_device *device, 449static void netvsc_send_completion(struct hv_device *device,
432 struct vmpacket_descriptor *packet) 450 struct vmpacket_descriptor *packet)
433{ 451{
@@ -455,6 +473,8 @@ static void netvsc_send_completion(struct hv_device *device,
455 complete(&net_device->channel_init_wait); 473 complete(&net_device->channel_init_wait);
456 } else if (nvsp_packet->hdr.msg_type == 474 } else if (nvsp_packet->hdr.msg_type ==
457 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) { 475 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
476 int num_outstanding_sends;
477
458 /* Get the send context */ 478 /* Get the send context */
459 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long) 479 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
460 packet->trans_id; 480 packet->trans_id;
@@ -463,10 +483,14 @@ static void netvsc_send_completion(struct hv_device *device,
463 nvsc_packet->completion.send.send_completion( 483 nvsc_packet->completion.send.send_completion(
464 nvsc_packet->completion.send.send_completion_ctx); 484 nvsc_packet->completion.send.send_completion_ctx);
465 485
466 atomic_dec(&net_device->num_outstanding_sends); 486 num_outstanding_sends =
487 atomic_dec_return(&net_device->num_outstanding_sends);
467 488
468 if (netif_queue_stopped(ndev) && !net_device->start_remove) 489 if (netif_queue_stopped(ndev) && !net_device->start_remove &&
469 netif_wake_queue(ndev); 490 (hv_ringbuf_avail_percent(&device->channel->outbound)
491 > RING_AVAIL_PERCENT_HIWATER ||
492 num_outstanding_sends < 1))
493 netif_wake_queue(ndev);
470 } else { 494 } else {
471 netdev_err(ndev, "Unknown send completion packet type- " 495 netdev_err(ndev, "Unknown send completion packet type- "
472 "%d received!!\n", nvsp_packet->hdr.msg_type); 496 "%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -519,10 +543,19 @@ int netvsc_send(struct hv_device *device,
519 543
520 if (ret == 0) { 544 if (ret == 0) {
521 atomic_inc(&net_device->num_outstanding_sends); 545 atomic_inc(&net_device->num_outstanding_sends);
546 if (hv_ringbuf_avail_percent(&device->channel->outbound) <
547 RING_AVAIL_PERCENT_LOWATER) {
548 netif_stop_queue(ndev);
549 if (atomic_read(&net_device->
550 num_outstanding_sends) < 1)
551 netif_wake_queue(ndev);
552 }
522 } else if (ret == -EAGAIN) { 553 } else if (ret == -EAGAIN) {
523 netif_stop_queue(ndev); 554 netif_stop_queue(ndev);
524 if (atomic_read(&net_device->num_outstanding_sends) < 1) 555 if (atomic_read(&net_device->num_outstanding_sends) < 1) {
525 netif_wake_queue(ndev); 556 netif_wake_queue(ndev);
557 ret = -ENOSPC;
558 }
526 } else { 559 } else {
527 netdev_err(ndev, "Unable to send packet %p ret %d\n", 560 netdev_err(ndev, "Unable to send packet %p ret %d\n",
528 packet, ret); 561 packet, ret);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dd294783b5c5..8f8ed3320425 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -44,6 +44,7 @@ struct net_device_context {
44 /* point back to our device context */ 44 /* point back to our device context */
45 struct hv_device *device_ctx; 45 struct hv_device *device_ctx;
46 struct delayed_work dwork; 46 struct delayed_work dwork;
47 struct work_struct work;
47}; 48};
48 49
49 50
@@ -51,30 +52,22 @@ static int ring_size = 128;
51module_param(ring_size, int, S_IRUGO); 52module_param(ring_size, int, S_IRUGO);
52MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); 53MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
53 54
54struct set_multicast_work {
55 struct work_struct work;
56 struct net_device *net;
57};
58
59static void do_set_multicast(struct work_struct *w) 55static void do_set_multicast(struct work_struct *w)
60{ 56{
61 struct set_multicast_work *swk = 57 struct net_device_context *ndevctx =
62 container_of(w, struct set_multicast_work, work); 58 container_of(w, struct net_device_context, work);
63 struct net_device *net = swk->net;
64
65 struct net_device_context *ndevctx = netdev_priv(net);
66 struct netvsc_device *nvdev; 59 struct netvsc_device *nvdev;
67 struct rndis_device *rdev; 60 struct rndis_device *rdev;
68 61
69 nvdev = hv_get_drvdata(ndevctx->device_ctx); 62 nvdev = hv_get_drvdata(ndevctx->device_ctx);
70 if (nvdev == NULL) 63 if (nvdev == NULL || nvdev->ndev == NULL)
71 goto out; 64 return;
72 65
73 rdev = nvdev->extension; 66 rdev = nvdev->extension;
74 if (rdev == NULL) 67 if (rdev == NULL)
75 goto out; 68 return;
76 69
77 if (net->flags & IFF_PROMISC) 70 if (nvdev->ndev->flags & IFF_PROMISC)
78 rndis_filter_set_packet_filter(rdev, 71 rndis_filter_set_packet_filter(rdev,
79 NDIS_PACKET_TYPE_PROMISCUOUS); 72 NDIS_PACKET_TYPE_PROMISCUOUS);
80 else 73 else
@@ -82,21 +75,13 @@ static void do_set_multicast(struct work_struct *w)
82 NDIS_PACKET_TYPE_BROADCAST | 75 NDIS_PACKET_TYPE_BROADCAST |
83 NDIS_PACKET_TYPE_ALL_MULTICAST | 76 NDIS_PACKET_TYPE_ALL_MULTICAST |
84 NDIS_PACKET_TYPE_DIRECTED); 77 NDIS_PACKET_TYPE_DIRECTED);
85
86out:
87 kfree(w);
88} 78}
89 79
90static void netvsc_set_multicast_list(struct net_device *net) 80static void netvsc_set_multicast_list(struct net_device *net)
91{ 81{
92 struct set_multicast_work *swk = 82 struct net_device_context *net_device_ctx = netdev_priv(net);
93 kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
94 if (swk == NULL)
95 return;
96 83
97 swk->net = net; 84 schedule_work(&net_device_ctx->work);
98 INIT_WORK(&swk->work, do_set_multicast);
99 schedule_work(&swk->work);
100} 85}
101 86
102static int netvsc_open(struct net_device *net) 87static int netvsc_open(struct net_device *net)
@@ -125,6 +110,8 @@ static int netvsc_close(struct net_device *net)
125 110
126 netif_tx_disable(net); 111 netif_tx_disable(net);
127 112
113 /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
114 cancel_work_sync(&net_device_ctx->work);
128 ret = rndis_filter_close(device_obj); 115 ret = rndis_filter_close(device_obj);
129 if (ret != 0) 116 if (ret != 0)
130 netdev_err(net, "unable to close device (ret %d).\n", ret); 117 netdev_err(net, "unable to close device (ret %d).\n", ret);
@@ -224,9 +211,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
224 net->stats.tx_packets++; 211 net->stats.tx_packets++;
225 } else { 212 } else {
226 kfree(packet); 213 kfree(packet);
214 if (ret != -EAGAIN) {
215 dev_kfree_skb_any(skb);
216 net->stats.tx_dropped++;
217 }
227 } 218 }
228 219
229 return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK; 220 return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
230} 221}
231 222
232/* 223/*
@@ -335,6 +326,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
335 326
336 nvdev->start_remove = true; 327 nvdev->start_remove = true;
337 cancel_delayed_work_sync(&ndevctx->dwork); 328 cancel_delayed_work_sync(&ndevctx->dwork);
329 cancel_work_sync(&ndevctx->work);
338 netif_tx_disable(ndev); 330 netif_tx_disable(ndev);
339 rndis_filter_device_remove(hdev); 331 rndis_filter_device_remove(hdev);
340 332
@@ -403,6 +395,7 @@ static int netvsc_probe(struct hv_device *dev,
403 net_device_ctx->device_ctx = dev; 395 net_device_ctx->device_ctx = dev;
404 hv_set_drvdata(dev, net); 396 hv_set_drvdata(dev, net);
405 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); 397 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
398 INIT_WORK(&net_device_ctx->work, do_set_multicast);
406 399
407 net->netdev_ops = &device_ops; 400 net->netdev_ops = &device_ops;
408 401
@@ -456,6 +449,7 @@ static int netvsc_remove(struct hv_device *dev)
456 449
457 ndev_ctx = netdev_priv(net); 450 ndev_ctx = netdev_priv(net);
458 cancel_delayed_work_sync(&ndev_ctx->dwork); 451 cancel_delayed_work_sync(&ndev_ctx->dwork);
452 cancel_work_sync(&ndev_ctx->work);
459 453
460 /* Stop outbound asap */ 454 /* Stop outbound asap */
461 netif_tx_disable(net); 455 netif_tx_disable(net);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index d6be64bcefd4..981ebb115637 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -129,8 +129,8 @@ static void dump_rndis_message(struct hv_device *hv_dev,
129 netdev = net_device->ndev; 129 netdev = net_device->ndev;
130 130
131 switch (rndis_msg->ndis_msg_type) { 131 switch (rndis_msg->ndis_msg_type) {
132 case REMOTE_NDIS_PACKET_MSG: 132 case RNDIS_MSG_PACKET:
133 netdev_dbg(netdev, "REMOTE_NDIS_PACKET_MSG (len %u, " 133 netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
134 "data offset %u data len %u, # oob %u, " 134 "data offset %u data len %u, # oob %u, "
135 "oob offset %u, oob len %u, pkt offset %u, " 135 "oob offset %u, oob len %u, pkt offset %u, "
136 "pkt len %u\n", 136 "pkt len %u\n",
@@ -144,8 +144,8 @@ static void dump_rndis_message(struct hv_device *hv_dev,
144 rndis_msg->msg.pkt.per_pkt_info_len); 144 rndis_msg->msg.pkt.per_pkt_info_len);
145 break; 145 break;
146 146
147 case REMOTE_NDIS_INITIALIZE_CMPLT: 147 case RNDIS_MSG_INIT_C:
148 netdev_dbg(netdev, "REMOTE_NDIS_INITIALIZE_CMPLT " 148 netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
149 "(len %u, id 0x%x, status 0x%x, major %d, minor %d, " 149 "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
150 "device flags %d, max xfer size 0x%x, max pkts %u, " 150 "device flags %d, max xfer size 0x%x, max pkts %u, "
151 "pkt aligned %u)\n", 151 "pkt aligned %u)\n",
@@ -162,8 +162,8 @@ static void dump_rndis_message(struct hv_device *hv_dev,
162 pkt_alignment_factor); 162 pkt_alignment_factor);
163 break; 163 break;
164 164
165 case REMOTE_NDIS_QUERY_CMPLT: 165 case RNDIS_MSG_QUERY_C:
166 netdev_dbg(netdev, "REMOTE_NDIS_QUERY_CMPLT " 166 netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
167 "(len %u, id 0x%x, status 0x%x, buf len %u, " 167 "(len %u, id 0x%x, status 0x%x, buf len %u, "
168 "buf offset %u)\n", 168 "buf offset %u)\n",
169 rndis_msg->msg_len, 169 rndis_msg->msg_len,
@@ -175,16 +175,16 @@ static void dump_rndis_message(struct hv_device *hv_dev,
175 info_buf_offset); 175 info_buf_offset);
176 break; 176 break;
177 177
178 case REMOTE_NDIS_SET_CMPLT: 178 case RNDIS_MSG_SET_C:
179 netdev_dbg(netdev, 179 netdev_dbg(netdev,
180 "REMOTE_NDIS_SET_CMPLT (len %u, id 0x%x, status 0x%x)\n", 180 "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
181 rndis_msg->msg_len, 181 rndis_msg->msg_len,
182 rndis_msg->msg.set_complete.req_id, 182 rndis_msg->msg.set_complete.req_id,
183 rndis_msg->msg.set_complete.status); 183 rndis_msg->msg.set_complete.status);
184 break; 184 break;
185 185
186 case REMOTE_NDIS_INDICATE_STATUS_MSG: 186 case RNDIS_MSG_INDICATE:
187 netdev_dbg(netdev, "REMOTE_NDIS_INDICATE_STATUS_MSG " 187 netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
188 "(len %u, status 0x%x, buf len %u, buf offset %u)\n", 188 "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
189 rndis_msg->msg_len, 189 rndis_msg->msg_len,
190 rndis_msg->msg.indicate_status.status, 190 rndis_msg->msg.indicate_status.status,
@@ -264,14 +264,14 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
264 sizeof(struct rndis_filter_packet)); 264 sizeof(struct rndis_filter_packet));
265 265
266 if (resp->ndis_msg_type == 266 if (resp->ndis_msg_type ==
267 REMOTE_NDIS_RESET_CMPLT) { 267 RNDIS_MSG_RESET_C) {
268 /* does not have a request id field */ 268 /* does not have a request id field */
269 request->response_msg.msg.reset_complete. 269 request->response_msg.msg.reset_complete.
270 status = STATUS_BUFFER_OVERFLOW; 270 status = RNDIS_STATUS_BUFFER_OVERFLOW;
271 } else { 271 } else {
272 request->response_msg.msg. 272 request->response_msg.msg.
273 init_complete.status = 273 init_complete.status =
274 STATUS_BUFFER_OVERFLOW; 274 RNDIS_STATUS_BUFFER_OVERFLOW;
275 } 275 }
276 } 276 }
277 277
@@ -415,19 +415,19 @@ int rndis_filter_receive(struct hv_device *dev,
415 dump_rndis_message(dev, rndis_msg); 415 dump_rndis_message(dev, rndis_msg);
416 416
417 switch (rndis_msg->ndis_msg_type) { 417 switch (rndis_msg->ndis_msg_type) {
418 case REMOTE_NDIS_PACKET_MSG: 418 case RNDIS_MSG_PACKET:
419 /* data msg */ 419 /* data msg */
420 rndis_filter_receive_data(rndis_dev, rndis_msg, pkt); 420 rndis_filter_receive_data(rndis_dev, rndis_msg, pkt);
421 break; 421 break;
422 422
423 case REMOTE_NDIS_INITIALIZE_CMPLT: 423 case RNDIS_MSG_INIT_C:
424 case REMOTE_NDIS_QUERY_CMPLT: 424 case RNDIS_MSG_QUERY_C:
425 case REMOTE_NDIS_SET_CMPLT: 425 case RNDIS_MSG_SET_C:
426 /* completion msgs */ 426 /* completion msgs */
427 rndis_filter_receive_response(rndis_dev, rndis_msg); 427 rndis_filter_receive_response(rndis_dev, rndis_msg);
428 break; 428 break;
429 429
430 case REMOTE_NDIS_INDICATE_STATUS_MSG: 430 case RNDIS_MSG_INDICATE:
431 /* notification msgs */ 431 /* notification msgs */
432 rndis_filter_receive_indicate_status(rndis_dev, rndis_msg); 432 rndis_filter_receive_indicate_status(rndis_dev, rndis_msg);
433 break; 433 break;
@@ -456,7 +456,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
456 return -EINVAL; 456 return -EINVAL;
457 457
458 *result_size = 0; 458 *result_size = 0;
459 request = get_rndis_request(dev, REMOTE_NDIS_QUERY_MSG, 459 request = get_rndis_request(dev, RNDIS_MSG_QUERY,
460 RNDIS_MESSAGE_SIZE(struct rndis_query_request)); 460 RNDIS_MESSAGE_SIZE(struct rndis_query_request));
461 if (!request) { 461 if (!request) {
462 ret = -ENOMEM; 462 ret = -ENOMEM;
@@ -536,7 +536,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
536 536
537 ndev = dev->net_dev->ndev; 537 ndev = dev->net_dev->ndev;
538 538
539 request = get_rndis_request(dev, REMOTE_NDIS_SET_MSG, 539 request = get_rndis_request(dev, RNDIS_MSG_SET,
540 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + 540 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
541 sizeof(u32)); 541 sizeof(u32));
542 if (!request) { 542 if (!request) {
@@ -588,7 +588,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
588 u32 status; 588 u32 status;
589 int ret, t; 589 int ret, t;
590 590
591 request = get_rndis_request(dev, REMOTE_NDIS_INITIALIZE_MSG, 591 request = get_rndis_request(dev, RNDIS_MSG_INIT,
592 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); 592 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
593 if (!request) { 593 if (!request) {
594 ret = -ENOMEM; 594 ret = -ENOMEM;
@@ -641,7 +641,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
641 struct rndis_halt_request *halt; 641 struct rndis_halt_request *halt;
642 642
643 /* Attempt to do a rndis device halt */ 643 /* Attempt to do a rndis device halt */
644 request = get_rndis_request(dev, REMOTE_NDIS_HALT_MSG, 644 request = get_rndis_request(dev, RNDIS_MSG_HALT,
645 RNDIS_MESSAGE_SIZE(struct rndis_halt_request)); 645 RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
646 if (!request) 646 if (!request)
647 goto cleanup; 647 goto cleanup;
@@ -805,7 +805,7 @@ int rndis_filter_send(struct hv_device *dev,
805 if (isvlan) 805 if (isvlan)
806 rndis_msg_size += NDIS_VLAN_PPI_SIZE; 806 rndis_msg_size += NDIS_VLAN_PPI_SIZE;
807 807
808 rndis_msg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG; 808 rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
809 rndis_msg->msg_len = pkt->total_data_buflen + 809 rndis_msg->msg_len = pkt->total_data_buflen +
810 rndis_msg_size; 810 rndis_msg_size;
811 811
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 468047866c8c..35758445297e 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -321,8 +321,8 @@ config AU1000_FIR
321 Say M to build a module; it will be called au1k_ir.ko 321 Say M to build a module; it will be called au1k_ir.ko
322 322
323config SMC_IRCC_FIR 323config SMC_IRCC_FIR
324 tristate "SMSC IrCC (EXPERIMENTAL)" 324 tristate "SMSC IrCC"
325 depends on EXPERIMENTAL && IRDA && ISA_DMA_API 325 depends on IRDA && ISA_DMA_API
326 help 326 help
327 Say Y here if you want to build support for the SMC Infrared 327 Say Y here if you want to build support for the SMC Infrared
328 Communications Controller. It is used in a wide variety of 328 Communications Controller. It is used in a wide variety of
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 4351296dde32..510b9c8d23a9 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1710,7 +1710,7 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
1710 1710
1711/* Flush all packets */ 1711/* Flush all packets */
1712 while ((i--) && (self->txpending)) 1712 while ((i--) && (self->txpending))
1713 udelay (10000); 1713 msleep(10);
1714 1714
1715 spin_lock_irqsave(&self->spinlock, flags); 1715 spin_lock_irqsave(&self->spinlock, flags);
1716 1716
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 725d6b367822..eb315b8d07a3 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -737,7 +737,7 @@ static int sh_irda_stop(struct net_device *ndev)
737 netif_stop_queue(ndev); 737 netif_stop_queue(ndev);
738 pm_runtime_put_sync(&self->pdev->dev); 738 pm_runtime_put_sync(&self->pdev->dev);
739 739
740 dev_info(&ndev->dev, "stoped\n"); 740 dev_info(&ndev->dev, "stopped\n");
741 741
742 return 0; 742 return 0;
743} 743}
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index e6661b5c1f83..256eddf1f75a 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -685,7 +685,7 @@ static int sh_sir_stop(struct net_device *ndev)
685 685
686 netif_stop_queue(ndev); 686 netif_stop_queue(ndev);
687 687
688 dev_info(&ndev->dev, "stoped\n"); 688 dev_info(&ndev->dev, "stopped\n");
689 689
690 return 0; 690 return 0;
691} 691}
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 6c95d4087b2d..a926813ee91d 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -1,7 +1,6 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Description: Driver for the SMC Infrared Communications Controller 3 * Description: Driver for the SMC Infrared Communications Controller
4 * Status: Experimental.
5 * Author: Daniele Peri (peri@csai.unipa.it) 4 * Author: Daniele Peri (peri@csai.unipa.it)
6 * Created at: 5 * Created at:
7 * Modified at: 6 * Modified at:
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f975afdc315c..66a9bfe7b1c8 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -57,7 +57,7 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
57 struct hlist_node *n; 57 struct hlist_node *n;
58 58
59 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) { 59 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
60 if (!compare_ether_addr_64bits(vlan->dev->dev_addr, addr)) 60 if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
61 return vlan; 61 return vlan;
62 } 62 }
63 return NULL; 63 return NULL;
@@ -96,7 +96,7 @@ static int macvlan_addr_busy(const struct macvlan_port *port,
96 * currently in use by the underlying device or 96 * currently in use by the underlying device or
97 * another macvlan. 97 * another macvlan.
98 */ 98 */
99 if (!compare_ether_addr_64bits(port->dev->dev_addr, addr)) 99 if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
100 return 1; 100 return 1;
101 101
102 if (macvlan_hash_lookup(port, addr)) 102 if (macvlan_hash_lookup(port, addr))
@@ -118,8 +118,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
118 return vlan->forward(dev, skb); 118 return vlan->forward(dev, skb);
119 119
120 skb->dev = dev; 120 skb->dev = dev;
121 if (!compare_ether_addr_64bits(eth->h_dest, 121 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
122 dev->broadcast))
123 skb->pkt_type = PACKET_BROADCAST; 122 skb->pkt_type = PACKET_BROADCAST;
124 else 123 else
125 skb->pkt_type = PACKET_MULTICAST; 124 skb->pkt_type = PACKET_MULTICAST;
@@ -259,7 +258,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
259 258
260xmit_world: 259xmit_world:
261 skb->ip_summed = ip_summed; 260 skb->ip_summed = ip_summed;
262 skb_set_dev(skb, vlan->lowerdev); 261 skb->dev = vlan->lowerdev;
263 return dev_queue_xmit(skb); 262 return dev_queue_xmit(skb);
264} 263}
265 264
@@ -312,7 +311,8 @@ static int macvlan_open(struct net_device *dev)
312 int err; 311 int err;
313 312
314 if (vlan->port->passthru) { 313 if (vlan->port->passthru) {
315 dev_set_promiscuity(lowerdev, 1); 314 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
315 dev_set_promiscuity(lowerdev, 1);
316 goto hash_add; 316 goto hash_add;
317 } 317 }
318 318
@@ -344,12 +344,15 @@ static int macvlan_stop(struct net_device *dev)
344 struct macvlan_dev *vlan = netdev_priv(dev); 344 struct macvlan_dev *vlan = netdev_priv(dev);
345 struct net_device *lowerdev = vlan->lowerdev; 345 struct net_device *lowerdev = vlan->lowerdev;
346 346
347 dev_uc_unsync(lowerdev, dev);
348 dev_mc_unsync(lowerdev, dev);
349
347 if (vlan->port->passthru) { 350 if (vlan->port->passthru) {
348 dev_set_promiscuity(lowerdev, -1); 351 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
352 dev_set_promiscuity(lowerdev, -1);
349 goto hash_del; 353 goto hash_del;
350 } 354 }
351 355
352 dev_mc_unsync(lowerdev, dev);
353 if (dev->flags & IFF_ALLMULTI) 356 if (dev->flags & IFF_ALLMULTI)
354 dev_set_allmulti(lowerdev, -1); 357 dev_set_allmulti(lowerdev, -1);
355 358
@@ -399,10 +402,11 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
399 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); 402 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
400} 403}
401 404
402static void macvlan_set_multicast_list(struct net_device *dev) 405static void macvlan_set_mac_lists(struct net_device *dev)
403{ 406{
404 struct macvlan_dev *vlan = netdev_priv(dev); 407 struct macvlan_dev *vlan = netdev_priv(dev);
405 408
409 dev_uc_sync(vlan->lowerdev, dev);
406 dev_mc_sync(vlan->lowerdev, dev); 410 dev_mc_sync(vlan->lowerdev, dev);
407} 411}
408 412
@@ -542,6 +546,43 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
542 return 0; 546 return 0;
543} 547}
544 548
549static int macvlan_fdb_add(struct ndmsg *ndm,
550 struct net_device *dev,
551 unsigned char *addr,
552 u16 flags)
553{
554 struct macvlan_dev *vlan = netdev_priv(dev);
555 int err = -EINVAL;
556
557 if (!vlan->port->passthru)
558 return -EOPNOTSUPP;
559
560 if (is_unicast_ether_addr(addr))
561 err = dev_uc_add_excl(dev, addr);
562 else if (is_multicast_ether_addr(addr))
563 err = dev_mc_add_excl(dev, addr);
564
565 return err;
566}
567
568static int macvlan_fdb_del(struct ndmsg *ndm,
569 struct net_device *dev,
570 unsigned char *addr)
571{
572 struct macvlan_dev *vlan = netdev_priv(dev);
573 int err = -EINVAL;
574
575 if (!vlan->port->passthru)
576 return -EOPNOTSUPP;
577
578 if (is_unicast_ether_addr(addr))
579 err = dev_uc_del(dev, addr);
580 else if (is_multicast_ether_addr(addr))
581 err = dev_mc_del(dev, addr);
582
583 return err;
584}
585
545static void macvlan_ethtool_get_drvinfo(struct net_device *dev, 586static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
546 struct ethtool_drvinfo *drvinfo) 587 struct ethtool_drvinfo *drvinfo)
547{ 588{
@@ -572,11 +613,14 @@ static const struct net_device_ops macvlan_netdev_ops = {
572 .ndo_change_mtu = macvlan_change_mtu, 613 .ndo_change_mtu = macvlan_change_mtu,
573 .ndo_change_rx_flags = macvlan_change_rx_flags, 614 .ndo_change_rx_flags = macvlan_change_rx_flags,
574 .ndo_set_mac_address = macvlan_set_mac_address, 615 .ndo_set_mac_address = macvlan_set_mac_address,
575 .ndo_set_rx_mode = macvlan_set_multicast_list, 616 .ndo_set_rx_mode = macvlan_set_mac_lists,
576 .ndo_get_stats64 = macvlan_dev_get_stats64, 617 .ndo_get_stats64 = macvlan_dev_get_stats64,
577 .ndo_validate_addr = eth_validate_addr, 618 .ndo_validate_addr = eth_validate_addr,
578 .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid, 619 .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid,
579 .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid, 620 .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid,
621 .ndo_fdb_add = macvlan_fdb_add,
622 .ndo_fdb_del = macvlan_fdb_del,
623 .ndo_fdb_dump = ndo_dflt_fdb_dump,
580}; 624};
581 625
582void macvlan_common_setup(struct net_device *dev) 626void macvlan_common_setup(struct net_device *dev)
@@ -711,6 +755,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
711 if (data && data[IFLA_MACVLAN_MODE]) 755 if (data && data[IFLA_MACVLAN_MODE])
712 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 756 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
713 757
758 if (data && data[IFLA_MACVLAN_FLAGS])
759 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
760
714 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 761 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
715 if (port->count) 762 if (port->count)
716 return -EINVAL; 763 return -EINVAL;
@@ -760,6 +807,16 @@ static int macvlan_changelink(struct net_device *dev,
760 struct macvlan_dev *vlan = netdev_priv(dev); 807 struct macvlan_dev *vlan = netdev_priv(dev);
761 if (data && data[IFLA_MACVLAN_MODE]) 808 if (data && data[IFLA_MACVLAN_MODE])
762 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 809 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
810 if (data && data[IFLA_MACVLAN_FLAGS]) {
811 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
812 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
813
814 if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
815 dev_set_promiscuity(vlan->lowerdev, -1);
816 else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
817 dev_set_promiscuity(vlan->lowerdev, 1);
818 vlan->flags = flags;
819 }
763 return 0; 820 return 0;
764} 821}
765 822
@@ -773,7 +830,10 @@ static int macvlan_fill_info(struct sk_buff *skb,
773{ 830{
774 struct macvlan_dev *vlan = netdev_priv(dev); 831 struct macvlan_dev *vlan = netdev_priv(dev);
775 832
776 NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode); 833 if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
834 goto nla_put_failure;
835 if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
836 goto nla_put_failure;
777 return 0; 837 return 0;
778 838
779nla_put_failure: 839nla_put_failure:
@@ -781,7 +841,8 @@ nla_put_failure:
781} 841}
782 842
783static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { 843static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
784 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, 844 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
845 [IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
785}; 846};
786 847
787int macvlan_link_register(struct rtnl_link_ops *ops) 848int macvlan_link_register(struct rtnl_link_ops *ops)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0427c6561c84..2ee56de7b0ca 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1,5 +1,6 @@
1#include <linux/etherdevice.h> 1#include <linux/etherdevice.h>
2#include <linux/if_macvlan.h> 2#include <linux/if_macvlan.h>
3#include <linux/if_vlan.h>
3#include <linux/interrupt.h> 4#include <linux/interrupt.h>
4#include <linux/nsproxy.h> 5#include <linux/nsproxy.h>
5#include <linux/compat.h> 6#include <linux/compat.h>
@@ -505,10 +506,11 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
505 if (copy > size) { 506 if (copy > size) {
506 ++from; 507 ++from;
507 --count; 508 --count;
508 } 509 offset = 0;
510 } else
511 offset += size;
509 copy -= size; 512 copy -= size;
510 offset1 += size; 513 offset1 += size;
511 offset = 0;
512 } 514 }
513 515
514 if (len == offset1) 516 if (len == offset1)
@@ -518,24 +520,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
518 struct page *page[MAX_SKB_FRAGS]; 520 struct page *page[MAX_SKB_FRAGS];
519 int num_pages; 521 int num_pages;
520 unsigned long base; 522 unsigned long base;
523 unsigned long truesize;
521 524
522 len = from->iov_len - offset1; 525 len = from->iov_len - offset;
523 if (!len) { 526 if (!len) {
524 offset1 = 0; 527 offset = 0;
525 ++from; 528 ++from;
526 continue; 529 continue;
527 } 530 }
528 base = (unsigned long)from->iov_base + offset1; 531 base = (unsigned long)from->iov_base + offset;
529 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; 532 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
533 if (i + size > MAX_SKB_FRAGS)
534 return -EMSGSIZE;
530 num_pages = get_user_pages_fast(base, size, 0, &page[i]); 535 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
531 if ((num_pages != size) || 536 if (num_pages != size) {
532 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags)) 537 for (i = 0; i < num_pages; i++)
533 /* put_page is in skb free */ 538 put_page(page[i]);
534 return -EFAULT; 539 return -EFAULT;
540 }
541 truesize = size * PAGE_SIZE;
535 skb->data_len += len; 542 skb->data_len += len;
536 skb->len += len; 543 skb->len += len;
537 skb->truesize += len; 544 skb->truesize += truesize;
538 atomic_add(len, &skb->sk->sk_wmem_alloc); 545 atomic_add(truesize, &skb->sk->sk_wmem_alloc);
539 while (len) { 546 while (len) {
540 int off = base & ~PAGE_MASK; 547 int off = base & ~PAGE_MASK;
541 int size = min_t(int, len, PAGE_SIZE - off); 548 int size = min_t(int, len, PAGE_SIZE - off);
@@ -546,7 +553,7 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
546 len -= size; 553 len -= size;
547 i++; 554 i++;
548 } 555 }
549 offset1 = 0; 556 offset = 0;
550 ++from; 557 ++from;
551 } 558 }
552 return 0; 559 return 0;
@@ -646,7 +653,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
646 int err; 653 int err;
647 struct virtio_net_hdr vnet_hdr = { 0 }; 654 struct virtio_net_hdr vnet_hdr = { 0 };
648 int vnet_hdr_len = 0; 655 int vnet_hdr_len = 0;
649 int copylen; 656 int copylen = 0;
650 bool zerocopy = false; 657 bool zerocopy = false;
651 658
652 if (q->flags & IFF_VNET_HDR) { 659 if (q->flags & IFF_VNET_HDR) {
@@ -675,15 +682,31 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
675 if (unlikely(len < ETH_HLEN)) 682 if (unlikely(len < ETH_HLEN))
676 goto err; 683 goto err;
677 684
685 err = -EMSGSIZE;
686 if (unlikely(count > UIO_MAXIOV))
687 goto err;
688
678 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) 689 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
679 zerocopy = true; 690 zerocopy = true;
680 691
681 if (zerocopy) { 692 if (zerocopy) {
693 /* Userspace may produce vectors with count greater than
694 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
695 * to let the rest of data to be fit in the frags.
696 */
697 if (count > MAX_SKB_FRAGS) {
698 copylen = iov_length(iv, count - MAX_SKB_FRAGS);
699 if (copylen < vnet_hdr_len)
700 copylen = 0;
701 else
702 copylen -= vnet_hdr_len;
703 }
682 /* There are 256 bytes to be copied in skb, so there is enough 704 /* There are 256 bytes to be copied in skb, so there is enough
683 * room for skb expand head in case it is used. 705 * room for skb expand head in case it is used.
684 * The rest buffer is mapped from userspace. 706 * The rest buffer is mapped from userspace.
685 */ 707 */
686 copylen = vnet_hdr.hdr_len; 708 if (copylen < vnet_hdr.hdr_len)
709 copylen = vnet_hdr.hdr_len;
687 if (!copylen) 710 if (!copylen)
688 copylen = GOODCOPY_LEN; 711 copylen = GOODCOPY_LEN;
689 } else 712 } else
@@ -694,10 +717,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
694 if (!skb) 717 if (!skb)
695 goto err; 718 goto err;
696 719
697 if (zerocopy) { 720 if (zerocopy)
698 err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count); 721 err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
699 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 722 else
700 } else
701 err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, 723 err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
702 len); 724 len);
703 if (err) 725 if (err)
@@ -716,8 +738,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
716 rcu_read_lock_bh(); 738 rcu_read_lock_bh();
717 vlan = rcu_dereference_bh(q->vlan); 739 vlan = rcu_dereference_bh(q->vlan);
718 /* copy skb_ubuf_info for callback when skb has no error */ 740 /* copy skb_ubuf_info for callback when skb has no error */
719 if (zerocopy) 741 if (zerocopy) {
720 skb_shinfo(skb)->destructor_arg = m->msg_control; 742 skb_shinfo(skb)->destructor_arg = m->msg_control;
743 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
744 }
721 if (vlan) 745 if (vlan)
722 macvlan_start_xmit(skb, vlan->dev); 746 macvlan_start_xmit(skb, vlan->dev);
723 else 747 else
@@ -759,6 +783,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
759 struct macvlan_dev *vlan; 783 struct macvlan_dev *vlan;
760 int ret; 784 int ret;
761 int vnet_hdr_len = 0; 785 int vnet_hdr_len = 0;
786 int vlan_offset = 0;
787 int copied;
762 788
763 if (q->flags & IFF_VNET_HDR) { 789 if (q->flags & IFF_VNET_HDR) {
764 struct virtio_net_hdr vnet_hdr; 790 struct virtio_net_hdr vnet_hdr;
@@ -773,18 +799,48 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
773 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) 799 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
774 return -EFAULT; 800 return -EFAULT;
775 } 801 }
802 copied = vnet_hdr_len;
803
804 if (!vlan_tx_tag_present(skb))
805 len = min_t(int, skb->len, len);
806 else {
807 int copy;
808 struct {
809 __be16 h_vlan_proto;
810 __be16 h_vlan_TCI;
811 } veth;
812 veth.h_vlan_proto = htons(ETH_P_8021Q);
813 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
814
815 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
816 len = min_t(int, skb->len + VLAN_HLEN, len);
817
818 copy = min_t(int, vlan_offset, len);
819 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
820 len -= copy;
821 copied += copy;
822 if (ret || !len)
823 goto done;
824
825 copy = min_t(int, sizeof(veth), len);
826 ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
827 len -= copy;
828 copied += copy;
829 if (ret || !len)
830 goto done;
831 }
776 832
777 len = min_t(int, skb->len, len); 833 ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
778 834 copied += len;
779 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
780 835
836done:
781 rcu_read_lock_bh(); 837 rcu_read_lock_bh();
782 vlan = rcu_dereference_bh(q->vlan); 838 vlan = rcu_dereference_bh(q->vlan);
783 if (vlan) 839 if (vlan)
784 macvlan_count_rx(vlan, len, ret == 0, 0); 840 macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
785 rcu_read_unlock_bh(); 841 rcu_read_unlock_bh();
786 842
787 return ret ? ret : (len + vnet_hdr_len); 843 return ret ? ret : copied;
788} 844}
789 845
790static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb, 846static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 0e01f4e5cd64..944cdfb80fe4 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -135,6 +135,25 @@ config MDIO_OCTEON
135 135
136 If in doubt, say Y. 136 If in doubt, say Y.
137 137
138config MDIO_BUS_MUX
139 tristate
140 depends on OF_MDIO
141 help
142 This module provides a driver framework for MDIO bus
143 multiplexers which connect one of several child MDIO busses
144 to a parent bus. Switching between child busses is done by
145 device specific drivers.
146
147config MDIO_BUS_MUX_GPIO
148 tristate "Support for GPIO controlled MDIO bus multiplexers"
149 depends on OF_GPIO && OF_MDIO
150 select MDIO_BUS_MUX
151 help
152 This module provides a driver for MDIO bus multiplexers that
153 are controlled via GPIO lines. The multiplexer connects one of
154 several child MDIO busses to a parent bus. Child bus
155 selection is under the control of GPIO lines.
156
138endif # PHYLIB 157endif # PHYLIB
139 158
140config MICREL_KS8995MA 159config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b7438b1b94b9..f51af688ef8b 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -25,3 +25,5 @@ obj-$(CONFIG_MICREL_PHY) += micrel.o
25obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o 25obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
26obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o 26obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
27obj-$(CONFIG_AMD_PHY) += amd.o 27obj-$(CONFIG_AMD_PHY) += amd.o
28obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
29obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e16f98cb4f04..cd802eb25fd2 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -39,10 +39,7 @@ static int bcm63xx_config_init(struct phy_device *phydev)
39 MII_BCM63XX_IR_SPEED | 39 MII_BCM63XX_IR_SPEED |
40 MII_BCM63XX_IR_LINK) | 40 MII_BCM63XX_IR_LINK) |
41 MII_BCM63XX_IR_EN; 41 MII_BCM63XX_IR_EN;
42 err = phy_write(phydev, MII_BCM63XX_IR, reg); 42 return phy_write(phydev, MII_BCM63XX_IR, reg);
43 if (err < 0)
44 return err;
45 return 0;
46} 43}
47 44
48static int bcm63xx_ack_interrupt(struct phy_device *phydev) 45static int bcm63xx_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 2f774acdb551..5f59cc064778 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -134,12 +134,7 @@ static int dm9161_config_init(struct phy_device *phydev)
134 return err; 134 return err;
135 135
136 /* Reconnect the PHY, and enable Autonegotiation */ 136 /* Reconnect the PHY, and enable Autonegotiation */
137 err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE); 137 return phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
138
139 if (err < 0)
140 return err;
141
142 return 0;
143} 138}
144 139
145static int dm9161_ack_interrupt(struct phy_device *phydev) 140static int dm9161_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index dd7ae19579d1..940b29022d0c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1215,6 +1215,36 @@ static void dp83640_txtstamp(struct phy_device *phydev,
1215 } 1215 }
1216} 1216}
1217 1217
1218static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
1219{
1220 struct dp83640_private *dp83640 = dev->priv;
1221
1222 info->so_timestamping =
1223 SOF_TIMESTAMPING_TX_HARDWARE |
1224 SOF_TIMESTAMPING_RX_HARDWARE |
1225 SOF_TIMESTAMPING_RAW_HARDWARE;
1226 info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
1227 info->tx_types =
1228 (1 << HWTSTAMP_TX_OFF) |
1229 (1 << HWTSTAMP_TX_ON) |
1230 (1 << HWTSTAMP_TX_ONESTEP_SYNC);
1231 info->rx_filters =
1232 (1 << HWTSTAMP_FILTER_NONE) |
1233 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1234 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1235 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1236 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1237 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1238 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
1239 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1240 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1241 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1242 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
1243 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
1244 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
1245 return 0;
1246}
1247
1218static struct phy_driver dp83640_driver = { 1248static struct phy_driver dp83640_driver = {
1219 .phy_id = DP83640_PHY_ID, 1249 .phy_id = DP83640_PHY_ID,
1220 .phy_id_mask = 0xfffffff0, 1250 .phy_id_mask = 0xfffffff0,
@@ -1225,6 +1255,7 @@ static struct phy_driver dp83640_driver = {
1225 .remove = dp83640_remove, 1255 .remove = dp83640_remove,
1226 .config_aneg = genphy_config_aneg, 1256 .config_aneg = genphy_config_aneg,
1227 .read_status = genphy_read_status, 1257 .read_status = genphy_read_status,
1258 .ts_info = dp83640_ts_info,
1228 .hwtstamp = dp83640_hwtstamp, 1259 .hwtstamp = dp83640_hwtstamp,
1229 .rxtstamp = dp83640_rxtstamp, 1260 .rxtstamp = dp83640_rxtstamp,
1230 .txtstamp = dp83640_txtstamp, 1261 .txtstamp = dp83640_txtstamp,
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index f08c85acf761..5ac46f5226f3 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -40,6 +40,7 @@ MODULE_LICENSE("GPL");
40#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */ 40#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
41#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ 41#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
42#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ 42#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
43#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
43 44
44static int ip175c_config_init(struct phy_device *phydev) 45static int ip175c_config_init(struct phy_device *phydev)
45{ 46{
@@ -185,6 +186,15 @@ static int ip175c_config_aneg(struct phy_device *phydev)
185 return 0; 186 return 0;
186} 187}
187 188
189static int ip101a_g_ack_interrupt(struct phy_device *phydev)
190{
191 int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
192 if (err < 0)
193 return err;
194
195 return 0;
196}
197
188static struct phy_driver ip175c_driver = { 198static struct phy_driver ip175c_driver = {
189 .phy_id = 0x02430d80, 199 .phy_id = 0x02430d80,
190 .name = "ICPlus IP175C", 200 .name = "ICPlus IP175C",
@@ -204,7 +214,6 @@ static struct phy_driver ip1001_driver = {
204 .phy_id_mask = 0x0ffffff0, 214 .phy_id_mask = 0x0ffffff0,
205 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | 215 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
206 SUPPORTED_Asym_Pause, 216 SUPPORTED_Asym_Pause,
207 .flags = PHY_HAS_INTERRUPT,
208 .config_init = &ip1001_config_init, 217 .config_init = &ip1001_config_init,
209 .config_aneg = &genphy_config_aneg, 218 .config_aneg = &genphy_config_aneg,
210 .read_status = &genphy_read_status, 219 .read_status = &genphy_read_status,
@@ -220,6 +229,7 @@ static struct phy_driver ip101a_g_driver = {
220 .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | 229 .features = PHY_BASIC_FEATURES | SUPPORTED_Pause |
221 SUPPORTED_Asym_Pause, 230 SUPPORTED_Asym_Pause,
222 .flags = PHY_HAS_INTERRUPT, 231 .flags = PHY_HAS_INTERRUPT,
232 .ack_interrupt = ip101a_g_ack_interrupt,
223 .config_init = &ip101a_g_config_init, 233 .config_init = &ip101a_g_config_init,
224 .config_aneg = &genphy_config_aneg, 234 .config_aneg = &genphy_config_aneg,
225 .read_status = &genphy_read_status, 235 .read_status = &genphy_read_status,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e8b9c53c304b..418928d644bf 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -455,11 +455,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
455 if (err < 0) 455 if (err < 0)
456 return err; 456 return err;
457 457
458 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 458 return phy_write(phydev, MII_BMCR, BMCR_RESET);
459 if (err < 0)
460 return err;
461
462 return 0;
463} 459}
464 460
465static int m88e1118_config_aneg(struct phy_device *phydev) 461static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -515,11 +511,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
515 if (err < 0) 511 if (err < 0)
516 return err; 512 return err;
517 513
518 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 514 return phy_write(phydev, MII_BMCR, BMCR_RESET);
519 if (err < 0)
520 return err;
521
522 return 0;
523} 515}
524 516
525static int m88e1149_config_init(struct phy_device *phydev) 517static int m88e1149_config_init(struct phy_device *phydev)
@@ -545,11 +537,7 @@ static int m88e1149_config_init(struct phy_device *phydev)
545 if (err < 0) 537 if (err < 0)
546 return err; 538 return err;
547 539
548 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 540 return phy_write(phydev, MII_BMCR, BMCR_RESET);
549 if (err < 0)
550 return err;
551
552 return 0;
553} 541}
554 542
555static int m88e1145_config_init(struct phy_device *phydev) 543static int m88e1145_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
new file mode 100644
index 000000000000..e0cc4ef33dee
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -0,0 +1,142 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011, 2012 Cavium, Inc.
7 */
8
9#include <linux/platform_device.h>
10#include <linux/device.h>
11#include <linux/of_mdio.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/phy.h>
15#include <linux/mdio-mux.h>
16#include <linux/of_gpio.h>
17
18#define DRV_VERSION "1.0"
19#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
20
21#define MDIO_MUX_GPIO_MAX_BITS 8
22
23struct mdio_mux_gpio_state {
24 int gpio[MDIO_MUX_GPIO_MAX_BITS];
25 unsigned int num_gpios;
26 void *mux_handle;
27};
28
29static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
30 void *data)
31{
32 int change;
33 unsigned int n;
34 struct mdio_mux_gpio_state *s = data;
35
36 if (current_child == desired_child)
37 return 0;
38
39 change = current_child == -1 ? -1 : current_child ^ desired_child;
40
41 for (n = 0; n < s->num_gpios; n++) {
42 if (change & 1)
43 gpio_set_value_cansleep(s->gpio[n],
44 (desired_child & 1) != 0);
45 change >>= 1;
46 desired_child >>= 1;
47 }
48
49 return 0;
50}
51
52static int __devinit mdio_mux_gpio_probe(struct platform_device *pdev)
53{
54 enum of_gpio_flags f;
55 struct mdio_mux_gpio_state *s;
56 unsigned int num_gpios;
57 unsigned int n;
58 int r;
59
60 if (!pdev->dev.of_node)
61 return -ENODEV;
62
63 num_gpios = of_gpio_count(pdev->dev.of_node);
64 if (num_gpios == 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
65 return -ENODEV;
66
67 s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
68 if (!s)
69 return -ENOMEM;
70
71 s->num_gpios = num_gpios;
72
73 for (n = 0; n < num_gpios; ) {
74 int gpio = of_get_gpio_flags(pdev->dev.of_node, n, &f);
75 if (gpio < 0) {
76 r = (gpio == -ENODEV) ? -EPROBE_DEFER : gpio;
77 goto err;
78 }
79 s->gpio[n] = gpio;
80
81 n++;
82
83 r = gpio_request(gpio, "mdio_mux_gpio");
84 if (r)
85 goto err;
86
87 r = gpio_direction_output(gpio, 0);
88 if (r)
89 goto err;
90 }
91
92 r = mdio_mux_init(&pdev->dev,
93 mdio_mux_gpio_switch_fn, &s->mux_handle, s);
94
95 if (r == 0) {
96 pdev->dev.platform_data = s;
97 return 0;
98 }
99err:
100 while (n) {
101 n--;
102 gpio_free(s->gpio[n]);
103 }
104 devm_kfree(&pdev->dev, s);
105 return r;
106}
107
108static int __devexit mdio_mux_gpio_remove(struct platform_device *pdev)
109{
110 struct mdio_mux_gpio_state *s = pdev->dev.platform_data;
111 mdio_mux_uninit(s->mux_handle);
112 return 0;
113}
114
115static struct of_device_id mdio_mux_gpio_match[] = {
116 {
117 .compatible = "mdio-mux-gpio",
118 },
119 {
120 /* Legacy compatible property. */
121 .compatible = "cavium,mdio-mux-sn74cbtlv3253",
122 },
123 {},
124};
125MODULE_DEVICE_TABLE(of, mdio_mux_gpio_match);
126
127static struct platform_driver mdio_mux_gpio_driver = {
128 .driver = {
129 .name = "mdio-mux-gpio",
130 .owner = THIS_MODULE,
131 .of_match_table = mdio_mux_gpio_match,
132 },
133 .probe = mdio_mux_gpio_probe,
134 .remove = __devexit_p(mdio_mux_gpio_remove),
135};
136
137module_platform_driver(mdio_mux_gpio_driver);
138
139MODULE_DESCRIPTION(DRV_DESCRIPTION);
140MODULE_VERSION(DRV_VERSION);
141MODULE_AUTHOR("David Daney");
142MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
new file mode 100644
index 000000000000..39ea0674dcde
--- /dev/null
+++ b/drivers/net/phy/mdio-mux.c
@@ -0,0 +1,192 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011, 2012 Cavium, Inc.
7 */
8
9#include <linux/platform_device.h>
10#include <linux/mdio-mux.h>
11#include <linux/of_mdio.h>
12#include <linux/device.h>
13#include <linux/module.h>
14#include <linux/phy.h>
15
16#define DRV_VERSION "1.0"
17#define DRV_DESCRIPTION "MDIO bus multiplexer driver"
18
19struct mdio_mux_child_bus;
20
21struct mdio_mux_parent_bus {
22 struct mii_bus *mii_bus;
23 int current_child;
24 int parent_id;
25 void *switch_data;
26 int (*switch_fn)(int current_child, int desired_child, void *data);
27
28 /* List of our children linked through their next fields. */
29 struct mdio_mux_child_bus *children;
30};
31
32struct mdio_mux_child_bus {
33 struct mii_bus *mii_bus;
34 struct mdio_mux_parent_bus *parent;
35 struct mdio_mux_child_bus *next;
36 int bus_number;
37 int phy_irq[PHY_MAX_ADDR];
38};
39
40/*
41 * The parent bus' lock is used to order access to the switch_fn.
42 */
43static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
44{
45 struct mdio_mux_child_bus *cb = bus->priv;
46 struct mdio_mux_parent_bus *pb = cb->parent;
47 int r;
48
49 mutex_lock(&pb->mii_bus->mdio_lock);
50 r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
51 if (r)
52 goto out;
53
54 pb->current_child = cb->bus_number;
55
56 r = pb->mii_bus->read(pb->mii_bus, phy_id, regnum);
57out:
58 mutex_unlock(&pb->mii_bus->mdio_lock);
59
60 return r;
61}
62
63/*
64 * The parent bus' lock is used to order access to the switch_fn.
65 */
66static int mdio_mux_write(struct mii_bus *bus, int phy_id,
67 int regnum, u16 val)
68{
69 struct mdio_mux_child_bus *cb = bus->priv;
70 struct mdio_mux_parent_bus *pb = cb->parent;
71
72 int r;
73
74 mutex_lock(&pb->mii_bus->mdio_lock);
75 r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
76 if (r)
77 goto out;
78
79 pb->current_child = cb->bus_number;
80
81 r = pb->mii_bus->write(pb->mii_bus, phy_id, regnum, val);
82out:
83 mutex_unlock(&pb->mii_bus->mdio_lock);
84
85 return r;
86}
87
88static int parent_count;
89
90int mdio_mux_init(struct device *dev,
91 int (*switch_fn)(int cur, int desired, void *data),
92 void **mux_handle,
93 void *data)
94{
95 struct device_node *parent_bus_node;
96 struct device_node *child_bus_node;
97 int r, ret_val;
98 struct mii_bus *parent_bus;
99 struct mdio_mux_parent_bus *pb;
100 struct mdio_mux_child_bus *cb;
101
102 if (!dev->of_node)
103 return -ENODEV;
104
105 parent_bus_node = of_parse_phandle(dev->of_node, "mdio-parent-bus", 0);
106
107 if (!parent_bus_node)
108 return -ENODEV;
109
110 parent_bus = of_mdio_find_bus(parent_bus_node);
111 if (parent_bus == NULL) {
112 ret_val = -EPROBE_DEFER;
113 goto err_parent_bus;
114 }
115
116 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
117 if (pb == NULL) {
118 ret_val = -ENOMEM;
119 goto err_parent_bus;
120 }
121
122 pb->switch_data = data;
123 pb->switch_fn = switch_fn;
124 pb->current_child = -1;
125 pb->parent_id = parent_count++;
126 pb->mii_bus = parent_bus;
127
128 ret_val = -ENODEV;
129 for_each_child_of_node(dev->of_node, child_bus_node) {
130 u32 v;
131
132 r = of_property_read_u32(child_bus_node, "reg", &v);
133 if (r)
134 continue;
135
136 cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
137 if (cb == NULL) {
138 dev_err(dev,
139 "Error: Failed to allocate memory for child\n");
140 ret_val = -ENOMEM;
141 break;
142 }
143 cb->bus_number = v;
144 cb->parent = pb;
145 cb->mii_bus = mdiobus_alloc();
146 cb->mii_bus->priv = cb;
147
148 cb->mii_bus->irq = cb->phy_irq;
149 cb->mii_bus->name = "mdio_mux";
150 snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
151 pb->parent_id, v);
152 cb->mii_bus->parent = dev;
153 cb->mii_bus->read = mdio_mux_read;
154 cb->mii_bus->write = mdio_mux_write;
155 r = of_mdiobus_register(cb->mii_bus, child_bus_node);
156 if (r) {
157 mdiobus_free(cb->mii_bus);
158 devm_kfree(dev, cb);
159 } else {
160 of_node_get(child_bus_node);
161 cb->next = pb->children;
162 pb->children = cb;
163 }
164 }
165 if (pb->children) {
166 *mux_handle = pb;
167 dev_info(dev, "Version " DRV_VERSION "\n");
168 return 0;
169 }
170err_parent_bus:
171 of_node_put(parent_bus_node);
172 return ret_val;
173}
174EXPORT_SYMBOL_GPL(mdio_mux_init);
175
176void mdio_mux_uninit(void *mux_handle)
177{
178 struct mdio_mux_parent_bus *pb = mux_handle;
179 struct mdio_mux_child_bus *cb = pb->children;
180
181 while (cb) {
182 mdiobus_unregister(cb->mii_bus);
183 mdiobus_free(cb->mii_bus);
184 cb = cb->next;
185 }
186}
187EXPORT_SYMBOL_GPL(mdio_mux_uninit);
188
189MODULE_DESCRIPTION(DRV_DESCRIPTION);
190MODULE_VERSION(DRV_VERSION);
191MODULE_AUTHOR("David Daney");
192MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8985cc62cf41..683ef1ce5519 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -88,6 +88,38 @@ static struct class mdio_bus_class = {
88 .dev_release = mdiobus_release, 88 .dev_release = mdiobus_release,
89}; 89};
90 90
91#if IS_ENABLED(CONFIG_OF_MDIO)
92/* Helper function for of_mdio_find_bus */
93static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
94{
95 return dev->of_node == mdio_bus_np;
96}
97/**
98 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
99 * @mdio_np: Pointer to the mii_bus.
100 *
101 * Returns a pointer to the mii_bus, or NULL if none found.
102 *
103 * Because the association of a device_node and mii_bus is made via
104 * of_mdiobus_register(), the mii_bus cannot be found before it is
105 * registered with of_mdiobus_register().
106 *
107 */
108struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
109{
110 struct device *d;
111
112 if (!mdio_bus_np)
113 return NULL;
114
115 d = class_find_device(&mdio_bus_class, NULL, mdio_bus_np,
116 of_mdio_bus_match);
117
118 return d ? to_mii_bus(d) : NULL;
119}
120EXPORT_SYMBOL(of_mdio_find_bus);
121#endif
122
91/** 123/**
92 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus 124 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
93 * @bus: target mii_bus 125 * @bus: target mii_bus
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e8c42d6a7d1c..de86a5582224 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -207,7 +207,7 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
207 * Description: Reads the ID registers of the PHY at @addr on the 207 * Description: Reads the ID registers of the PHY at @addr on the
208 * @bus, stores it in @phy_id and returns zero on success. 208 * @bus, stores it in @phy_id and returns zero on success.
209 */ 209 */
210int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id) 210static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
211{ 211{
212 int phy_reg; 212 int phy_reg;
213 213
@@ -230,7 +230,6 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
230 230
231 return 0; 231 return 0;
232} 232}
233EXPORT_SYMBOL(get_phy_id);
234 233
235/** 234/**
236 * get_phy_device - reads the specified PHY device and returns its @phy_device struct 235 * get_phy_device - reads the specified PHY device and returns its @phy_device struct
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 116a2dd7c879..4eb98bc52a0a 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -348,7 +348,6 @@ static int __devexit ks8995_remove(struct spi_device *spi)
348static struct spi_driver ks8995_driver = { 348static struct spi_driver ks8995_driver = {
349 .driver = { 349 .driver = {
350 .name = "spi-ks8995", 350 .name = "spi-ks8995",
351 .bus = &spi_bus_type,
352 .owner = THIS_MODULE, 351 .owner = THIS_MODULE,
353 }, 352 },
354 .probe = ks8995_probe, 353 .probe = ks8995_probe,
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index af95a98fd86f..a031f6b456b4 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -613,7 +613,7 @@ ppp_async_encode(struct asyncppp *ap)
613 *buf++ = PPP_FLAG; 613 *buf++ = PPP_FLAG;
614 ap->olim = buf; 614 ap->olim = buf;
615 615
616 kfree_skb(ap->tpkt); 616 consume_skb(ap->tpkt);
617 ap->tpkt = NULL; 617 ap->tpkt = NULL;
618 return 1; 618 return 1;
619} 619}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 33f8c51968b6..5c0557222f20 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -235,7 +235,7 @@ struct ppp_net {
235/* Prototypes. */ 235/* Prototypes. */
236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
237 struct file *file, unsigned int cmd, unsigned long arg); 237 struct file *file, unsigned int cmd, unsigned long arg);
238static int ppp_xmit_process(struct ppp *ppp); 238static void ppp_xmit_process(struct ppp *ppp);
239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
240static void ppp_push(struct ppp *ppp); 240static void ppp_push(struct ppp *ppp);
241static void ppp_channel_push(struct channel *pch); 241static void ppp_channel_push(struct channel *pch);
@@ -969,8 +969,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
969 put_unaligned_be16(proto, pp); 969 put_unaligned_be16(proto, pp);
970 970
971 skb_queue_tail(&ppp->file.xq, skb); 971 skb_queue_tail(&ppp->file.xq, skb);
972 if (!ppp_xmit_process(ppp)) 972 ppp_xmit_process(ppp);
973 netif_stop_queue(dev);
974 return NETDEV_TX_OK; 973 return NETDEV_TX_OK;
975 974
976 outf: 975 outf:
@@ -1048,11 +1047,10 @@ static void ppp_setup(struct net_device *dev)
1048 * Called to do any work queued up on the transmit side 1047 * Called to do any work queued up on the transmit side
1049 * that can now be done. 1048 * that can now be done.
1050 */ 1049 */
1051static int 1050static void
1052ppp_xmit_process(struct ppp *ppp) 1051ppp_xmit_process(struct ppp *ppp)
1053{ 1052{
1054 struct sk_buff *skb; 1053 struct sk_buff *skb;
1055 int ret = 0;
1056 1054
1057 ppp_xmit_lock(ppp); 1055 ppp_xmit_lock(ppp);
1058 if (!ppp->closing) { 1056 if (!ppp->closing) {
@@ -1062,13 +1060,12 @@ ppp_xmit_process(struct ppp *ppp)
1062 ppp_send_frame(ppp, skb); 1060 ppp_send_frame(ppp, skb);
1063 /* If there's no work left to do, tell the core net 1061 /* If there's no work left to do, tell the core net
1064 code that we can accept some more. */ 1062 code that we can accept some more. */
1065 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) { 1063 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
1066 netif_wake_queue(ppp->dev); 1064 netif_wake_queue(ppp->dev);
1067 ret = 1; 1065 else
1068 } 1066 netif_stop_queue(ppp->dev);
1069 } 1067 }
1070 ppp_xmit_unlock(ppp); 1068 ppp_xmit_unlock(ppp);
1071 return ret;
1072} 1069}
1073 1070
1074static inline struct sk_buff * 1071static inline struct sk_buff *
@@ -1095,13 +1092,13 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1095 new_skb->data, skb->len + 2, 1092 new_skb->data, skb->len + 2,
1096 compressor_skb_size); 1093 compressor_skb_size);
1097 if (len > 0 && (ppp->flags & SC_CCP_UP)) { 1094 if (len > 0 && (ppp->flags & SC_CCP_UP)) {
1098 kfree_skb(skb); 1095 consume_skb(skb);
1099 skb = new_skb; 1096 skb = new_skb;
1100 skb_put(skb, len); 1097 skb_put(skb, len);
1101 skb_pull(skb, 2); /* pull off A/C bytes */ 1098 skb_pull(skb, 2); /* pull off A/C bytes */
1102 } else if (len == 0) { 1099 } else if (len == 0) {
1103 /* didn't compress, or CCP not up yet */ 1100 /* didn't compress, or CCP not up yet */
1104 kfree_skb(new_skb); 1101 consume_skb(new_skb);
1105 new_skb = skb; 1102 new_skb = skb;
1106 } else { 1103 } else {
1107 /* 1104 /*
@@ -1115,7 +1112,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1115 if (net_ratelimit()) 1112 if (net_ratelimit())
1116 netdev_err(ppp->dev, "ppp: compressor dropped pkt\n"); 1113 netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1117 kfree_skb(skb); 1114 kfree_skb(skb);
1118 kfree_skb(new_skb); 1115 consume_skb(new_skb);
1119 new_skb = NULL; 1116 new_skb = NULL;
1120 } 1117 }
1121 return new_skb; 1118 return new_skb;
@@ -1181,7 +1178,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1181 !(ppp->flags & SC_NO_TCP_CCID)); 1178 !(ppp->flags & SC_NO_TCP_CCID));
1182 if (cp == skb->data + 2) { 1179 if (cp == skb->data + 2) {
1183 /* didn't compress */ 1180 /* didn't compress */
1184 kfree_skb(new_skb); 1181 consume_skb(new_skb);
1185 } else { 1182 } else {
1186 if (cp[0] & SL_TYPE_COMPRESSED_TCP) { 1183 if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
1187 proto = PPP_VJC_COMP; 1184 proto = PPP_VJC_COMP;
@@ -1190,7 +1187,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1190 proto = PPP_VJC_UNCOMP; 1187 proto = PPP_VJC_UNCOMP;
1191 cp[0] = skb->data[2]; 1188 cp[0] = skb->data[2];
1192 } 1189 }
1193 kfree_skb(skb); 1190 consume_skb(skb);
1194 skb = new_skb; 1191 skb = new_skb;
1195 cp = skb_put(skb, len + 2); 1192 cp = skb_put(skb, len + 2);
1196 cp[0] = 0; 1193 cp[0] = 0;
@@ -1706,7 +1703,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1706 } 1703 }
1707 skb_reserve(ns, 2); 1704 skb_reserve(ns, 2);
1708 skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len); 1705 skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
1709 kfree_skb(skb); 1706 consume_skb(skb);
1710 skb = ns; 1707 skb = ns;
1711 } 1708 }
1712 else 1709 else
@@ -1854,7 +1851,7 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
1854 goto err; 1851 goto err;
1855 } 1852 }
1856 1853
1857 kfree_skb(skb); 1854 consume_skb(skb);
1858 skb = ns; 1855 skb = ns;
1859 skb_put(skb, len); 1856 skb_put(skb, len);
1860 skb_pull(skb, 2); /* pull off the A/C bytes */ 1857 skb_pull(skb, 2); /* pull off the A/C bytes */
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 55e466c511d5..1a12033d2efa 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -588,7 +588,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
588 skb_reserve(npkt,2); 588 skb_reserve(npkt,2);
589 skb_copy_from_linear_data(skb, 589 skb_copy_from_linear_data(skb,
590 skb_put(npkt, skb->len), skb->len); 590 skb_put(npkt, skb->len), skb->len);
591 kfree_skb(skb); 591 consume_skb(skb);
592 skb = npkt; 592 skb = npkt;
593 } 593 }
594 skb_push(skb,2); 594 skb_push(skb,2);
@@ -656,7 +656,7 @@ ppp_sync_push(struct syncppp *ap)
656 if (sent < ap->tpkt->len) { 656 if (sent < ap->tpkt->len) {
657 tty_stuffed = 1; 657 tty_stuffed = 1;
658 } else { 658 } else {
659 kfree_skb(ap->tpkt); 659 consume_skb(ap->tpkt);
660 ap->tpkt = NULL; 660 ap->tpkt = NULL;
661 clear_bit(XMIT_FULL, &ap->xmit_flags); 661 clear_bit(XMIT_FULL, &ap->xmit_flags);
662 done = 1; 662 done = 1;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2fa1a9b6f498..cbf7047decc0 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -201,7 +201,7 @@ static int __set_item(struct pppoe_net *pn, struct pppox_sock *po)
201 return 0; 201 return 0;
202} 202}
203 203
204static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid, 204static void __delete_item(struct pppoe_net *pn, __be16 sid,
205 char *addr, int ifindex) 205 char *addr, int ifindex)
206{ 206{
207 int hash = hash_item(sid, addr); 207 int hash = hash_item(sid, addr);
@@ -220,8 +220,6 @@ static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid,
220 src = &ret->next; 220 src = &ret->next;
221 ret = ret->next; 221 ret = ret->next;
222 } 222 }
223
224 return ret;
225} 223}
226 224
227/********************************************************************** 225/**********************************************************************
@@ -264,16 +262,12 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
264 return pppox_sock; 262 return pppox_sock;
265} 263}
266 264
267static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid, 265static inline void delete_item(struct pppoe_net *pn, __be16 sid,
268 char *addr, int ifindex) 266 char *addr, int ifindex)
269{ 267{
270 struct pppox_sock *ret;
271
272 write_lock_bh(&pn->hash_lock); 268 write_lock_bh(&pn->hash_lock);
273 ret = __delete_item(pn, sid, addr, ifindex); 269 __delete_item(pn, sid, addr, ifindex);
274 write_unlock_bh(&pn->hash_lock); 270 write_unlock_bh(&pn->hash_lock);
275
276 return ret;
277} 271}
278 272
279/*************************************************************************** 273/***************************************************************************
@@ -990,8 +984,10 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
990 if (skb) { 984 if (skb) {
991 total_len = min_t(size_t, total_len, skb->len); 985 total_len = min_t(size_t, total_len, skb->len);
992 error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); 986 error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
993 if (error == 0) 987 if (error == 0) {
994 error = total_len; 988 consume_skb(skb);
989 return total_len;
990 }
995 } 991 }
996 992
997 kfree_skb(skb); 993 kfree_skb(skb);
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 885dbdd9c39e..1c98321b56cc 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -116,8 +116,8 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
116 int i; 116 int i;
117 117
118 rcu_read_lock(); 118 rcu_read_lock();
119 for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID; 119 i = 1;
120 i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) { 120 for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
121 sock = rcu_dereference(callid_sock[i]); 121 sock = rcu_dereference(callid_sock[i]);
122 if (!sock) 122 if (!sock)
123 continue; 123 continue;
@@ -209,7 +209,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
209 } 209 }
210 if (skb->sk) 210 if (skb->sk)
211 skb_set_owner_w(new_skb, skb->sk); 211 skb_set_owner_w(new_skb, skb->sk);
212 kfree_skb(skb); 212 consume_skb(skb);
213 skb = new_skb; 213 skb = new_skb;
214 } 214 }
215 215
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 248a144033ca..89024d5fc33a 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -40,4 +40,15 @@ config NET_TEAM_MODE_ACTIVEBACKUP
40 To compile this team mode as a module, choose M here: the module 40 To compile this team mode as a module, choose M here: the module
41 will be called team_mode_activebackup. 41 will be called team_mode_activebackup.
42 42
43config NET_TEAM_MODE_LOADBALANCE
44 tristate "Load-balance mode support"
45 depends on NET_TEAM
46 ---help---
47 This mode provides load balancing functionality. Tx port selection
48 is done using BPF function set up from userspace (bpf_hash_func
49 option)
50
51 To compile this team mode as a module, choose M here: the module
52 will be called team_mode_loadbalance.
53
43endif # NET_TEAM 54endif # NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index 85f2028a87af..fb9f4c1c51ff 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -5,3 +5,4 @@
5obj-$(CONFIG_NET_TEAM) += team.o 5obj-$(CONFIG_NET_TEAM) += team.o
6obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o 6obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
7obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o 7obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
8obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8f81805c6825..c61ae35a53ce 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -65,7 +65,7 @@ static int __set_port_mac(struct net_device *port_dev,
65 return dev_set_mac_address(port_dev, &addr); 65 return dev_set_mac_address(port_dev, &addr);
66} 66}
67 67
68int team_port_set_orig_mac(struct team_port *port) 68static int team_port_set_orig_mac(struct team_port *port)
69{ 69{
70 return __set_port_mac(port->dev, port->orig.dev_addr); 70 return __set_port_mac(port->dev, port->orig.dev_addr);
71} 71}
@@ -76,12 +76,26 @@ int team_port_set_team_mac(struct team_port *port)
76} 76}
77EXPORT_SYMBOL(team_port_set_team_mac); 77EXPORT_SYMBOL(team_port_set_team_mac);
78 78
79static void team_refresh_port_linkup(struct team_port *port)
80{
81 port->linkup = port->user.linkup_enabled ? port->user.linkup :
82 port->state.linkup;
83}
79 84
80/******************* 85/*******************
81 * Options handling 86 * Options handling
82 *******************/ 87 *******************/
83 88
84struct team_option *__team_find_option(struct team *team, const char *opt_name) 89struct team_option_inst { /* One for each option instance */
90 struct list_head list;
91 struct team_option *option;
92 struct team_port *port; /* != NULL if per-port */
93 bool changed;
94 bool removed;
95};
96
97static struct team_option *__team_find_option(struct team *team,
98 const char *opt_name)
85{ 99{
86 struct team_option *option; 100 struct team_option *option;
87 101
@@ -92,9 +106,121 @@ struct team_option *__team_find_option(struct team *team, const char *opt_name)
92 return NULL; 106 return NULL;
93} 107}
94 108
95int __team_options_register(struct team *team, 109static int __team_option_inst_add(struct team *team, struct team_option *option,
96 const struct team_option *option, 110 struct team_port *port)
97 size_t option_count) 111{
112 struct team_option_inst *opt_inst;
113
114 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
115 if (!opt_inst)
116 return -ENOMEM;
117 opt_inst->option = option;
118 opt_inst->port = port;
119 opt_inst->changed = true;
120 opt_inst->removed = false;
121 list_add_tail(&opt_inst->list, &team->option_inst_list);
122 return 0;
123}
124
125static void __team_option_inst_del(struct team_option_inst *opt_inst)
126{
127 list_del(&opt_inst->list);
128 kfree(opt_inst);
129}
130
131static void __team_option_inst_del_option(struct team *team,
132 struct team_option *option)
133{
134 struct team_option_inst *opt_inst, *tmp;
135
136 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
137 if (opt_inst->option == option)
138 __team_option_inst_del(opt_inst);
139 }
140}
141
142static int __team_option_inst_add_option(struct team *team,
143 struct team_option *option)
144{
145 struct team_port *port;
146 int err;
147
148 if (!option->per_port)
149 return __team_option_inst_add(team, option, 0);
150
151 list_for_each_entry(port, &team->port_list, list) {
152 err = __team_option_inst_add(team, option, port);
153 if (err)
154 goto inst_del_option;
155 }
156 return 0;
157
158inst_del_option:
159 __team_option_inst_del_option(team, option);
160 return err;
161}
162
163static void __team_option_inst_mark_removed_option(struct team *team,
164 struct team_option *option)
165{
166 struct team_option_inst *opt_inst;
167
168 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
169 if (opt_inst->option == option) {
170 opt_inst->changed = true;
171 opt_inst->removed = true;
172 }
173 }
174}
175
176static void __team_option_inst_del_port(struct team *team,
177 struct team_port *port)
178{
179 struct team_option_inst *opt_inst, *tmp;
180
181 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
182 if (opt_inst->option->per_port &&
183 opt_inst->port == port)
184 __team_option_inst_del(opt_inst);
185 }
186}
187
188static int __team_option_inst_add_port(struct team *team,
189 struct team_port *port)
190{
191 struct team_option *option;
192 int err;
193
194 list_for_each_entry(option, &team->option_list, list) {
195 if (!option->per_port)
196 continue;
197 err = __team_option_inst_add(team, option, port);
198 if (err)
199 goto inst_del_port;
200 }
201 return 0;
202
203inst_del_port:
204 __team_option_inst_del_port(team, port);
205 return err;
206}
207
208static void __team_option_inst_mark_removed_port(struct team *team,
209 struct team_port *port)
210{
211 struct team_option_inst *opt_inst;
212
213 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
214 if (opt_inst->port == port) {
215 opt_inst->changed = true;
216 opt_inst->removed = true;
217 }
218 }
219}
220
221static int __team_options_register(struct team *team,
222 const struct team_option *option,
223 size_t option_count)
98{ 224{
99 int i; 225 int i;
100 struct team_option **dst_opts; 226 struct team_option **dst_opts;
@@ -107,26 +233,32 @@ int __team_options_register(struct team *team,
107 for (i = 0; i < option_count; i++, option++) { 233 for (i = 0; i < option_count; i++, option++) {
108 if (__team_find_option(team, option->name)) { 234 if (__team_find_option(team, option->name)) {
109 err = -EEXIST; 235 err = -EEXIST;
110 goto rollback; 236 goto alloc_rollback;
111 } 237 }
112 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL); 238 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
113 if (!dst_opts[i]) { 239 if (!dst_opts[i]) {
114 err = -ENOMEM; 240 err = -ENOMEM;
115 goto rollback; 241 goto alloc_rollback;
116 } 242 }
117 } 243 }
118 244
119 for (i = 0; i < option_count; i++) { 245 for (i = 0; i < option_count; i++) {
120 dst_opts[i]->changed = true; 246 err = __team_option_inst_add_option(team, dst_opts[i]);
121 dst_opts[i]->removed = false; 247 if (err)
248 goto inst_rollback;
122 list_add_tail(&dst_opts[i]->list, &team->option_list); 249 list_add_tail(&dst_opts[i]->list, &team->option_list);
123 } 250 }
124 251
125 kfree(dst_opts); 252 kfree(dst_opts);
126 return 0; 253 return 0;
127 254
128rollback: 255inst_rollback:
129 for (i = 0; i < option_count; i++) 256 for (i--; i >= 0; i--)
257 __team_option_inst_del_option(team, dst_opts[i]);
258
259 i = option_count - 1;
260alloc_rollback:
261 for (i--; i >= 0; i--)
130 kfree(dst_opts[i]); 262 kfree(dst_opts[i]);
131 263
132 kfree(dst_opts); 264 kfree(dst_opts);
@@ -143,10 +275,8 @@ static void __team_options_mark_removed(struct team *team,
143 struct team_option *del_opt; 275 struct team_option *del_opt;
144 276
145 del_opt = __team_find_option(team, option->name); 277 del_opt = __team_find_option(team, option->name);
146 if (del_opt) { 278 if (del_opt)
147 del_opt->changed = true; 279 __team_option_inst_mark_removed_option(team, del_opt);
148 del_opt->removed = true;
149 }
150 } 280 }
151} 281}
152 282
@@ -161,6 +291,7 @@ static void __team_options_unregister(struct team *team,
161 291
162 del_opt = __team_find_option(team, option->name); 292 del_opt = __team_find_option(team, option->name);
163 if (del_opt) { 293 if (del_opt) {
294 __team_option_inst_del_option(team, del_opt);
164 list_del(&del_opt->list); 295 list_del(&del_opt->list);
165 kfree(del_opt); 296 kfree(del_opt);
166 } 297 }
@@ -193,22 +324,42 @@ void team_options_unregister(struct team *team,
193} 324}
194EXPORT_SYMBOL(team_options_unregister); 325EXPORT_SYMBOL(team_options_unregister);
195 326
196static int team_option_get(struct team *team, struct team_option *option, 327static int team_option_port_add(struct team *team, struct team_port *port)
197 void *arg) 328{
329 int err;
330
331 err = __team_option_inst_add_port(team, port);
332 if (err)
333 return err;
334 __team_options_change_check(team);
335 return 0;
336}
337
338static void team_option_port_del(struct team *team, struct team_port *port)
339{
340 __team_option_inst_mark_removed_port(team, port);
341 __team_options_change_check(team);
342 __team_option_inst_del_port(team, port);
343}
344
345static int team_option_get(struct team *team,
346 struct team_option_inst *opt_inst,
347 struct team_gsetter_ctx *ctx)
198{ 348{
199 return option->getter(team, arg); 349 return opt_inst->option->getter(team, ctx);
200} 350}
201 351
202static int team_option_set(struct team *team, struct team_option *option, 352static int team_option_set(struct team *team,
203 void *arg) 353 struct team_option_inst *opt_inst,
354 struct team_gsetter_ctx *ctx)
204{ 355{
205 int err; 356 int err;
206 357
207 err = option->setter(team, arg); 358 err = opt_inst->option->setter(team, ctx);
208 if (err) 359 if (err)
209 return err; 360 return err;
210 361
211 option->changed = true; 362 opt_inst->changed = true;
212 __team_options_change_check(team); 363 __team_options_change_check(team);
213 return err; 364 return err;
214} 365}
@@ -408,6 +559,8 @@ static int team_change_mode(struct team *team, const char *kind)
408 * Rx path frame handler 559 * Rx path frame handler
409 ************************/ 560 ************************/
410 561
562static bool team_port_enabled(struct team_port *port);
563
411/* note: already called with rcu_read_lock */ 564/* note: already called with rcu_read_lock */
412static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) 565static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
413{ 566{
@@ -424,8 +577,12 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
424 577
425 port = team_port_get_rcu(skb->dev); 578 port = team_port_get_rcu(skb->dev);
426 team = port->team; 579 team = port->team;
427 580 if (!team_port_enabled(port)) {
428 res = team->ops.receive(team, port, skb); 581 /* allow exact match delivery for disabled ports */
582 res = RX_HANDLER_EXACT;
583 } else {
584 res = team->ops.receive(team, port, skb);
585 }
429 if (res == RX_HANDLER_ANOTHER) { 586 if (res == RX_HANDLER_ANOTHER) {
430 struct team_pcpu_stats *pcpu_stats; 587 struct team_pcpu_stats *pcpu_stats;
431 588
@@ -461,17 +618,25 @@ static bool team_port_find(const struct team *team,
461 return false; 618 return false;
462} 619}
463 620
621static bool team_port_enabled(struct team_port *port)
622{
623 return port->index != -1;
624}
625
464/* 626/*
465 * Add/delete port to the team port list. Write guarded by rtnl_lock. 627 * Enable/disable port by adding to enabled port hashlist and setting
466 * Takes care of correct port->index setup (might be racy). 628 * port->index (Might be racy so reader could see incorrect ifindex when
629 * processing a flying packet, but that is not a problem). Write guarded
630 * by team->lock.
467 */ 631 */
468static void team_port_list_add_port(struct team *team, 632static void team_port_enable(struct team *team,
469 struct team_port *port) 633 struct team_port *port)
470{ 634{
471 port->index = team->port_count++; 635 if (team_port_enabled(port))
636 return;
637 port->index = team->en_port_count++;
472 hlist_add_head_rcu(&port->hlist, 638 hlist_add_head_rcu(&port->hlist,
473 team_port_index_hash(team, port->index)); 639 team_port_index_hash(team, port->index));
474 list_add_tail_rcu(&port->list, &team->port_list);
475} 640}
476 641
477static void __reconstruct_port_hlist(struct team *team, int rm_index) 642static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -479,7 +644,7 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
479 int i; 644 int i;
480 struct team_port *port; 645 struct team_port *port;
481 646
482 for (i = rm_index + 1; i < team->port_count; i++) { 647 for (i = rm_index + 1; i < team->en_port_count; i++) {
483 port = team_get_port_by_index(team, i); 648 port = team_get_port_by_index(team, i);
484 hlist_del_rcu(&port->hlist); 649 hlist_del_rcu(&port->hlist);
485 port->index--; 650 port->index--;
@@ -488,15 +653,17 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
488 } 653 }
489} 654}
490 655
491static void team_port_list_del_port(struct team *team, 656static void team_port_disable(struct team *team,
492 struct team_port *port) 657 struct team_port *port)
493{ 658{
494 int rm_index = port->index; 659 int rm_index = port->index;
495 660
661 if (!team_port_enabled(port))
662 return;
496 hlist_del_rcu(&port->hlist); 663 hlist_del_rcu(&port->hlist);
497 list_del_rcu(&port->list);
498 __reconstruct_port_hlist(team, rm_index); 664 __reconstruct_port_hlist(team, rm_index);
499 team->port_count--; 665 team->en_port_count--;
666 port->index = -1;
500} 667}
501 668
502#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ 669#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -642,7 +809,16 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
642 goto err_handler_register; 809 goto err_handler_register;
643 } 810 }
644 811
645 team_port_list_add_port(team, port); 812 err = team_option_port_add(team, port);
813 if (err) {
814 netdev_err(dev, "Device %s failed to add per-port options\n",
815 portname);
816 goto err_option_port_add;
817 }
818
819 port->index = -1;
820 team_port_enable(team, port);
821 list_add_tail_rcu(&port->list, &team->port_list);
646 team_adjust_ops(team); 822 team_adjust_ops(team);
647 __team_compute_features(team); 823 __team_compute_features(team);
648 __team_port_change_check(port, !!netif_carrier_ok(port_dev)); 824 __team_port_change_check(port, !!netif_carrier_ok(port_dev));
@@ -651,6 +827,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
651 827
652 return 0; 828 return 0;
653 829
830err_option_port_add:
831 netdev_rx_handler_unregister(port_dev);
832
654err_handler_register: 833err_handler_register:
655 netdev_set_master(port_dev, NULL); 834 netdev_set_master(port_dev, NULL);
656 835
@@ -688,8 +867,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
688 867
689 port->removed = true; 868 port->removed = true;
690 __team_port_change_check(port, false); 869 __team_port_change_check(port, false);
691 team_port_list_del_port(team, port); 870 team_port_disable(team, port);
871 list_del_rcu(&port->list);
692 team_adjust_ops(team); 872 team_adjust_ops(team);
873 team_option_port_del(team, port);
693 netdev_rx_handler_unregister(port_dev); 874 netdev_rx_handler_unregister(port_dev);
694 netdev_set_master(port_dev, NULL); 875 netdev_set_master(port_dev, NULL);
695 vlan_vids_del_by_dev(port_dev, dev); 876 vlan_vids_del_by_dev(port_dev, dev);
@@ -712,19 +893,66 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
712 893
713static const char team_no_mode_kind[] = "*NOMODE*"; 894static const char team_no_mode_kind[] = "*NOMODE*";
714 895
715static int team_mode_option_get(struct team *team, void *arg) 896static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
897{
898 ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind;
899 return 0;
900}
901
902static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
903{
904 return team_change_mode(team, ctx->data.str_val);
905}
906
907static int team_port_en_option_get(struct team *team,
908 struct team_gsetter_ctx *ctx)
909{
910 ctx->data.bool_val = team_port_enabled(ctx->port);
911 return 0;
912}
913
914static int team_port_en_option_set(struct team *team,
915 struct team_gsetter_ctx *ctx)
916{
917 if (ctx->data.bool_val)
918 team_port_enable(team, ctx->port);
919 else
920 team_port_disable(team, ctx->port);
921 return 0;
922}
923
924static int team_user_linkup_option_get(struct team *team,
925 struct team_gsetter_ctx *ctx)
926{
927 ctx->data.bool_val = ctx->port->user.linkup;
928 return 0;
929}
930
931static int team_user_linkup_option_set(struct team *team,
932 struct team_gsetter_ctx *ctx)
933{
934 ctx->port->user.linkup = ctx->data.bool_val;
935 team_refresh_port_linkup(ctx->port);
936 return 0;
937}
938
939static int team_user_linkup_en_option_get(struct team *team,
940 struct team_gsetter_ctx *ctx)
716{ 941{
717 const char **str = arg; 942 struct team_port *port = ctx->port;
718 943
719 *str = team->mode ? team->mode->kind : team_no_mode_kind; 944 ctx->data.bool_val = port->user.linkup_enabled;
720 return 0; 945 return 0;
721} 946}
722 947
723static int team_mode_option_set(struct team *team, void *arg) 948static int team_user_linkup_en_option_set(struct team *team,
949 struct team_gsetter_ctx *ctx)
724{ 950{
725 const char **str = arg; 951 struct team_port *port = ctx->port;
726 952
727 return team_change_mode(team, *str); 953 port->user.linkup_enabled = ctx->data.bool_val;
954 team_refresh_port_linkup(ctx->port);
955 return 0;
728} 956}
729 957
730static const struct team_option team_options[] = { 958static const struct team_option team_options[] = {
@@ -734,6 +962,27 @@ static const struct team_option team_options[] = {
734 .getter = team_mode_option_get, 962 .getter = team_mode_option_get,
735 .setter = team_mode_option_set, 963 .setter = team_mode_option_set,
736 }, 964 },
965 {
966 .name = "enabled",
967 .type = TEAM_OPTION_TYPE_BOOL,
968 .per_port = true,
969 .getter = team_port_en_option_get,
970 .setter = team_port_en_option_set,
971 },
972 {
973 .name = "user_linkup",
974 .type = TEAM_OPTION_TYPE_BOOL,
975 .per_port = true,
976 .getter = team_user_linkup_option_get,
977 .setter = team_user_linkup_option_set,
978 },
979 {
980 .name = "user_linkup_enabled",
981 .type = TEAM_OPTION_TYPE_BOOL,
982 .per_port = true,
983 .getter = team_user_linkup_en_option_get,
984 .setter = team_user_linkup_en_option_set,
985 },
737}; 986};
738 987
739static int team_init(struct net_device *dev) 988static int team_init(struct net_device *dev)
@@ -750,12 +999,13 @@ static int team_init(struct net_device *dev)
750 return -ENOMEM; 999 return -ENOMEM;
751 1000
752 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) 1001 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
753 INIT_HLIST_HEAD(&team->port_hlist[i]); 1002 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
754 INIT_LIST_HEAD(&team->port_list); 1003 INIT_LIST_HEAD(&team->port_list);
755 1004
756 team_adjust_ops(team); 1005 team_adjust_ops(team);
757 1006
758 INIT_LIST_HEAD(&team->option_list); 1007 INIT_LIST_HEAD(&team->option_list);
1008 INIT_LIST_HEAD(&team->option_inst_list);
759 err = team_options_register(team, team_options, ARRAY_SIZE(team_options)); 1009 err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
760 if (err) 1010 if (err)
761 goto err_options_register; 1011 goto err_options_register;
@@ -1145,10 +1395,7 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
1145 }, 1395 },
1146 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG }, 1396 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
1147 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, 1397 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
1148 [TEAM_ATTR_OPTION_DATA] = { 1398 [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
1149 .type = NLA_BINARY,
1150 .len = TEAM_STRING_MAX_LEN,
1151 },
1152}; 1399};
1153 1400
1154static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) 1401static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
@@ -1241,46 +1488,86 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
1241{ 1488{
1242 struct nlattr *option_list; 1489 struct nlattr *option_list;
1243 void *hdr; 1490 void *hdr;
1244 struct team_option *option; 1491 struct team_option_inst *opt_inst;
1492 int err;
1245 1493
1246 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, 1494 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
1247 TEAM_CMD_OPTIONS_GET); 1495 TEAM_CMD_OPTIONS_GET);
1248 if (IS_ERR(hdr)) 1496 if (IS_ERR(hdr))
1249 return PTR_ERR(hdr); 1497 return PTR_ERR(hdr);
1250 1498
1251 NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); 1499 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1500 goto nla_put_failure;
1252 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); 1501 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
1253 if (!option_list) 1502 if (!option_list)
1254 return -EMSGSIZE; 1503 return -EMSGSIZE;
1255 1504
1256 list_for_each_entry(option, &team->option_list, list) { 1505 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1257 struct nlattr *option_item; 1506 struct nlattr *option_item;
1258 long arg; 1507 struct team_option *option = opt_inst->option;
1508 struct team_gsetter_ctx ctx;
1259 1509
1260 /* Include only changed options if fill all mode is not on */ 1510 /* Include only changed options if fill all mode is not on */
1261 if (!fillall && !option->changed) 1511 if (!fillall && !opt_inst->changed)
1262 continue; 1512 continue;
1263 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); 1513 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
1264 if (!option_item) 1514 if (!option_item)
1265 goto nla_put_failure; 1515 goto nla_put_failure;
1266 NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name); 1516 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
1267 if (option->changed) { 1517 goto nla_put_failure;
1268 NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED); 1518 if (opt_inst->changed) {
1269 option->changed = false; 1519 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
1520 goto nla_put_failure;
1521 opt_inst->changed = false;
1270 } 1522 }
1271 if (option->removed) 1523 if (opt_inst->removed &&
1272 NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED); 1524 nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1525 goto nla_put_failure;
1526 if (opt_inst->port &&
1527 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1528 opt_inst->port->dev->ifindex))
1529 goto nla_put_failure;
1530 ctx.port = opt_inst->port;
1273 switch (option->type) { 1531 switch (option->type) {
1274 case TEAM_OPTION_TYPE_U32: 1532 case TEAM_OPTION_TYPE_U32:
1275 NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32); 1533 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1276 team_option_get(team, option, &arg); 1534 goto nla_put_failure;
1277 NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg); 1535 err = team_option_get(team, opt_inst, &ctx);
1536 if (err)
1537 goto errout;
1538 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
1539 ctx.data.u32_val))
1540 goto nla_put_failure;
1278 break; 1541 break;
1279 case TEAM_OPTION_TYPE_STRING: 1542 case TEAM_OPTION_TYPE_STRING:
1280 NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING); 1543 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1281 team_option_get(team, option, &arg); 1544 goto nla_put_failure;
1282 NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA, 1545 err = team_option_get(team, opt_inst, &ctx);
1283 (char *) arg); 1546 if (err)
1547 goto errout;
1548 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1549 ctx.data.str_val))
1550 goto nla_put_failure;
1551 break;
1552 case TEAM_OPTION_TYPE_BINARY:
1553 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1554 goto nla_put_failure;
1555 err = team_option_get(team, opt_inst, &ctx);
1556 if (err)
1557 goto errout;
1558 if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
1559 ctx.data.bin_val.len, ctx.data.bin_val.ptr))
1560 goto nla_put_failure;
1561 break;
1562 case TEAM_OPTION_TYPE_BOOL:
1563 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1564 goto nla_put_failure;
1565 err = team_option_get(team, opt_inst, &ctx);
1566 if (err)
1567 goto errout;
1568 if (ctx.data.bool_val &&
1569 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1570 goto nla_put_failure;
1284 break; 1571 break;
1285 default: 1572 default:
1286 BUG(); 1573 BUG();
@@ -1292,8 +1579,10 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
1292 return genlmsg_end(skb, hdr); 1579 return genlmsg_end(skb, hdr);
1293 1580
1294nla_put_failure: 1581nla_put_failure:
1582 err = -EMSGSIZE;
1583errout:
1295 genlmsg_cancel(skb, hdr); 1584 genlmsg_cancel(skb, hdr);
1296 return -EMSGSIZE; 1585 return err;
1297} 1586}
1298 1587
1299static int team_nl_fill_options_get_all(struct sk_buff *skb, 1588static int team_nl_fill_options_get_all(struct sk_buff *skb,
@@ -1339,9 +1628,12 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1339 } 1628 }
1340 1629
1341 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { 1630 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
1342 struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1]; 1631 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
1632 struct nlattr *attr_port_ifindex;
1633 struct nlattr *attr_data;
1343 enum team_option_type opt_type; 1634 enum team_option_type opt_type;
1344 struct team_option *option; 1635 int opt_port_ifindex = 0; /* != 0 for per-port options */
1636 struct team_option_inst *opt_inst;
1345 char *opt_name; 1637 char *opt_name;
1346 bool opt_found = false; 1638 bool opt_found = false;
1347 1639
@@ -1349,48 +1641,78 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1349 err = -EINVAL; 1641 err = -EINVAL;
1350 goto team_put; 1642 goto team_put;
1351 } 1643 }
1352 err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX, 1644 err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
1353 nl_option, team_nl_option_policy); 1645 nl_option, team_nl_option_policy);
1354 if (err) 1646 if (err)
1355 goto team_put; 1647 goto team_put;
1356 if (!mode_attrs[TEAM_ATTR_OPTION_NAME] || 1648 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
1357 !mode_attrs[TEAM_ATTR_OPTION_TYPE] || 1649 !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
1358 !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
1359 err = -EINVAL; 1650 err = -EINVAL;
1360 goto team_put; 1651 goto team_put;
1361 } 1652 }
1362 switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) { 1653 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
1363 case NLA_U32: 1654 case NLA_U32:
1364 opt_type = TEAM_OPTION_TYPE_U32; 1655 opt_type = TEAM_OPTION_TYPE_U32;
1365 break; 1656 break;
1366 case NLA_STRING: 1657 case NLA_STRING:
1367 opt_type = TEAM_OPTION_TYPE_STRING; 1658 opt_type = TEAM_OPTION_TYPE_STRING;
1368 break; 1659 break;
1660 case NLA_BINARY:
1661 opt_type = TEAM_OPTION_TYPE_BINARY;
1662 break;
1663 case NLA_FLAG:
1664 opt_type = TEAM_OPTION_TYPE_BOOL;
1665 break;
1369 default: 1666 default:
1370 goto team_put; 1667 goto team_put;
1371 } 1668 }
1372 1669
1373 opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]); 1670 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
1374 list_for_each_entry(option, &team->option_list, list) { 1671 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
1375 long arg; 1672 err = -EINVAL;
1376 struct nlattr *opt_data_attr; 1673 goto team_put;
1674 }
1675
1676 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
1677 attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
1678 if (attr_port_ifindex)
1679 opt_port_ifindex = nla_get_u32(attr_port_ifindex);
1680
1681 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1682 struct team_option *option = opt_inst->option;
1683 struct team_gsetter_ctx ctx;
1684 int tmp_ifindex;
1377 1685
1686 tmp_ifindex = opt_inst->port ?
1687 opt_inst->port->dev->ifindex : 0;
1378 if (option->type != opt_type || 1688 if (option->type != opt_type ||
1379 strcmp(option->name, opt_name)) 1689 strcmp(option->name, opt_name) ||
1690 tmp_ifindex != opt_port_ifindex)
1380 continue; 1691 continue;
1381 opt_found = true; 1692 opt_found = true;
1382 opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA]; 1693 ctx.port = opt_inst->port;
1383 switch (opt_type) { 1694 switch (opt_type) {
1384 case TEAM_OPTION_TYPE_U32: 1695 case TEAM_OPTION_TYPE_U32:
1385 arg = nla_get_u32(opt_data_attr); 1696 ctx.data.u32_val = nla_get_u32(attr_data);
1386 break; 1697 break;
1387 case TEAM_OPTION_TYPE_STRING: 1698 case TEAM_OPTION_TYPE_STRING:
1388 arg = (long) nla_data(opt_data_attr); 1699 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
1700 err = -EINVAL;
1701 goto team_put;
1702 }
1703 ctx.data.str_val = nla_data(attr_data);
1704 break;
1705 case TEAM_OPTION_TYPE_BINARY:
1706 ctx.data.bin_val.len = nla_len(attr_data);
1707 ctx.data.bin_val.ptr = nla_data(attr_data);
1708 break;
1709 case TEAM_OPTION_TYPE_BOOL:
1710 ctx.data.bool_val = attr_data ? true : false;
1389 break; 1711 break;
1390 default: 1712 default:
1391 BUG(); 1713 BUG();
1392 } 1714 }
1393 err = team_option_set(team, option, &arg); 1715 err = team_option_set(team, opt_inst, &ctx);
1394 if (err) 1716 if (err)
1395 goto team_put; 1717 goto team_put;
1396 } 1718 }
@@ -1420,7 +1742,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
1420 if (IS_ERR(hdr)) 1742 if (IS_ERR(hdr))
1421 return PTR_ERR(hdr); 1743 return PTR_ERR(hdr);
1422 1744
1423 NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); 1745 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1746 goto nla_put_failure;
1424 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); 1747 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
1425 if (!port_list) 1748 if (!port_list)
1426 return -EMSGSIZE; 1749 return -EMSGSIZE;
@@ -1434,17 +1757,20 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
1434 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); 1757 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
1435 if (!port_item) 1758 if (!port_item)
1436 goto nla_put_failure; 1759 goto nla_put_failure;
1437 NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex); 1760 if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
1761 goto nla_put_failure;
1438 if (port->changed) { 1762 if (port->changed) {
1439 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED); 1763 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
1764 goto nla_put_failure;
1440 port->changed = false; 1765 port->changed = false;
1441 } 1766 }
1442 if (port->removed) 1767 if ((port->removed &&
1443 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED); 1768 nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
1444 if (port->linkup) 1769 (port->state.linkup &&
1445 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP); 1770 nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
1446 NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed); 1771 nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
1447 NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex); 1772 nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
1773 goto nla_put_failure;
1448 nla_nest_end(skb, port_item); 1774 nla_nest_end(skb, port_item);
1449 } 1775 }
1450 1776
@@ -1603,23 +1929,24 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
1603{ 1929{
1604 int err; 1930 int err;
1605 1931
1606 if (!port->removed && port->linkup == linkup) 1932 if (!port->removed && port->state.linkup == linkup)
1607 return; 1933 return;
1608 1934
1609 port->changed = true; 1935 port->changed = true;
1610 port->linkup = linkup; 1936 port->state.linkup = linkup;
1937 team_refresh_port_linkup(port);
1611 if (linkup) { 1938 if (linkup) {
1612 struct ethtool_cmd ecmd; 1939 struct ethtool_cmd ecmd;
1613 1940
1614 err = __ethtool_get_settings(port->dev, &ecmd); 1941 err = __ethtool_get_settings(port->dev, &ecmd);
1615 if (!err) { 1942 if (!err) {
1616 port->speed = ethtool_cmd_speed(&ecmd); 1943 port->state.speed = ethtool_cmd_speed(&ecmd);
1617 port->duplex = ecmd.duplex; 1944 port->state.duplex = ecmd.duplex;
1618 goto send_event; 1945 goto send_event;
1619 } 1946 }
1620 } 1947 }
1621 port->speed = 0; 1948 port->state.speed = 0;
1622 port->duplex = 0; 1949 port->state.duplex = 0;
1623 1950
1624send_event: 1951send_event:
1625 err = team_nl_send_event_port_list_get(port->team); 1952 err = team_nl_send_event_port_list_get(port->team);
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index f4d960e82e29..fd6bd03aaa89 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -59,23 +59,21 @@ static void ab_port_leave(struct team *team, struct team_port *port)
59 RCU_INIT_POINTER(ab_priv(team)->active_port, NULL); 59 RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
60} 60}
61 61
62static int ab_active_port_get(struct team *team, void *arg) 62static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
63{ 63{
64 u32 *ifindex = arg;
65
66 *ifindex = 0;
67 if (ab_priv(team)->active_port) 64 if (ab_priv(team)->active_port)
68 *ifindex = ab_priv(team)->active_port->dev->ifindex; 65 ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex;
66 else
67 ctx->data.u32_val = 0;
69 return 0; 68 return 0;
70} 69}
71 70
72static int ab_active_port_set(struct team *team, void *arg) 71static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx)
73{ 72{
74 u32 *ifindex = arg;
75 struct team_port *port; 73 struct team_port *port;
76 74
77 list_for_each_entry_rcu(port, &team->port_list, list) { 75 list_for_each_entry(port, &team->port_list, list) {
78 if (port->dev->ifindex == *ifindex) { 76 if (port->dev->ifindex == ctx->data.u32_val) {
79 rcu_assign_pointer(ab_priv(team)->active_port, port); 77 rcu_assign_pointer(ab_priv(team)->active_port, port);
80 return 0; 78 return 0;
81 } 79 }
@@ -92,12 +90,12 @@ static const struct team_option ab_options[] = {
92 }, 90 },
93}; 91};
94 92
95int ab_init(struct team *team) 93static int ab_init(struct team *team)
96{ 94{
97 return team_options_register(team, ab_options, ARRAY_SIZE(ab_options)); 95 return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
98} 96}
99 97
100void ab_exit(struct team *team) 98static void ab_exit(struct team *team)
101{ 99{
102 team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options)); 100 team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
103} 101}
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
new file mode 100644
index 000000000000..86e8183c8e3d
--- /dev/null
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -0,0 +1,174 @@
1/*
2 * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
3 * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/netdevice.h>
17#include <linux/filter.h>
18#include <linux/if_team.h>
19
20struct lb_priv {
21 struct sk_filter __rcu *fp;
22 struct sock_fprog *orig_fprog;
23};
24
25static struct lb_priv *lb_priv(struct team *team)
26{
27 return (struct lb_priv *) &team->mode_priv;
28}
29
30static bool lb_transmit(struct team *team, struct sk_buff *skb)
31{
32 struct sk_filter *fp;
33 struct team_port *port;
34 unsigned int hash;
35 int port_index;
36
37 fp = rcu_dereference(lb_priv(team)->fp);
38 if (unlikely(!fp))
39 goto drop;
40 hash = SK_RUN_FILTER(fp, skb);
41 port_index = hash % team->en_port_count;
42 port = team_get_port_by_index_rcu(team, port_index);
43 if (unlikely(!port))
44 goto drop;
45 skb->dev = port->dev;
46 if (dev_queue_xmit(skb))
47 return false;
48 return true;
49
50drop:
51 dev_kfree_skb_any(skb);
52 return false;
53}
54
55static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
56{
57 if (!lb_priv(team)->orig_fprog) {
58 ctx->data.bin_val.len = 0;
59 ctx->data.bin_val.ptr = NULL;
60 return 0;
61 }
62 ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len *
63 sizeof(struct sock_filter);
64 ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter;
65 return 0;
66}
67
68static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
69 const void *data)
70{
71 struct sock_fprog *fprog;
72 struct sock_filter *filter = (struct sock_filter *) data;
73
74 if (data_len % sizeof(struct sock_filter))
75 return -EINVAL;
76 fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
77 if (!fprog)
78 return -ENOMEM;
79 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
80 if (!fprog->filter) {
81 kfree(fprog);
82 return -ENOMEM;
83 }
84 fprog->len = data_len / sizeof(struct sock_filter);
85 *pfprog = fprog;
86 return 0;
87}
88
89static void __fprog_destroy(struct sock_fprog *fprog)
90{
91 kfree(fprog->filter);
92 kfree(fprog);
93}
94
95static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
96{
97 struct sk_filter *fp = NULL;
98 struct sock_fprog *fprog = NULL;
99 int err;
100
101 if (ctx->data.bin_val.len) {
102 err = __fprog_create(&fprog, ctx->data.bin_val.len,
103 ctx->data.bin_val.ptr);
104 if (err)
105 return err;
106 err = sk_unattached_filter_create(&fp, fprog);
107 if (err) {
108 __fprog_destroy(fprog);
109 return err;
110 }
111 }
112
113 if (lb_priv(team)->orig_fprog) {
114 /* Clear old filter data */
115 __fprog_destroy(lb_priv(team)->orig_fprog);
116 sk_unattached_filter_destroy(lb_priv(team)->fp);
117 }
118
119 rcu_assign_pointer(lb_priv(team)->fp, fp);
120 lb_priv(team)->orig_fprog = fprog;
121 return 0;
122}
123
124static const struct team_option lb_options[] = {
125 {
126 .name = "bpf_hash_func",
127 .type = TEAM_OPTION_TYPE_BINARY,
128 .getter = lb_bpf_func_get,
129 .setter = lb_bpf_func_set,
130 },
131};
132
133static int lb_init(struct team *team)
134{
135 return team_options_register(team, lb_options,
136 ARRAY_SIZE(lb_options));
137}
138
139static void lb_exit(struct team *team)
140{
141 team_options_unregister(team, lb_options,
142 ARRAY_SIZE(lb_options));
143}
144
145static const struct team_mode_ops lb_mode_ops = {
146 .init = lb_init,
147 .exit = lb_exit,
148 .transmit = lb_transmit,
149};
150
151static struct team_mode lb_mode = {
152 .kind = "loadbalance",
153 .owner = THIS_MODULE,
154 .priv_size = sizeof(struct lb_priv),
155 .ops = &lb_mode_ops,
156};
157
158static int __init lb_init_module(void)
159{
160 return team_mode_register(&lb_mode);
161}
162
163static void __exit lb_cleanup_module(void)
164{
165 team_mode_unregister(&lb_mode);
166}
167
168module_init(lb_init_module);
169module_exit(lb_cleanup_module);
170
171MODULE_LICENSE("GPL v2");
172MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
173MODULE_DESCRIPTION("Load-balancing mode for team");
174MODULE_ALIAS("team-mode-loadbalance");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index a0e8f806331a..6abfbdc96be5 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -50,7 +50,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
50 struct team_port *port; 50 struct team_port *port;
51 int port_index; 51 int port_index;
52 52
53 port_index = rr_priv(team)->sent_packets++ % team->port_count; 53 port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
54 port = team_get_port_by_index_rcu(team, port_index); 54 port = team_get_port_by_index_rcu(team, port_index);
55 port = __get_first_port_up(team, port); 55 port = __get_first_port_up(team, port);
56 if (unlikely(!port)) 56 if (unlikely(!port))
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
deleted file mode 100644
index b15ac81d46fa..000000000000
--- a/drivers/net/tokenring/3c359.c
+++ /dev/null
@@ -1,1843 +0,0 @@
1/*
2 * 3c359.c (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved
3 *
4 * Linux driver for 3Com 3c359 Tokenlink Velocity XL PCI NIC
5 *
6 * Base Driver Olympic:
7 * Written 1999 Peter De Schrijver & Mike Phillips
8 *
9 * This software may be used and distributed according to the terms
10 * of the GNU General Public License, incorporated herein by reference.
11 *
12 * 7/17/00 - Clean up, version number 0.9.0. Ready to release to the world.
13 *
14 * 2/16/01 - Port up to kernel 2.4.2 ready for submission into the kernel.
15 * 3/05/01 - Last clean up stuff before submission.
16 * 2/15/01 - Finally, update to new pci api.
17 *
18 * To Do:
19 */
20
21/*
22 * Technical Card Details
23 *
24 * All access to data is done with 16/8 bit transfers. The transfer
25 * method really sucks. You can only read or write one location at a time.
26 *
27 * Also, the microcode for the card must be uploaded if the card does not have
28 * the flashrom on board. This is a 28K bloat in the driver when compiled
29 * as a module.
30 *
31 * Rx is very simple, status into a ring of descriptors, dma data transfer,
32 * interrupts to tell us when a packet is received.
33 *
34 * Tx is a little more interesting. Similar scenario, descriptor and dma data
35 * transfers, but we don't have to interrupt the card to tell it another packet
36 * is ready for transmission, we are just doing simple memory writes, not io or mmio
37 * writes. The card can be set up to simply poll on the next
38 * descriptor pointer and when this value is non-zero will automatically download
39 * the next packet. The card then interrupts us when the packet is done.
40 *
41 */
42
43#define XL_DEBUG 0
44
45#include <linux/jiffies.h>
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/errno.h>
49#include <linux/timer.h>
50#include <linux/in.h>
51#include <linux/ioport.h>
52#include <linux/string.h>
53#include <linux/proc_fs.h>
54#include <linux/ptrace.h>
55#include <linux/skbuff.h>
56#include <linux/interrupt.h>
57#include <linux/delay.h>
58#include <linux/netdevice.h>
59#include <linux/trdevice.h>
60#include <linux/stddef.h>
61#include <linux/init.h>
62#include <linux/pci.h>
63#include <linux/spinlock.h>
64#include <linux/bitops.h>
65#include <linux/firmware.h>
66#include <linux/slab.h>
67
68#include <net/checksum.h>
69
70#include <asm/io.h>
71
72#include "3c359.h"
73
74static char version[] __devinitdata =
75"3c359.c v1.2.0 2/17/01 - Mike Phillips (mikep@linuxtr.net)" ;
76
77#define FW_NAME "3com/3C359.bin"
78MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
79MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ;
80MODULE_FIRMWARE(FW_NAME);
81
82/* Module parameters */
83
84/* Ring Speed 0,4,16
85 * 0 = Autosense
86 * 4,16 = Selected speed only, no autosense
87 * This allows the card to be the first on the ring
88 * and become the active monitor.
89 *
90 * WARNING: Some hubs will allow you to insert
91 * at the wrong speed.
92 *
93 * The adapter will _not_ fail to open if there are no
94 * active monitors on the ring, it will simply open up in
95 * its last known ringspeed if no ringspeed is specified.
96 */
97
98static int ringspeed[XL_MAX_ADAPTERS] = {0,} ;
99
100module_param_array(ringspeed, int, NULL, 0);
101MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ;
102
103/* Packet buffer size */
104
105static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ;
106
107module_param_array(pkt_buf_sz, int, NULL, 0) ;
108MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ;
109/* Message Level */
110
111static int message_level[XL_MAX_ADAPTERS] = {0,} ;
112
113module_param_array(message_level, int, NULL, 0) ;
114MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
115/*
116 * This is a real nasty way of doing this, but otherwise you
117 * will be stuck with 1555 lines of hex #'s in the code.
118 */
119
120static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
121{
122 {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
123 { } /* terminate list */
124};
125MODULE_DEVICE_TABLE(pci,xl_pci_tbl) ;
126
127static int xl_init(struct net_device *dev);
128static int xl_open(struct net_device *dev);
129static int xl_open_hw(struct net_device *dev) ;
130static int xl_hw_reset(struct net_device *dev);
131static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev);
132static void xl_dn_comp(struct net_device *dev);
133static int xl_close(struct net_device *dev);
134static void xl_set_rx_mode(struct net_device *dev);
135static irqreturn_t xl_interrupt(int irq, void *dev_id);
136static int xl_set_mac_address(struct net_device *dev, void *addr) ;
137static void xl_arb_cmd(struct net_device *dev);
138static void xl_asb_cmd(struct net_device *dev) ;
139static void xl_srb_cmd(struct net_device *dev, int srb_cmd) ;
140static void xl_wait_misr_flags(struct net_device *dev) ;
141static int xl_change_mtu(struct net_device *dev, int mtu);
142static void xl_srb_bh(struct net_device *dev) ;
143static void xl_asb_bh(struct net_device *dev) ;
144static void xl_reset(struct net_device *dev) ;
145static void xl_freemem(struct net_device *dev) ;
146
147
148/* EEProm Access Functions */
149static u16 xl_ee_read(struct net_device *dev, int ee_addr) ;
150static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) ;
151
152/* Debugging functions */
153#if XL_DEBUG
154static void print_tx_state(struct net_device *dev) ;
155static void print_rx_state(struct net_device *dev) ;
156
157static void print_tx_state(struct net_device *dev)
158{
159
160 struct xl_private *xl_priv = netdev_priv(dev);
161 struct xl_tx_desc *txd ;
162 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
163 int i ;
164
165 printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
166 xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
167 printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n");
168 for (i = 0; i < 16; i++) {
169 txd = &(xl_priv->xl_tx_ring[i]) ;
170 printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd),
171 txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ;
172 }
173
174 printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) );
175
176 printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) );
177 printk("Queue status = %0x\n",netif_running(dev) ) ;
178}
179
180static void print_rx_state(struct net_device *dev)
181{
182
183 struct xl_private *xl_priv = netdev_priv(dev);
184 struct xl_rx_desc *rxd ;
185 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
186 int i ;
187
188 printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
189 printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n");
190 for (i = 0; i < 16; i++) {
191 /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
192 rxd = &(xl_priv->xl_rx_ring[i]) ;
193 printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd),
194 rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ;
195 }
196
197 printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR));
198
199 printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL));
200 printk("Queue status = %0x\n",netif_running(dev));
201}
202#endif
203
204/*
205 * Read values from the on-board EEProm. This looks very strange
206 * but you have to wait for the EEProm to get/set the value before
207 * passing/getting the next value from the nic. As with all requests
208 * on this nic it has to be done in two stages, a) tell the nic which
209 * memory address you want to access and b) pass/get the value from the nic.
210 * With the EEProm, you have to wait before and between access a) and b).
211 * As this is only read at initialization time and the wait period is very
212 * small we shouldn't have to worry about scheduling issues.
213 */
214
215static u16 xl_ee_read(struct net_device *dev, int ee_addr)
216{
217 struct xl_private *xl_priv = netdev_priv(dev);
218 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
219
220 /* Wait for EEProm to not be busy */
221 writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
222 while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
223
224 /* Tell EEProm what we want to do and where */
225 writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
226 writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ;
227
228 /* Wait for EEProm to not be busy */
229 writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
230 while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
231
232 /* Tell EEProm what we want to do and where */
233 writel(IO_WORD_WRITE | EECONTROL , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
234 writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ;
235
236 /* Finally read the value from the EEProm */
237 writel(IO_WORD_READ | EEDATA , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
238 return readw(xl_mmio + MMIO_MACDATA) ;
239}
240
241/*
242 * Write values to the onboard eeprom. As with eeprom read you need to
243 * set which location to write, wait, value to write, wait, with the
244 * added twist of having to enable eeprom writes as well.
245 */
246
247static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value)
248{
249 struct xl_private *xl_priv = netdev_priv(dev);
250 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
251
252 /* Wait for EEProm to not be busy */
253 writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
254 while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
255
256 /* Enable write/erase */
257 writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
258 writew(EE_ENABLE_WRITE, xl_mmio + MMIO_MACDATA) ;
259
260 /* Wait for EEProm to not be busy */
261 writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
262 while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
263
264 /* Put the value we want to write into EEDATA */
265 writel(IO_WORD_WRITE | EEDATA, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
266 writew(ee_value, xl_mmio + MMIO_MACDATA) ;
267
268 /* Tell EEProm to write eevalue into ee_addr */
269 writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
270 writew(EEWRITE + ee_addr, xl_mmio + MMIO_MACDATA) ;
271
272 /* Wait for EEProm to not be busy, to ensure write gets done */
273 writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
274 while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
275
276 return ;
277}
278
/* net_device callbacks for the 3c359; rx is interrupt-driven so there is
 * no poll hook, and the generic tokenring header handles the rest. */
static const struct net_device_ops xl_netdev_ops = {
	.ndo_open		= xl_open,
	.ndo_stop		= xl_close,
	.ndo_start_xmit		= xl_xmit,
	.ndo_change_mtu		= xl_change_mtu,
	.ndo_set_rx_mode	= xl_set_rx_mode,
	.ndo_set_mac_address	= xl_set_mac_address,
};
287
288static int __devinit xl_probe(struct pci_dev *pdev,
289 const struct pci_device_id *ent)
290{
291 struct net_device *dev ;
292 struct xl_private *xl_priv ;
293 static int card_no = -1 ;
294 int i ;
295
296 card_no++ ;
297
298 if (pci_enable_device(pdev)) {
299 return -ENODEV ;
300 }
301
302 pci_set_master(pdev);
303
304 if ((i = pci_request_regions(pdev,"3c359"))) {
305 return i ;
306 }
307
308 /*
309 * Allowing init_trdev to allocate the private data will align
310 * xl_private on a 32 bytes boundary which we need for the rx/tx
311 * descriptors
312 */
313
314 dev = alloc_trdev(sizeof(struct xl_private)) ;
315 if (!dev) {
316 pci_release_regions(pdev) ;
317 return -ENOMEM ;
318 }
319 xl_priv = netdev_priv(dev);
320
321#if XL_DEBUG
322 printk("pci_device: %p, dev:%p, dev->priv: %p, ba[0]: %10x, ba[1]:%10x\n",
323 pdev, dev, netdev_priv(dev), (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start);
324#endif
325
326 dev->irq=pdev->irq;
327 dev->base_addr=pci_resource_start(pdev,0) ;
328 xl_priv->xl_card_name = pci_name(pdev);
329 xl_priv->xl_mmio=ioremap(pci_resource_start(pdev,1), XL_IO_SPACE);
330 xl_priv->pdev = pdev ;
331
332 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
333 xl_priv->pkt_buf_sz = PKT_BUF_SZ ;
334 else
335 xl_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
336
337 dev->mtu = xl_priv->pkt_buf_sz - TR_HLEN ;
338 xl_priv->xl_ring_speed = ringspeed[card_no] ;
339 xl_priv->xl_message_level = message_level[card_no] ;
340 xl_priv->xl_functional_addr[0] = xl_priv->xl_functional_addr[1] = xl_priv->xl_functional_addr[2] = xl_priv->xl_functional_addr[3] = 0 ;
341 xl_priv->xl_copy_all_options = 0 ;
342
343 if((i = xl_init(dev))) {
344 iounmap(xl_priv->xl_mmio) ;
345 free_netdev(dev) ;
346 pci_release_regions(pdev) ;
347 return i ;
348 }
349
350 dev->netdev_ops = &xl_netdev_ops;
351 SET_NETDEV_DEV(dev, &pdev->dev);
352
353 pci_set_drvdata(pdev,dev) ;
354 if ((i = register_netdev(dev))) {
355 printk(KERN_ERR "3C359, register netdev failed\n") ;
356 pci_set_drvdata(pdev,NULL) ;
357 iounmap(xl_priv->xl_mmio) ;
358 free_netdev(dev) ;
359 pci_release_regions(pdev) ;
360 return i ;
361 }
362
363 printk(KERN_INFO "3C359: %s registered as: %s\n",xl_priv->xl_card_name,dev->name) ;
364
365 return 0;
366}
367
368static int xl_init_firmware(struct xl_private *xl_priv)
369{
370 int err;
371
372 err = request_firmware(&xl_priv->fw, FW_NAME, &xl_priv->pdev->dev);
373 if (err) {
374 printk(KERN_ERR "Failed to load firmware \"%s\"\n", FW_NAME);
375 return err;
376 }
377
378 if (xl_priv->fw->size < 16) {
379 printk(KERN_ERR "Bogus length %zu in \"%s\"\n",
380 xl_priv->fw->size, FW_NAME);
381 release_firmware(xl_priv->fw);
382 err = -EINVAL;
383 }
384
385 return err;
386}
387
388static int __devinit xl_init(struct net_device *dev)
389{
390 struct xl_private *xl_priv = netdev_priv(dev);
391 int err;
392
393 printk(KERN_INFO "%s\n", version);
394 printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
395 xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
396
397 spin_lock_init(&xl_priv->xl_lock) ;
398
399 err = xl_init_firmware(xl_priv);
400 if (err == 0)
401 err = xl_hw_reset(dev);
402
403 return err;
404}
405
406
407/*
408 * Hardware reset. This needs to be a separate entity as we need to reset the card
409 * when we change the EEProm settings.
410 */
411
static int xl_hw_reset(struct net_device *dev)
{
	struct xl_private *xl_priv = netdev_priv(dev);
	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
	unsigned long t ;
	u16 i ;
	u16 result_16 ;
	u8 result_8 ;
	u16 start ;
	int j ;

	/* Microcode must already have been fetched by xl_init_firmware() */
	if (xl_priv->fw == NULL)
		return -EINVAL;

	/*
	 * Reset the card. If the card has got the microcode on board, we have
	 * missed the initialization interrupt, so we must always do this.
	 */

	writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;

	/*
	 * Must wait for cmdInProgress bit (12) to clear before continuing with
	 * card configuration.
	 */

	t=jiffies;
	while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
		schedule();
		/* 40 second ceiling on the reset completing */
		if (time_after(jiffies, t + 40 * HZ)) {
			printk(KERN_ERR "%s: 3COM 3C359 Velocity XL card not responding to global reset.\n", dev->name);
			return -ENODEV;
		}
	}

	/*
	 * Enable pmbar by setting bit in CPAttention
	 */

	writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	result_8 = readb(xl_mmio + MMIO_MACDATA) ;
	result_8 = result_8 | CPA_PMBARVIS ;
	writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(result_8, xl_mmio + MMIO_MACDATA) ;

	/*
	 * Read cpHold bit in pmbar, if cleared we have got Flashrom on board.
	 * If not, we need to upload the microcode to the card
	 */

	writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);

#if XL_DEBUG
	printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA));
#endif

	if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) {

		/* Set PmBar, privateMemoryBase bits (8:2) to 0 */

		writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
		result_16 = readw(xl_mmio + MMIO_MACDATA) ;
		result_16 = result_16 & ~((0x7F) << 2) ;
		writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writew(result_16,xl_mmio + MMIO_MACDATA) ;

		/* Set CPAttention, memWrEn bit */

		writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		result_8 = readb(xl_mmio + MMIO_MACDATA) ;
		result_8 = result_8 | CPA_MEMWREN ;
		writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(result_8, xl_mmio + MMIO_MACDATA) ;

		/*
		 * Now to write the microcode into the shared ram
		 * The microcode must finish at position 0xFFFF,
		 * so we must subtract to get the start position for the code
		 *
		 * Looks strange but ensures compiler only uses
		 * 16 bit unsigned int
		 */
		start = (0xFFFF - (xl_priv->fw->size) + 1) ;

		printk(KERN_INFO "3C359: Uploading Microcode: ");

		/* Byte-by-byte copy of the image into shared RAM at 0xD0000+start */
		for (i = start, j = 0; j < xl_priv->fw->size; i++, j++) {
			writel(MEM_BYTE_WRITE | 0XD0000 | i,
			       xl_mmio + MMIO_MAC_ACCESS_CMD);
			writeb(xl_priv->fw->data[j], xl_mmio + MMIO_MACDATA);
			if (j % 1024 == 0)
				printk(".");
		}
		printk("\n") ;

		/* Re-copy the last 16 bytes of the image to the fixed trailer
		 * area at 0xDFFF0 (xl_init_firmware guarantees size >= 16) */
		for (i = 0; i < 16; i++) {
			writel((MEM_BYTE_WRITE | 0xDFFF0) + i,
			       xl_mmio + MMIO_MAC_ACCESS_CMD);
			writeb(xl_priv->fw->data[xl_priv->fw->size - 16 + i],
			       xl_mmio + MMIO_MACDATA);
		}

		/*
		 * Have to write the start address of the upload to FFF4, but
		 * the address must be >> 4. You do not want to know how long
		 * it took me to discover this.
		 */

		writel(MEM_WORD_WRITE | 0xDFFF4, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writew(start >> 4, xl_mmio + MMIO_MACDATA);

		/* Clear the CPAttention, memWrEn Bit */

		writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		result_8 = readb(xl_mmio + MMIO_MACDATA) ;
		result_8 = result_8 & ~CPA_MEMWREN ;
		writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(result_8, xl_mmio + MMIO_MACDATA) ;

		/* Clear the cpHold bit in pmbar */

		writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
		result_16 = readw(xl_mmio + MMIO_MACDATA) ;
		result_16 = result_16 & ~PMB_CPHOLD ;
		writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writew(result_16,xl_mmio + MMIO_MACDATA) ;


	} /* If microcode upload required */

	/*
	 * The card should now go though a self test procedure and get itself ready
	 * to be opened, we must wait for an srb response with the initialization
	 * information.
	 */

#if XL_DEBUG
	printk(KERN_INFO "%s: Microcode uploaded, must wait for the self test to complete\n", dev->name);
#endif

	writew(SETINDENABLE | 0xFFF, xl_mmio + MMIO_COMMAND) ;

	t=jiffies;
	while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) {
		schedule();
		/* 15 second ceiling on the self test */
		if (time_after(jiffies, t + 15 * HZ)) {
			printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
			return -ENODEV;
		}
	}

	/*
	 * Write the RxBufArea with D000, RxEarlyThresh, TxStartThresh,
	 * DnPriReqThresh, read the tech docs if you want to know what
	 * values they need to be.
	 */

	writel(MMIO_WORD_WRITE | RXBUFAREA, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writew(0xD000, xl_mmio + MMIO_MACDATA) ;

	writel(MMIO_WORD_WRITE | RXEARLYTHRESH, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writew(0X0020, xl_mmio + MMIO_MACDATA) ;

	writew( SETTXSTARTTHRESH | 0x40 , xl_mmio + MMIO_COMMAND) ;

	writeb(0x04, xl_mmio + MMIO_DNBURSTTHRESH) ;
	writeb(0x04, xl_mmio + DNPRIREQTHRESH) ;

	/*
	 * Read WRBR to provide the location of the srb block, have to use byte reads not word reads.
	 * Tech docs have this wrong !!!!
	 */

	writel(MMIO_BYTE_READ | WRBR, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	xl_priv->srb = readb(xl_mmio + MMIO_MACDATA) << 8 ;
	writel( (MMIO_BYTE_READ | WRBR) + 1, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	xl_priv->srb = xl_priv->srb | readb(xl_mmio + MMIO_MACDATA) ;

#if XL_DEBUG
	writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	if ( readw(xl_mmio + MMIO_MACDATA) & 2) {
		printk(KERN_INFO "Default ring speed 4 mbps\n");
	} else {
		printk(KERN_INFO "Default ring speed 16 mbps\n");
	}
	printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
#endif

	return 0;
}
602
static int xl_open(struct net_device *dev)
{
	struct xl_private *xl_priv=netdev_priv(dev);
	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
	u8 i ;
	__le16 hwaddr[3] ; /* Should be u8[6] but we get word return values */
	int open_err ;

	u16 switchsettings, switchsettings_eeprom ;

	if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev))
		return -EAGAIN;

	/*
	 * Read the information from the EEPROM that we need.
	 */

	/* MAC address lives in EEPROM words 0x10-0x12 */
	hwaddr[0] = cpu_to_le16(xl_ee_read(dev,0x10));
	hwaddr[1] = cpu_to_le16(xl_ee_read(dev,0x11));
	hwaddr[2] = cpu_to_le16(xl_ee_read(dev,0x12));

	/* Ring speed */

	switchsettings_eeprom = xl_ee_read(dev,0x08) ;
	switchsettings = switchsettings_eeprom ;

	/* Bit 0x02 set selects 4 Mbps, clear selects 16 Mbps; 0 means keep
	 * whatever is already in the EEPROM */
	if (xl_priv->xl_ring_speed != 0) {
		if (xl_priv->xl_ring_speed == 4)
			switchsettings = switchsettings | 0x02 ;
		else
			switchsettings = switchsettings & ~0x02 ;
	}

	/* Only write EEProm if there has been a change */
	if (switchsettings != switchsettings_eeprom) {
		xl_ee_write(dev,0x08,switchsettings) ;
		/* Hardware reset after changing EEProm */
		xl_hw_reset(dev) ;
	}

	memcpy(dev->dev_addr,hwaddr,dev->addr_len) ;

	open_err = xl_open_hw(dev) ;

	/*
	 * This really needs to be cleaned up with better error reporting.
	 */

	if (open_err != 0) { /* Something went wrong with the open command */
		if (open_err & 0x07) { /* Wrong speed, retry at different speed */
			printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name);
			/* Toggle the 4/16 Mbps bit and try once more */
			switchsettings = switchsettings ^ 2 ;
			xl_ee_write(dev,0x08,switchsettings) ;
			xl_hw_reset(dev) ;
			open_err = xl_open_hw(dev) ;
			if (open_err != 0) {
				printk(KERN_WARNING "%s: Open error returned a second time, we're bombing out now\n", dev->name);
				free_irq(dev->irq,dev) ;
				return -ENODEV ;
			}
		} else {
			printk(KERN_WARNING "%s: Open Error = %04x\n", dev->name, open_err) ;
			free_irq(dev->irq,dev) ;
			return -ENODEV ;
		}
	}

	/*
	 * Now to set up the Rx and Tx buffer structures
	 */
	/* These MUST be on 8 byte boundaries */
	/* NOTE(review): the +7 reserves slack for 8-byte alignment but the
	 * returned pointer is used as-is; presumably kmalloc's own alignment
	 * guarantee is being relied on -- TODO confirm */
	xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL);
	if (xl_priv->xl_tx_ring == NULL) {
		free_irq(dev->irq,dev);
		return -ENOMEM;
	}
	xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL);
	if (xl_priv->xl_rx_ring == NULL) {
		free_irq(dev->irq,dev);
		kfree(xl_priv->xl_tx_ring);
		return -ENOMEM;
	}

	/* Setup Rx Ring */
	for (i=0 ; i < XL_RX_RING_SIZE ; i++) {
		struct sk_buff *skb ;

		skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
		if (skb==NULL)
			break ;

		skb->dev = dev ;
		xl_priv->xl_rx_ring[i].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
		xl_priv->xl_rx_ring[i].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
		xl_priv->rx_ring_skb[i] = skb ;
	}

	/* A partially filled ring is tolerated; an empty one is fatal */
	if (i==0) {
		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
		free_irq(dev->irq,dev) ;
		kfree(xl_priv->xl_tx_ring);
		kfree(xl_priv->xl_rx_ring);
		return -EIO ;
	}

	xl_priv->rx_ring_no = i ;
	xl_priv->rx_ring_tail = 0 ;
	xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_rx_ring, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_TODEVICE) ;
	/* Chain each descriptor to the next; the final one gets a 0 next
	 * pointer so the card stops there (see the rx comment block below) */
	for (i=0;i<(xl_priv->rx_ring_no-1);i++) {
		xl_priv->xl_rx_ring[i].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1)));
	}
	xl_priv->xl_rx_ring[i].upnextptr = 0 ;

	writel(xl_priv->rx_ring_dma_addr, xl_mmio + MMIO_UPLISTPTR) ;

	/* Setup Tx Ring */

	xl_priv->tx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_tx_ring, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE,PCI_DMA_TODEVICE) ;

	xl_priv->tx_ring_head = 1 ;
	xl_priv->tx_ring_tail = 255 ; /* Special marker for first packet */
	xl_priv->free_ring_entries = XL_TX_RING_SIZE ;

	/*
	 * Setup the first dummy DPD entry for polling to start working.
	 */

	xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY;
	xl_priv->xl_tx_ring[0].buffer = 0 ;
	xl_priv->xl_tx_ring[0].buffer_length = 0 ;
	xl_priv->xl_tx_ring[0].dnnextptr = 0 ;

	writel(xl_priv->tx_ring_dma_addr, xl_mmio + MMIO_DNLISTPTR) ;
	writel(DNUNSTALL, xl_mmio + MMIO_COMMAND) ;
	writel(UPUNSTALL, xl_mmio + MMIO_COMMAND) ;
	writel(DNENABLE, xl_mmio + MMIO_COMMAND) ;
	writeb(0x40, xl_mmio + MMIO_DNPOLL) ;

	/*
	 * Enable interrupts on the card
	 */

	writel(SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
	writel(SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;

	netif_start_queue(dev) ;
	return 0;

}
752
static int xl_open_hw(struct net_device *dev)
{
	struct xl_private *xl_priv=netdev_priv(dev);
	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
	u16 vsoff ;
	char ver_str[33];
	int open_err ;
	int i ;
	unsigned long t ;

	/*
	 * Okay, let's build up the Open.NIC srb command
	 *
	 */

	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(OPEN_NIC, xl_mmio + MMIO_MACDATA) ;

	/*
	 * Use this as a test byte, if it comes back with the same value, the command didn't work
	 */

	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb)+ 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0xff,xl_mmio + MMIO_MACDATA) ;

	/* Open options */
	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0x00, xl_mmio + MMIO_MACDATA) ;
	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 9, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0x00, xl_mmio + MMIO_MACDATA) ;

	/*
	 * Node address, be careful here, the docs say you can just put zeros here and it will use
	 * the hardware address, it doesn't, you must include the node address in the open command.
	 */

	/* SRB bytes 10-15 carry the 6-byte node address */
	if (xl_priv->xl_laa[0]) { /* If using a LAA address */
		for (i=10;i<16;i++) {
			writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
			writeb(xl_priv->xl_laa[i-10],xl_mmio + MMIO_MACDATA) ;
		}
		memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ;
	} else { /* Regular hardware address */
		for (i=10;i<16;i++) {
			writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
			writeb(dev->dev_addr[i-10], xl_mmio + MMIO_MACDATA) ;
		}
	}

	/* Default everything else to 0 */
	for (i = 16; i < 34; i++) {
		writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(0x00,xl_mmio + MMIO_MACDATA) ;
	}

	/*
	 * Set the csrb bit in the MISR register
	 */

	xl_wait_misr_flags(dev) ;
	writel(MEM_BYTE_WRITE | MF_CSRB, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0xFF, xl_mmio + MMIO_MACDATA) ;
	writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(MISR_CSRB , xl_mmio + MMIO_MACDATA) ;

	/*
	 * Now wait for the command to run
	 */

	/* 40 second ceiling; on timeout we fall through and read whatever
	 * the SRB holds rather than failing outright */
	t=jiffies;
	while (! (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) {
		schedule();
		if (time_after(jiffies, t + 40 * HZ)) {
			printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
			break ;
		}
	}

	/*
	 * Let's interpret the open response
	 */

	/* Non-zero test byte at srb+2 means the open failed; build the
	 * 16-bit error code from bytes 2 (high) and 7 (low) */
	writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb)+2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	if (readb(xl_mmio + MMIO_MACDATA)!=0) {
		open_err = readb(xl_mmio + MMIO_MACDATA) << 8 ;
		writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb) + 7, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		open_err |= readb(xl_mmio + MMIO_MACDATA) ;
		return open_err ;
	} else {
		/* Success: pull the ASB/ARB offsets and the version string
		 * offset out of the open response */
		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		xl_priv->asb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
		printk(KERN_INFO "%s: Adapter Opened Details: ",dev->name) ;
		printk("ASB: %04x",xl_priv->asb ) ;
		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 10, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		printk(", SRB: %04x",swab16(readw(xl_mmio + MMIO_MACDATA)) ) ;

		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
		printk(", ARB: %04x\n",xl_priv->arb );
		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;

		/*
		 * Interesting, sending the individual characters directly to printk was causing klogd to use
		 * use 100% of processor time, so we build up the string and print that instead.
		 */

		for (i=0;i<0x20;i++) {
			writel( (MEM_BYTE_READ | 0xD0000 | vsoff) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
			ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ;
		}
		ver_str[i] = '\0' ;
		printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str);
	}

	/*
	 * Issue the AckInterrupt
	 */
	writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;

	return 0 ;
}
875
876/*
877 * There are two ways of implementing rx on the 359 NIC, either
 * interrupt driven or polling. We are going to use interrupts;
879 * it is the easier way of doing things.
880 *
881 * The Rx works with a ring of Rx descriptors. At initialise time the ring
882 * entries point to the next entry except for the last entry in the ring
883 * which points to 0. The card is programmed with the location of the first
884 * available descriptor and keeps reading the next_ptr until next_ptr is set
885 * to 0. Hopefully with a ring size of 16 the card will never get to read a next_ptr
886 * of 0. As the Rx interrupt is received we copy the frame up to the protocol layers
887 * and then point the end of the ring to our current position and point our current
888 * position to 0, therefore making the current position the last position on the ring.
 * The last position on the ring therefore continually loops around the rx ring.
890 *
891 * rx_ring_tail is the position on the ring to process next. (Think of a snake, the head
892 * expands as the card adds new packets and we go around eating the tail processing the
893 * packets.)
894 *
 * Undoubtedly it could be streamlined and improved upon, but at the moment it works
896 * and the fast path through the routine is fine.
897 *
 * adv_rx_ring could be inlined to increase performance, but it's called a *lot* of times
899 * in xl_rx so would increase the size of the function significantly.
900 */
901
902static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */
903{
904 struct xl_private *xl_priv=netdev_priv(dev);
905 int n = xl_priv->rx_ring_tail;
906 int prev_ring_loc;
907
908 prev_ring_loc = (n + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1);
909 xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * n));
910 xl_priv->xl_rx_ring[n].framestatus = 0;
911 xl_priv->xl_rx_ring[n].upnextptr = 0;
912 xl_priv->rx_ring_tail++;
913 xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1);
914}
915
916static void xl_rx(struct net_device *dev)
917{
918 struct xl_private *xl_priv=netdev_priv(dev);
919 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
920 struct sk_buff *skb, *skb2 ;
921 int frame_length = 0, copy_len = 0 ;
922 int temp_ring_loc ;
923
924 /*
925 * Receive the next frame, loop around the ring until all frames
926 * have been received.
927 */
928
929 while (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & (RXUPDCOMPLETE | RXUPDFULL) ) { /* Descriptor to process */
930
931 if (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & RXUPDFULL ) { /* UpdFull, Multiple Descriptors used for the frame */
932
933 /*
934 * This is a pain, you need to go through all the descriptors until the last one
935 * for this frame to find the framelength
936 */
937
938 temp_ring_loc = xl_priv->rx_ring_tail ;
939
940 while (xl_priv->xl_rx_ring[temp_ring_loc].framestatus & RXUPDFULL ) {
941 temp_ring_loc++ ;
942 temp_ring_loc &= (XL_RX_RING_SIZE-1) ;
943 }
944
945 frame_length = le32_to_cpu(xl_priv->xl_rx_ring[temp_ring_loc].framestatus) & 0x7FFF;
946
947 skb = dev_alloc_skb(frame_length) ;
948
949 if (skb==NULL) { /* No memory for frame, still need to roll forward the rx ring */
950 printk(KERN_WARNING "%s: dev_alloc_skb failed - multi buffer !\n", dev->name) ;
951 while (xl_priv->rx_ring_tail != temp_ring_loc)
952 adv_rx_ring(dev) ;
953
954 adv_rx_ring(dev) ; /* One more time just for luck :) */
955 dev->stats.rx_dropped++ ;
956
957 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
958 return ;
959 }
960
961 while (xl_priv->rx_ring_tail != temp_ring_loc) {
962 copy_len = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen) & 0x7FFF;
963 frame_length -= copy_len ;
964 pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
965 skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
966 skb_put(skb, copy_len),
967 copy_len);
968 pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
969 adv_rx_ring(dev) ;
970 }
971
972 /* Now we have found the last fragment */
973 pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
974 skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
975 skb_put(skb,copy_len), frame_length);
976/* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
977 pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
978 adv_rx_ring(dev) ;
979 skb->protocol = tr_type_trans(skb,dev) ;
980 netif_rx(skb) ;
981
982 } else { /* Single Descriptor Used, simply swap buffers over, fast path */
983
984 frame_length = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus) & 0x7FFF;
985
986 skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
987
988 if (skb==NULL) { /* Still need to fix the rx ring */
989 printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name);
990 adv_rx_ring(dev) ;
991 dev->stats.rx_dropped++ ;
992 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
993 return ;
994 }
995
996 skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ;
997 pci_unmap_single(xl_priv->pdev, le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
998 skb_put(skb2, frame_length) ;
999 skb2->protocol = tr_type_trans(skb2,dev) ;
1000
1001 xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] = skb ;
1002 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
1003 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
1004 adv_rx_ring(dev) ;
1005 dev->stats.rx_packets++ ;
1006 dev->stats.rx_bytes += frame_length ;
1007
1008 netif_rx(skb2) ;
1009 } /* if multiple buffers */
1010 } /* while packet to do */
1011
1012 /* Clear the updComplete interrupt */
1013 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
1014 return ;
1015}
1016
1017/*
 * This is ruthless; it doesn't care what state the card is in, it will
1019 * completely reset the adapter.
1020 */
1021
1022static void xl_reset(struct net_device *dev)
1023{
1024 struct xl_private *xl_priv=netdev_priv(dev);
1025 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1026 unsigned long t;
1027
1028 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
1029
1030 /*
1031 * Must wait for cmdInProgress bit (12) to clear before continuing with
1032 * card configuration.
1033 */
1034
1035 t=jiffies;
1036 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1037 if (time_after(jiffies, t + 40 * HZ)) {
1038 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
1039 break ;
1040 }
1041 }
1042
1043}
1044
1045static void xl_freemem(struct net_device *dev)
1046{
1047 struct xl_private *xl_priv=netdev_priv(dev);
1048 int i ;
1049
1050 for (i=0;i<XL_RX_RING_SIZE;i++) {
1051 dev_kfree_skb_irq(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]) ;
1052 pci_unmap_single(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
1053 xl_priv->rx_ring_tail++ ;
1054 xl_priv->rx_ring_tail &= XL_RX_RING_SIZE-1;
1055 }
1056
1057 /* unmap ring */
1058 pci_unmap_single(xl_priv->pdev,xl_priv->rx_ring_dma_addr, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_FROMDEVICE) ;
1059
1060 pci_unmap_single(xl_priv->pdev,xl_priv->tx_ring_dma_addr, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE, PCI_DMA_TODEVICE) ;
1061
1062 kfree(xl_priv->xl_rx_ring) ;
1063 kfree(xl_priv->xl_tx_ring) ;
1064
1065 return ;
1066}
1067
1068static irqreturn_t xl_interrupt(int irq, void *dev_id)
1069{
1070 struct net_device *dev = (struct net_device *)dev_id;
1071 struct xl_private *xl_priv =netdev_priv(dev);
1072 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1073 u16 intstatus, macstatus ;
1074
1075 intstatus = readw(xl_mmio + MMIO_INTSTATUS) ;
1076
1077 if (!(intstatus & 1)) /* We didn't generate the interrupt */
1078 return IRQ_NONE;
1079
1080 spin_lock(&xl_priv->xl_lock) ;
1081
1082 /*
1083 * Process the interrupt
1084 */
1085 /*
1086 * Something fishy going on here, we shouldn't get 0001 ints, not fatal though.
1087 */
1088 if (intstatus == 0x0001) {
1089 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1090 printk(KERN_INFO "%s: 00001 int received\n",dev->name);
1091 } else {
1092 if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) {
1093
1094 /*
1095 * Host Error.
1096 * It may be possible to recover from this, but usually it means something
1097 * is seriously fubar, so we just close the adapter.
1098 */
1099
1100 if (intstatus & HOSTERRINT) {
1101 printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus);
1102 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
1103 printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
1104 netif_stop_queue(dev) ;
1105 xl_freemem(dev) ;
1106 free_irq(dev->irq,dev);
1107 xl_reset(dev) ;
1108 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1109 spin_unlock(&xl_priv->xl_lock) ;
1110 return IRQ_HANDLED;
1111 } /* Host Error */
1112
1113 if (intstatus & SRBRINT ) { /* Srbc interrupt */
1114 writel(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1115 if (xl_priv->srb_queued)
1116 xl_srb_bh(dev) ;
1117 } /* SRBR Interrupt */
1118
1119 if (intstatus & TXUNDERRUN) { /* Issue DnReset command */
1120 writel(DNRESET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1121 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { /* Wait for command to run */
1122 /* !!! FIX-ME !!!!
1123 Must put a timeout check here ! */
1124 /* Empty Loop */
1125 }
1126 printk(KERN_WARNING "%s: TX Underrun received\n",dev->name);
1127 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1128 } /* TxUnderRun */
1129
1130 if (intstatus & ARBCINT ) { /* Arbc interrupt */
1131 xl_arb_cmd(dev) ;
1132 } /* Arbc */
1133
1134 if (intstatus & ASBFINT) {
1135 if (xl_priv->asb_queued == 1) {
1136 xl_asb_cmd(dev) ;
1137 } else if (xl_priv->asb_queued == 2) {
1138 xl_asb_bh(dev) ;
1139 } else {
1140 writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
1141 }
1142 } /* Asbf */
1143
1144 if (intstatus & UPCOMPINT ) /* UpComplete */
1145 xl_rx(dev) ;
1146
1147 if (intstatus & DNCOMPINT ) /* DnComplete */
1148 xl_dn_comp(dev) ;
1149
1150 if (intstatus & HARDERRINT ) { /* Hardware error */
1151 writel(MMIO_WORD_READ | MACSTATUS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1152 macstatus = readw(xl_mmio + MMIO_MACDATA) ;
1153 printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
1154 if (macstatus & (1<<14))
1155 printk(KERN_WARNING "tchk error: Unrecoverable error\n");
1156 if (macstatus & (1<<3))
1157 printk(KERN_WARNING "eint error: Internal watchdog timer expired\n");
1158 if (macstatus & (1<<2))
1159 printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n");
1160 printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ;
1161 printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
1162 netif_stop_queue(dev) ;
1163 xl_freemem(dev) ;
1164 free_irq(dev->irq,dev);
1165 unregister_netdev(dev) ;
1166 free_netdev(dev) ;
1167 xl_reset(dev) ;
1168 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1169 spin_unlock(&xl_priv->xl_lock) ;
1170 return IRQ_HANDLED;
1171 }
1172 } else {
1173 printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus);
1174 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1175 }
1176 }
1177
1178 /* Turn interrupts back on */
1179
1180 writel( SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
1181 writel( SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
1182
1183 spin_unlock(&xl_priv->xl_lock) ;
1184 return IRQ_HANDLED;
1185}
1186
1187/*
1188 * Tx - Polling configuration
1189 */
1190
static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xl_private *xl_priv=netdev_priv(dev);
	struct xl_tx_desc *txd ;
	int tx_head, tx_tail, tx_prev ;
	unsigned long flags ;

	spin_lock_irqsave(&xl_priv->xl_lock,flags) ;

	netif_stop_queue(dev) ;

	/* Need one spare entry: the head slot stays a terminator until the
	 * previous descriptor is linked onto it below */
	if (xl_priv->free_ring_entries > 1 ) {
		/*
		 * Set up the descriptor for the packet
		 */
		tx_head = xl_priv->tx_ring_head ;
		tx_tail = xl_priv->tx_ring_tail ;

		txd = &(xl_priv->xl_tx_ring[tx_head]) ;
		txd->dnnextptr = 0 ;
		txd->framestartheader = cpu_to_le32(skb->len) | TXDNINDICATE;
		txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
		txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST;
		xl_priv->tx_ring_skb[tx_head] = skb ;
		/* NOTE(review): stats are counted at enqueue time, before the
		 * card confirms download -- an aborted frame is still counted */
		dev->stats.tx_packets++ ;
		dev->stats.tx_bytes += skb->len ;

		/*
		 * Set the nextptr of the previous descriptor equal to this descriptor, add XL_TX_RING_SIZE -1
		 * to ensure no negative numbers in unsigned locations.
		 */

		tx_prev = (xl_priv->tx_ring_head + XL_TX_RING_SIZE - 1) & (XL_TX_RING_SIZE - 1) ;

		xl_priv->tx_ring_head++ ;
		xl_priv->tx_ring_head &= (XL_TX_RING_SIZE - 1) ;
		xl_priv->free_ring_entries-- ;

		/* Linking the previous descriptor last hands the new one to
		 * the card only after it is fully initialised above */
		xl_priv->xl_tx_ring[tx_prev].dnnextptr = cpu_to_le32(xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head));

		/* Sneaky, by doing a read on DnListPtr we can force the card to poll on the DnNextPtr */
		/* readl(xl_mmio + MMIO_DNLISTPTR) ; */

		netif_wake_queue(dev) ;

		spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;

		return NETDEV_TX_OK;
	} else {
		/* Ring full: leave the queue stopped and ask the stack to
		 * requeue; tx-complete handling will restart it */
		spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
		return NETDEV_TX_BUSY;
	}

}
1245
/*
 * The NIC has told us that a packet has been downloaded onto the card, we must
 * find out which packet it has done, clear the skb and information for the packet
 * then advance around the ring for all transmitted packets
 */

static void xl_dn_comp(struct net_device *dev)
{
	struct xl_private *xl_priv=netdev_priv(dev);
	u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
	struct xl_tx_desc *txd ;


	if (xl_priv->tx_ring_tail == 255) {/* First time */
		/* 255 is a first-run sentinel (presumably set at open —
		 * TODO confirm); descriptor 0 acts as a dummy, so real
		 * completions start at index 1. */
		xl_priv->xl_tx_ring[0].framestartheader = 0 ;
		xl_priv->xl_tx_ring[0].dnnextptr = 0 ;
		xl_priv->tx_ring_tail = 1 ;
	}

	/* Reap every descriptor the card has marked TXDNCOMPLETE: unmap the
	 * DMA buffer, free the skb, and return the entry to the free pool. */
	while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE ) {
		txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]) ;
		pci_unmap_single(xl_priv->pdev, le32_to_cpu(txd->buffer), xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE);
		txd->framestartheader = 0 ;
		txd->buffer = cpu_to_le32(0xdeadbeef);	/* poison to catch stale use */
		txd->buffer_length = 0 ;
		dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]) ;
		xl_priv->tx_ring_tail++ ;
		xl_priv->tx_ring_tail &= (XL_TX_RING_SIZE - 1) ;
		xl_priv->free_ring_entries++ ;
	}

	netif_wake_queue(dev) ;

	/* Ack the download-complete interrupt so the NIC can raise the next one. */
	writel(ACK_INTERRUPT | DNCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
}
1281
1282/*
1283 * Close the adapter properly.
1284 * This srb reply cannot be handled from interrupt context as we have
1285 * to free the interrupt from the driver.
1286 */
1287
1288static int xl_close(struct net_device *dev)
1289{
1290 struct xl_private *xl_priv = netdev_priv(dev);
1291 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1292 unsigned long t ;
1293
1294 netif_stop_queue(dev) ;
1295
1296 /*
1297 * Close the adapter, need to stall the rx and tx queues.
1298 */
1299
1300 writew(DNSTALL, xl_mmio + MMIO_COMMAND) ;
1301 t=jiffies;
1302 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1303 schedule();
1304 if (time_after(jiffies, t + 10 * HZ)) {
1305 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name);
1306 break ;
1307 }
1308 }
1309 writew(DNDISABLE, xl_mmio + MMIO_COMMAND) ;
1310 t=jiffies;
1311 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1312 schedule();
1313 if (time_after(jiffies, t + 10 * HZ)) {
1314 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name);
1315 break ;
1316 }
1317 }
1318 writew(UPSTALL, xl_mmio + MMIO_COMMAND) ;
1319 t=jiffies;
1320 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1321 schedule();
1322 if (time_after(jiffies, t + 10 * HZ)) {
1323 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name);
1324 break ;
1325 }
1326 }
1327
1328 /* Turn off interrupts, we will still get the indication though
1329 * so we can trap it
1330 */
1331
1332 writel(SETINTENABLE, xl_mmio + MMIO_COMMAND) ;
1333
1334 xl_srb_cmd(dev,CLOSE_NIC) ;
1335
1336 t=jiffies;
1337 while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) {
1338 schedule();
1339 if (time_after(jiffies, t + 10 * HZ)) {
1340 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name);
1341 break ;
1342 }
1343 }
1344 /* Read the srb response from the adapter */
1345
1346 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
1347 if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) {
1348 printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name);
1349 } else {
1350 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1351 if (readb(xl_mmio + MMIO_MACDATA)==0) {
1352 printk(KERN_INFO "%s: Adapter has been closed\n",dev->name);
1353 writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1354
1355 xl_freemem(dev) ;
1356 free_irq(dev->irq,dev) ;
1357 } else {
1358 printk(KERN_INFO "%s: Close nic command returned error code %02x\n",dev->name, readb(xl_mmio + MMIO_MACDATA)) ;
1359 }
1360 }
1361
1362 /* Reset the upload and download logic */
1363
1364 writew(UPRESET, xl_mmio + MMIO_COMMAND) ;
1365 t=jiffies;
1366 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1367 schedule();
1368 if (time_after(jiffies, t + 10 * HZ)) {
1369 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name);
1370 break ;
1371 }
1372 }
1373 writew(DNRESET, xl_mmio + MMIO_COMMAND) ;
1374 t=jiffies;
1375 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1376 schedule();
1377 if (time_after(jiffies, t + 10 * HZ)) {
1378 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name);
1379 break ;
1380 }
1381 }
1382 xl_hw_reset(dev) ;
1383 return 0 ;
1384}
1385
1386static void xl_set_rx_mode(struct net_device *dev)
1387{
1388 struct xl_private *xl_priv = netdev_priv(dev);
1389 struct netdev_hw_addr *ha;
1390 unsigned char dev_mc_address[4] ;
1391 u16 options ;
1392
1393 if (dev->flags & IFF_PROMISC)
1394 options = 0x0004 ;
1395 else
1396 options = 0x0000 ;
1397
1398 if (options ^ xl_priv->xl_copy_all_options) { /* Changed, must send command */
1399 xl_priv->xl_copy_all_options = options ;
1400 xl_srb_cmd(dev, SET_RECEIVE_MODE) ;
1401 return ;
1402 }
1403
1404 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1405
1406 netdev_for_each_mc_addr(ha, dev) {
1407 dev_mc_address[0] |= ha->addr[2];
1408 dev_mc_address[1] |= ha->addr[3];
1409 dev_mc_address[2] |= ha->addr[4];
1410 dev_mc_address[3] |= ha->addr[5];
1411 }
1412
1413 if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
1414 memcpy(xl_priv->xl_functional_addr, dev_mc_address,4) ;
1415 xl_srb_cmd(dev, SET_FUNC_ADDRESS) ;
1416 }
1417 return ;
1418}
1419
1420
1421/*
1422 * We issued an srb command and now we must read
1423 * the response from the completed command.
1424 */
1425
1426static void xl_srb_bh(struct net_device *dev)
1427{
1428 struct xl_private *xl_priv = netdev_priv(dev);
1429 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1430 u8 srb_cmd, ret_code ;
1431 int i ;
1432
1433 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1434 srb_cmd = readb(xl_mmio + MMIO_MACDATA) ;
1435 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1436 ret_code = readb(xl_mmio + MMIO_MACDATA) ;
1437
1438 /* Ret_code is standard across all commands */
1439
1440 switch (ret_code) {
1441 case 1:
1442 printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ;
1443 break ;
1444 case 4:
1445 printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd);
1446 break ;
1447
1448 case 6:
1449 printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd);
1450 break ;
1451
1452 case 0: /* Successful command execution */
1453 switch (srb_cmd) {
1454 case READ_LOG: /* Returns 14 bytes of data from the NIC */
1455 if(xl_priv->xl_message_level)
1456 printk(KERN_INFO "%s: READ.LOG 14 bytes of data ",dev->name) ;
1457 /*
1458 * We still have to read the log even if message_level = 0 and we don't want
1459 * to see it
1460 */
1461 for (i=0;i<14;i++) {
1462 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1463 if(xl_priv->xl_message_level)
1464 printk("%02x:",readb(xl_mmio + MMIO_MACDATA)) ;
1465 }
1466 printk("\n") ;
1467 break ;
1468 case SET_FUNC_ADDRESS:
1469 if(xl_priv->xl_message_level)
1470 printk(KERN_INFO "%s: Functional Address Set\n",dev->name);
1471 break ;
1472 case CLOSE_NIC:
1473 if(xl_priv->xl_message_level)
1474 printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name);
1475 break ;
1476 case SET_MULTICAST_MODE:
1477 if(xl_priv->xl_message_level)
1478 printk(KERN_INFO "%s: Multicast options successfully changed\n",dev->name) ;
1479 break ;
1480 case SET_RECEIVE_MODE:
1481 if(xl_priv->xl_message_level) {
1482 if (xl_priv->xl_copy_all_options == 0x0004)
1483 printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name);
1484 else
1485 printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name);
1486 }
1487 break ;
1488
1489 } /* switch */
1490 break ;
1491 } /* switch */
1492 return ;
1493}
1494
1495static int xl_set_mac_address (struct net_device *dev, void *addr)
1496{
1497 struct sockaddr *saddr = addr ;
1498 struct xl_private *xl_priv = netdev_priv(dev);
1499
1500 if (netif_running(dev)) {
1501 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1502 return -EIO ;
1503 }
1504
1505 memcpy(xl_priv->xl_laa, saddr->sa_data,dev->addr_len) ;
1506
1507 if (xl_priv->xl_message_level) {
1508 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, xl_priv->xl_laa[0],
1509 xl_priv->xl_laa[1], xl_priv->xl_laa[2],
1510 xl_priv->xl_laa[3], xl_priv->xl_laa[4],
1511 xl_priv->xl_laa[5]);
1512 }
1513
1514 return 0 ;
1515}
1516
/*
 * Handle an ARB (adapter request block) interrupt: the adapter is telling
 * us about either a ring status change or received MAC-frame data.  Runs
 * from the interrupt handler.
 */
static void xl_arb_cmd(struct net_device *dev)
{
	struct xl_private *xl_priv = netdev_priv(dev);
	u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
	u8 arb_cmd ;
	u16 lan_status, lan_status_diff ;

	writel( ( MEM_BYTE_READ | 0xD0000 | xl_priv->arb), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	arb_cmd = readb(xl_mmio + MMIO_MACDATA) ;

	if (arb_cmd == RING_STATUS_CHANGE) { /* Ring.Status.Change */
		writel( ( (MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;

		printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, swab16(readw(xl_mmio + MMIO_MACDATA) )) ;

		/* NOTE(review): MMIO_MACDATA is read a second time here under the
		 * same MAC access command — this assumes the data register stays
		 * stable across repeated reads; confirm against hardware docs. */
		lan_status = swab16(readw(xl_mmio + MMIO_MACDATA));

		/* Acknowledge interrupt, this tells nic we are done with the arb */
		writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;

		/* Only bits that changed since the last report are acted on. */
		lan_status_diff = xl_priv->xl_lan_status ^ lan_status ;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
			if (lan_status_diff & LSC_LWF)
				printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
			if (lan_status_diff & LSC_ARW)
				printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
			if (lan_status_diff & LSC_FPE)
				printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
			if (lan_status_diff & LSC_RR)
				printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);

			/* Adapter has been closed by the hardware */

			/* NOTE(review): this path executes from the interrupt
			 * handler yet calls free_irq() on the IRQ currently
			 * being serviced — free_irq() can sleep/deadlock here;
			 * this teardown should be deferred to process context. */
			netif_stop_queue(dev);
			xl_freemem(dev) ;
			free_irq(dev->irq,dev);

			printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
		} /* If serious error */

		if (xl_priv->xl_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
				printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
			if (lan_status_diff & LSC_HARD_ERR)
				printk(KERN_INFO "%s: Beaconing\n",dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
				printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
				printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
			if (lan_status_diff & LSC_SS)
				printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
				printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
				printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
		}

		if (lan_status_diff & LSC_CO) {
			if (xl_priv->xl_message_level)
				printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
			/* Issue READ.LOG command */
			xl_srb_cmd(dev, READ_LOG) ;
		}

		/* There is no command in the tech docs to issue the read_sr_counters */
		if (lan_status_diff & LSC_SR_CO) {
			if (xl_priv->xl_message_level)
				printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
		}

		xl_priv->xl_lan_status = lan_status ;

	} /* Lan.change.status */
	else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
#if XL_DEBUG
		printk(KERN_INFO "Received.Data\n");
#endif
		/* Save the buffer token at ARB offset 6 for the ASB reply. */
		writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;

		/* Now we are going to be really basic here and not do anything
		 * with the data at all. The tech docs do not give me enough
		 * information to calculate the buffers properly so we're
		 * just going to tell the nic that we've dealt with the frame
		 * anyway.
		 */

		/* Acknowledge interrupt, this tells nic we are done with the arb */
		writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;

		/* Is the ASB free ? */

		xl_priv->asb_queued = 0 ;
		writel( ((MEM_BYTE_READ | 0xD0000 | xl_priv->asb) + 2), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		if (readb(xl_mmio + MMIO_MACDATA) != 0xff) {
			/* ASB busy: request an ASB-free interrupt and reply then. */
			xl_priv->asb_queued = 1 ;

			xl_wait_misr_flags(dev) ;

			writel(MEM_BYTE_WRITE | MF_ASBFR, xl_mmio + MMIO_MAC_ACCESS_CMD);
			writeb(0xff, xl_mmio + MMIO_MACDATA) ;
			writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
			writeb(MISR_ASBFR, xl_mmio + MMIO_MACDATA) ;
			return ;
			/* Drop out and wait for the bottom half to be run */
		}

		xl_asb_cmd(dev) ;

	} else {
		printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
	}

	/* Acknowledge the arb interrupt */

	writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;

	return ;
}
1637
1638
/*
 * There is only one asb command, but we can get called from different
 * places.
 *
 * Build the ASB (adapter status block) receive-data reply in adapter
 * memory and raise MISR_RASB so the adapter processes it.
 */

static void xl_asb_cmd(struct net_device *dev)
{
	struct xl_private *xl_priv = netdev_priv(dev);
	u8 __iomem * xl_mmio = xl_priv->xl_mmio ;

	/* If we were waiting on a free ASB, ack the ASB-free interrupt first. */
	if (xl_priv->asb_queued == 1)
		writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;

	/* ASB command byte 0x81 = receive-data response (ASB_RECEIVE_DATE). */
	writel(MEM_BYTE_WRITE | 0xd0000 | xl_priv->asb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0x81, xl_mmio + MMIO_MACDATA) ;

	/* Word at ASB offset 6: the buffer token saved in xl_arb_cmd(). */
	writel(MEM_WORD_WRITE | 0xd0000 | xl_priv->asb | 6, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writew(swab16(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ;

	xl_wait_misr_flags(dev) ;

	/* Mark the ASB as holding a fresh request... */
	writel(MEM_BYTE_WRITE | MF_RASB, xl_mmio + MMIO_MAC_ACCESS_CMD);
	writeb(0xff, xl_mmio + MMIO_MACDATA) ;

	/* ...and set the RASB bit in the MISR to kick the adapter. */
	writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(MISR_RASB, xl_mmio + MMIO_MACDATA) ;

	/* 2 = reply submitted; xl_asb_bh() runs only if the adapter errors. */
	xl_priv->asb_queued = 2 ;

	return ;
}
1670
/*
 * This will only get called if there was an error
 * from the asb cmd.
 */
static void xl_asb_bh(struct net_device *dev)
{
	struct xl_private *xl_priv = netdev_priv(dev);
	u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
	u8 ret_code ;

	/* NOTE(review): every other access to adapter memory in the 0xDxxxx
	 * range uses a MEM_* access command (cf. xl_srb_bh, xl_asb_cmd);
	 * MMIO_BYTE_READ here looks like a typo for MEM_BYTE_READ — confirm
	 * against the 3C359 hardware documentation before relying on it. */
	writel(MMIO_BYTE_READ | 0xd0000 | xl_priv->asb | 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	ret_code = readb(xl_mmio + MMIO_MACDATA) ;
	switch (ret_code) {
	case 0x01:
		printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name);
		break ;
	case 0x26:
		printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name);
		break ;
	case 0x40:
		printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name);
		break ;
	}
	/* Mark the ASB free again and ack the ASB-free interrupt. */
	xl_priv->asb_queued = 0 ;
	writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
	return ;
}
1698
/*
 * Issue srb commands to the nic
 *
 * Writes the command block into adapter memory at xl_priv->srb, then
 * signals the adapter through the CSRB flag / MISR bit.  Completion is
 * reported later via an SRB interrupt and decoded in xl_srb_bh().
 */

static void xl_srb_cmd(struct net_device *dev, int srb_cmd)
{
	struct xl_private *xl_priv = netdev_priv(dev);
	u8 __iomem * xl_mmio = xl_priv->xl_mmio ;

	switch (srb_cmd) {
	case READ_LOG:
		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(READ_LOG, xl_mmio + MMIO_MACDATA) ;
		break;

	case CLOSE_NIC:
		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(CLOSE_NIC, xl_mmio + MMIO_MACDATA) ;
		break ;

	case SET_RECEIVE_MODE:
		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(SET_RECEIVE_MODE, xl_mmio + MMIO_MACDATA) ;
		/* Receive-mode option word sits at SRB offset 4. */
		writel(MEM_WORD_WRITE | 0xD0000 | xl_priv->srb | 4, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writew(xl_priv->xl_copy_all_options, xl_mmio + MMIO_MACDATA) ;
		break ;

	case SET_FUNC_ADDRESS:
		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(SET_FUNC_ADDRESS, xl_mmio + MMIO_MACDATA) ;
		/* The four functional-address bytes occupy SRB offsets 6..9. */
		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 6 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(xl_priv->xl_functional_addr[0], xl_mmio + MMIO_MACDATA) ;
		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 7 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(xl_priv->xl_functional_addr[1], xl_mmio + MMIO_MACDATA) ;
		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 8 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(xl_priv->xl_functional_addr[2], xl_mmio + MMIO_MACDATA) ;
		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 9 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(xl_priv->xl_functional_addr[3], xl_mmio + MMIO_MACDATA) ;
		break ;
	} /* switch */


	xl_wait_misr_flags(dev) ;

	/* Write 0xff to the CSRB flag */
	writel(MEM_BYTE_WRITE | MF_CSRB , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0xFF, xl_mmio + MMIO_MACDATA) ;
	/* Set csrb bit in MISR register to process command */
	writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(MISR_CSRB, xl_mmio + MMIO_MACDATA) ;
	xl_priv->srb_queued = 1 ;

	return ;
}
1753
/*
 * This is nasty, to use the MISR command you have to wait for 6 memory locations
 * to be zero. This is the way the driver does on other OS'es so we should be ok with
 * the empty loop.
 */

static void xl_wait_misr_flags(struct net_device *dev)
{
	struct xl_private *xl_priv = netdev_priv(dev);
	u8 __iomem * xl_mmio = xl_priv->xl_mmio ;

	int i ;

	writel(MMIO_BYTE_READ | MISR_RW, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	if (readb(xl_mmio + MMIO_MACDATA) != 0) { /* Misr not clear */
		/* Spin until each of the six MISR flag bytes at 0xDFFE0..0xDFFE5
		 * reads zero.  NOTE(review): this busy-wait has no timeout and
		 * no cpu_relax(); a wedged adapter would hang the CPU here. */
		for (i=0; i<6; i++) {
			writel(MEM_BYTE_READ | 0xDFFE0 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
			while (readb(xl_mmio + MMIO_MACDATA) != 0) {
				; /* Empty Loop */
			}
		}
	}

	/* Write 0x80 through the MISR AND register — presumably masks the
	 * MISR down to its top bit; confirm against the hardware docs. */
	writel(MMIO_BYTE_WRITE | MISR_AND, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0x80, xl_mmio + MMIO_MACDATA) ;

	return ;
}
1782
1783/*
1784 * Change mtu size, this should work the same as olympic
1785 */
1786
1787static int xl_change_mtu(struct net_device *dev, int mtu)
1788{
1789 struct xl_private *xl_priv = netdev_priv(dev);
1790 u16 max_mtu ;
1791
1792 if (xl_priv->xl_ring_speed == 4)
1793 max_mtu = 4500 ;
1794 else
1795 max_mtu = 18000 ;
1796
1797 if (mtu > max_mtu)
1798 return -EINVAL ;
1799 if (mtu < 100)
1800 return -EINVAL ;
1801
1802 dev->mtu = mtu ;
1803 xl_priv->pkt_buf_sz = mtu + TR_HLEN ;
1804
1805 return 0 ;
1806}
1807
/* PCI remove callback: tear down everything xl_probe() set up, in
 * reverse order — firmware, netdev registration, MMIO mapping, PCI
 * regions, then the netdev itself. */
static void __devexit xl_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct xl_private *xl_priv=netdev_priv(dev);

	release_firmware(xl_priv->fw);
	unregister_netdev(dev);
	iounmap(xl_priv->xl_mmio) ;
	pci_release_regions(pdev) ;
	pci_set_drvdata(pdev,NULL) ;
	free_netdev(dev);
	return ;
}
1821
/* PCI driver glue: binds xl_probe()/xl_remove_one() to the 3c359 ID table. */
static struct pci_driver xl_3c359_driver = {
	.name = "3c359",
	.id_table = xl_pci_tbl,
	.probe = xl_probe,
	.remove = __devexit_p(xl_remove_one),
};
1828
/* Module init: register the PCI driver with the PCI core. */
static int __init xl_pci_init (void)
{
	return pci_register_driver(&xl_3c359_driver);
}
1833
1834
/* Module exit: unregister the PCI driver. */
static void __exit xl_pci_cleanup (void)
{
	pci_unregister_driver (&xl_3c359_driver);
}
1839
/* Module entry/exit points and license declaration. */
module_init(xl_pci_init);
module_exit(xl_pci_cleanup);

MODULE_LICENSE("GPL") ;
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h
deleted file mode 100644
index bcb1a6b4a4c7..000000000000
--- a/drivers/net/tokenring/3c359.h
+++ /dev/null
@@ -1,291 +0,0 @@
1/*
2 * 3c359.h (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved
3 *
4 * Linux driver for 3Com 3C359 Token Link PCI XL cards.
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License Version 2 or (at your option)
 * any later version, incorporated herein by reference.
9 */
10
11/* Memory Access Commands */
12#define IO_BYTE_READ 0x28 << 24
13#define IO_BYTE_WRITE 0x18 << 24
14#define IO_WORD_READ 0x20 << 24
15#define IO_WORD_WRITE 0x10 << 24
16#define MMIO_BYTE_READ 0x88 << 24
17#define MMIO_BYTE_WRITE 0x48 << 24
18#define MMIO_WORD_READ 0x80 << 24
19#define MMIO_WORD_WRITE 0x40 << 24
20#define MEM_BYTE_READ 0x8C << 24
21#define MEM_BYTE_WRITE 0x4C << 24
22#define MEM_WORD_READ 0x84 << 24
23#define MEM_WORD_WRITE 0x44 << 24
24
25#define PMBAR 0x1C80
26#define PMB_CPHOLD (1<<10)
27
28#define CPATTENTION 0x180D
29#define CPA_PMBARVIS (1<<7)
30#define CPA_MEMWREN (1<<6)
31
32#define SWITCHSETTINGS 0x1C88
33#define EECONTROL 0x1C8A
34#define EEDATA 0x1C8C
35#define EEREAD 0x0080
36#define EEWRITE 0x0040
37#define EEERASE 0x0060
38#define EE_ENABLE_WRITE 0x0030
39#define EEBUSY (1<<15)
40
41#define WRBR 0xCDE02
42#define WWOR 0xCDE04
43#define WWCR 0xCDE06
44#define MACSTATUS 0xCDE08
45#define MISR_RW 0xCDE0B
46#define MISR_AND 0xCDE2B
47#define MISR_SET 0xCDE4B
48#define RXBUFAREA 0xCDE10
49#define RXEARLYTHRESH 0xCDE12
50#define TXSTARTTHRESH 0x58
51#define DNPRIREQTHRESH 0x2C
52
53#define MISR_CSRB (1<<5)
54#define MISR_RASB (1<<4)
55#define MISR_SRBFR (1<<3)
56#define MISR_ASBFR (1<<2)
57#define MISR_ARBF (1<<1)
58
59/* MISR Flags memory locations */
60#define MF_SSBF 0xDFFE0
61#define MF_ARBF 0xDFFE1
62#define MF_ASBFR 0xDFFE2
63#define MF_SRBFR 0xDFFE3
64#define MF_RASB 0xDFFE4
65#define MF_CSRB 0xDFFE5
66
67#define MMIO_MACDATA 0x10
68#define MMIO_MAC_ACCESS_CMD 0x14
69#define MMIO_TIMER 0x1A
70#define MMIO_DMA_CTRL 0x20
71#define MMIO_DNLISTPTR 0x24
72#define MMIO_HASHFILTER 0x28
73#define MMIO_CONFIG 0x29
74#define MMIO_DNPRIREQTHRESH 0x2C
75#define MMIO_DNPOLL 0x2D
76#define MMIO_UPPKTSTATUS 0x30
77#define MMIO_FREETIMER 0x34
78#define MMIO_COUNTDOWN 0x36
79#define MMIO_UPLISTPTR 0x38
80#define MMIO_UPPOLL 0x3C
81#define MMIO_UPBURSTTHRESH 0x40
82#define MMIO_DNBURSTTHRESH 0x41
83#define MMIO_INTSTATUS_AUTO 0x56
84#define MMIO_TXSTARTTHRESH 0x58
85#define MMIO_INTERRUPTENABLE 0x5A
86#define MMIO_INDICATIONENABLE 0x5C
87#define MMIO_COMMAND 0x5E /* These two are meant to be the same */
88#define MMIO_INTSTATUS 0x5E /* Makes the code more readable this way */
89#define INTSTAT_CMD_IN_PROGRESS (1<<12)
90#define INTSTAT_SRB (1<<14)
91#define INTSTAT_INTLATCH (1<<0)
92
93/* Indication / Interrupt Mask
94 * Annoyingly the bits to be set in the indication and interrupt enable
95 * do not match with the actual bits received in the interrupt, although
96 * they are in the same order.
97 * The mapping for the indication / interrupt are:
98 * Bit Indication / Interrupt
99 * 0 HostError
100 * 1 txcomplete
101 * 2 updneeded
102 * 3 rxcomplete
103 * 4 intrequested
104 * 5 macerror
105 * 6 dncomplete
106 * 7 upcomplete
107 * 8 txunderrun
108 * 9 asbf
109 * 10 srbr
110 * 11 arbc
111 *
112 * The only ones we don't want to receive are txcomplete and rxcomplete
113 * we use dncomplete and upcomplete instead.
114 */
115
116#define INT_MASK 0xFF5
117
118/* Note the subtle difference here, IND and INT */
119
120#define SETINDENABLE (8<<12)
121#define SETINTENABLE (7<<12)
122#define SRBBIT (1<<10)
123#define ASBBIT (1<<9)
124#define ARBBIT (1<<11)
125
126#define SRB 0xDFE90
127#define ASB 0xDFED0
128#define ARB 0xD0000
129#define SCRATCH 0xDFEF0
130
131#define INT_REQUEST 0x6000 /* (6 << 12) */
132#define ACK_INTERRUPT 0x6800 /* (13 <<11) */
133#define GLOBAL_RESET 0x00
134#define DNDISABLE 0x5000
135#define DNENABLE 0x4800
136#define DNSTALL 0x3002
137#define DNRESET 0x5800
138#define DNUNSTALL 0x3003
139#define UPRESET 0x2800
140#define UPSTALL 0x3000
141#define UPUNSTALL 0x3001
142#define SETCONFIG 0x4000
143#define SETTXSTARTTHRESH 0x9800
144
145/* Received Interrupts */
146#define ASBFINT (1<<13)
147#define SRBRINT (1<<14)
148#define ARBCINT (1<<15)
149#define TXUNDERRUN (1<<11)
150
151#define UPCOMPINT (1<<10)
152#define DNCOMPINT (1<<9)
153#define HARDERRINT (1<<7)
154#define RXCOMPLETE (1<<4)
155#define TXCOMPINT (1<<2)
156#define HOSTERRINT (1<<1)
157
158/* Receive descriptor bits */
159#define RXOVERRUN cpu_to_le32(1<<19)
160#define RXFC cpu_to_le32(1<<21)
161#define RXAR cpu_to_le32(1<<22)
162#define RXUPDCOMPLETE cpu_to_le32(1<<23)
163#define RXUPDFULL cpu_to_le32(1<<24)
164#define RXUPLASTFRAG cpu_to_le32(1<<31)
165
166/* Transmit descriptor bits */
167#define TXDNCOMPLETE cpu_to_le32(1<<16)
168#define TXTXINDICATE cpu_to_le32(1<<27)
169#define TXDPDEMPTY cpu_to_le32(1<<29)
170#define TXDNINDICATE cpu_to_le32(1<<31)
171#define TXDNFRAGLAST cpu_to_le32(1<<31)
172
173/* Interrupts to Acknowledge */
174#define LATCH_ACK 1
175#define TXCOMPACK (1<<1)
176#define INTREQACK (1<<2)
177#define DNCOMPACK (1<<3)
178#define UPCOMPACK (1<<4)
179#define ASBFACK (1<<5)
180#define SRBRACK (1<<6)
181#define ARBCACK (1<<7)
182
183#define XL_IO_SPACE 128
184#define SRB_COMMAND_SIZE 50
185
186/* Adapter Commands */
187#define REQUEST_INT 0x00
188#define MODIFY_OPEN_PARMS 0x01
189#define RESTORE_OPEN_PARMS 0x02
190#define OPEN_NIC 0x03
191#define CLOSE_NIC 0x04
192#define SET_SLEEP_MODE 0x05
193#define SET_GROUP_ADDRESS 0x06
194#define SET_FUNC_ADDRESS 0x07
195#define READ_LOG 0x08
196#define SET_MULTICAST_MODE 0x0C
197#define CHANGE_WAKEUP_PATTERN 0x0D
198#define GET_STATISTICS 0x13
199#define SET_RECEIVE_MODE 0x1F
200
201/* ARB Commands */
202#define RECEIVE_DATA 0x81
203#define RING_STATUS_CHANGE 0x84
204
205/* ASB Commands */
206#define ASB_RECEIVE_DATE 0x81
207
208/* Defines for LAN STATUS CHANGE reports */
209#define LSC_SIG_LOSS 0x8000
210#define LSC_HARD_ERR 0x4000
211#define LSC_SOFT_ERR 0x2000
212#define LSC_TRAN_BCN 0x1000
213#define LSC_LWF 0x0800
214#define LSC_ARW 0x0400
215#define LSC_FPE 0x0200
216#define LSC_RR 0x0100
217#define LSC_CO 0x0080
218#define LSC_SS 0x0040
219#define LSC_RING_REC 0x0020
220#define LSC_SR_CO 0x0010
221#define LSC_FDX_MODE 0x0004
222
#define XL_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't handle 0xnn */
224
225/* 3c359 defaults for buffers */
226
227#define XL_RX_RING_SIZE 16 /* must be a power of 2 */
228#define XL_TX_RING_SIZE 16 /* must be a power of 2 */
229
230#define PKT_BUF_SZ 4096 /* Default packet size */
231
232/* 3c359 data structures */
233
/* Download (transmit) descriptor, little-endian as read by the NIC. */
struct xl_tx_desc {
	__le32 dnnextptr;		/* bus address of next descriptor; 0 = end of chain */
	__le32 framestartheader;	/* frame length plus TXDN* control bits */
	__le32 buffer;			/* bus address of the frame data */
	__le32 buffer_length;		/* fragment length plus TXDNFRAGLAST */
};
240
/* Upload (receive) descriptor, little-endian as written by the NIC. */
struct xl_rx_desc {
	__le32 upnextptr;	/* bus address of next descriptor */
	__le32 framestatus;	/* RX* status bits filled in by the card */
	__le32 upfragaddr;	/* bus address of the receive buffer */
	__le32 upfraglen;	/* received fragment length */
};
247
/* Per-adapter private state, reached via netdev_priv(). */
struct xl_private {


	/* These two structures must be aligned on 8 byte boundaries */

	/* struct xl_rx_desc xl_rx_ring[XL_RX_RING_SIZE]; */
	/* struct xl_tx_desc xl_tx_ring[XL_TX_RING_SIZE]; */
	struct xl_rx_desc *xl_rx_ring ;	/* receive (upload) descriptor ring */
	struct xl_tx_desc *xl_tx_ring ;	/* transmit (download) descriptor ring */
	struct sk_buff *tx_ring_skb[XL_TX_RING_SIZE], *rx_ring_skb[XL_RX_RING_SIZE];
	int tx_ring_head, tx_ring_tail ;	/* tx ring producer/consumer indices */
	int rx_ring_tail, rx_ring_no ;
	int free_ring_entries ;	/* free tx descriptors remaining */

	/* Offsets of the SRB/ARB/ASB command blocks in adapter memory */
	u16 srb;
	u16 arb;
	u16 asb;

	u8 __iomem *xl_mmio;	/* mapped register window */
	const char *xl_card_name;
	struct pci_dev *pdev ;

	spinlock_t xl_lock ;	/* protects the tx ring (see xl_xmit) */

	volatile int srb_queued;	/* nonzero while an SRB command is outstanding */
	struct wait_queue *srb_wait;
	volatile int asb_queued;	/* 0 = free, 1 = awaiting ASB-free irq, 2 = reply submitted */

	u16 mac_buffer ;	/* buffer token from the last RECEIVE_DATA arb */
	u16 xl_lan_status ;	/* last ring status word reported by the adapter */
	u8 xl_ring_speed ;	/* 4 limits MTU to 4500, otherwise 18000 (see xl_change_mtu) */
	u16 pkt_buf_sz ;	/* receive buffer size = mtu + TR_HLEN */
	u8 xl_message_level;	/* verbosity gate for informational printks */
	u16 xl_copy_all_options ;	/* receive mode options; 0x0004 = promiscuous */
	unsigned char xl_functional_addr[4] ;	/* token-ring functional (multicast) address */
	u16 xl_addr_table_addr, xl_parms_addr ;
	u8 xl_laa[6] ;	/* locally administered address (set via xl_set_mac_address) */
	u32 rx_ring_dma_addr ;	/* bus addresses of the descriptor rings */
	u32 tx_ring_dma_addr ;

	/* firmware section */
	const struct firmware *fw;
};
291
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
deleted file mode 100644
index 45550d42b368..000000000000
--- a/drivers/net/tokenring/Kconfig
+++ /dev/null
@@ -1,199 +0,0 @@
1#
2# Token Ring driver configuration
3#
4
5# So far, we only have PCI, ISA, and MCA token ring devices
6menuconfig TR
7 bool "Token Ring driver support"
8 depends on NETDEVICES && !UML
9 depends on (PCI || ISA || MCA || CCW || PCMCIA)
10 help
11 Token Ring is IBM's way of communication on a local network; the
12 rest of the world uses Ethernet. To participate on a Token Ring
13 network, you need a special Token ring network card. If you are
14 connected to such a Token Ring network and want to use your Token
15 Ring card under Linux, say Y here and to the driver for your
16 particular card below and read the Token-Ring mini-HOWTO, available
17 from <http://www.tldp.org/docs.html#howto>. Most people can
18 say N here.
19
20if TR
21
22config WANT_LLC
23 def_bool y
24 select LLC
25
26config PCMCIA_IBMTR
27 tristate "IBM PCMCIA tokenring adapter support"
28 depends on IBMTR!=y && PCMCIA
29 ---help---
30 Say Y here if you intend to attach this type of Token Ring PCMCIA
31 card to your computer. You then also need to say Y to "Token Ring
32 driver support".
33
34 To compile this driver as a module, choose M here: the module will be
35 called ibmtr_cs.
36
37config IBMTR
38 tristate "IBM Tropic chipset based adapter support"
39 depends on ISA || MCA
40 ---help---
41 This is support for all IBM Token Ring cards that don't use DMA. If
42 you have such a beast, say Y and read the Token-Ring mini-HOWTO,
43 available from <http://www.tldp.org/docs.html#howto>.
44
45 Warning: this driver will almost definitely fail if more than one
46 active Token Ring card is present.
47
48 To compile this driver as a module, choose M here: the module will be
49 called ibmtr.
50
51config IBMOL
52 tristate "IBM Olympic chipset PCI adapter support"
53 depends on PCI
54 ---help---
55 This is support for all non-Lanstreamer IBM PCI Token Ring Cards.
56 Specifically this is all IBM PCI, PCI Wake On Lan, PCI II, PCI II
57 Wake On Lan, and PCI 100/16/4 adapters.
58
59 If you have such an adapter, say Y and read the Token-Ring
60 mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
61
62 To compile this driver as a module, choose M here: the module will be
63 called olympic.
64
65 Also read <file:Documentation/networking/olympic.txt> or check the
66 Linux Token Ring Project site for the latest information at
67 <http://www.linuxtr.net/>.
68
69config IBMLS
70 tristate "IBM Lanstreamer chipset PCI adapter support"
71 depends on PCI && !64BIT
72 help
73 This is support for IBM Lanstreamer PCI Token Ring Cards.
74
75 If you have such an adapter, say Y and read the Token-Ring
76 mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
77
78 To compile this driver as a module, choose M here: the module will be
79 called lanstreamer.
80
81config 3C359
82 tristate "3Com 3C359 Token Link Velocity XL adapter support"
83 depends on PCI
84 ---help---
85 This is support for the 3Com PCI Velocity XL cards, specifically
86 the 3Com 3C359, please note this is not for the 3C339 cards, you
87 should use the tms380 driver instead.
88
89 If you have such an adapter, say Y and read the Token-Ring
90 mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
91
92 To compile this driver as a module, choose M here: the module will be
93 called 3c359.
94
95 Also read the file <file:Documentation/networking/3c359.txt> or check the
96 Linux Token Ring Project site for the latest information at
97 <http://www.linuxtr.net>
98
99config TMS380TR
100 tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
101 depends on PCI || ISA && ISA_DMA_API || MCA
102 select FW_LOADER
103 ---help---
104 This driver provides generic support for token ring adapters
105 based on the Texas Instruments TMS380 series chipsets. This
106 includes the SysKonnect TR4/16(+) ISA (SK-4190), SysKonnect
107 TR4/16(+) PCI (SK-4590), SysKonnect TR4/16 PCI (SK-4591),
108 Compaq 4/16 PCI, Thomas-Conrad TC4048 4/16 PCI, and several
109 Madge adapters. If you say Y here, you will be asked to select
110 which cards to support below. If you're using modules, each
111 class of card will be supported by a separate module.
112
113 If you have such an adapter and would like to use it, say Y and
114 read the Token-Ring mini-HOWTO, available from
115 <http://www.tldp.org/docs.html#howto>.
116
117 Also read the file <file:Documentation/networking/tms380tr.txt> or
118 check <http://www.auk.cx/tms380tr/>.
119
120 To compile this driver as a module, choose M here: the module will be
121 called tms380tr.
122
123config TMSPCI
124 tristate "Generic TMS380 PCI support"
125 depends on TMS380TR && PCI
126 ---help---
127 This tms380 module supports generic TMS380-based PCI cards.
128
129 These cards are known to work:
130 - Compaq 4/16 TR PCI
131 - SysKonnect TR4/16 PCI (SK-4590/SK-4591)
132 - Thomas-Conrad TC4048 PCI 4/16
133 - 3Com Token Link Velocity
134
135 To compile this driver as a module, choose M here: the module will be
136 called tmspci.
137
138config SKISA
139 tristate "SysKonnect TR4/16 ISA support"
140 depends on TMS380TR && ISA
141 help
142 This tms380 module supports SysKonnect TR4/16 ISA cards.
143
144 These cards are known to work:
145 - SysKonnect TR4/16 ISA (SK-4190)
146
147 To compile this driver as a module, choose M here: the module will be
148 called skisa.
149
150config PROTEON
151 tristate "Proteon ISA support"
152 depends on TMS380TR && ISA
153 help
154 This tms380 module supports Proteon ISA cards.
155
156 These cards are known to work:
157 - Proteon 1392
158 - Proteon 1392 plus
159
160 To compile this driver as a module, choose M here: the module will be
161 called proteon.
162
163config ABYSS
164 tristate "Madge Smart 16/4 PCI Mk2 support"
165 depends on TMS380TR && PCI
166 help
167 This tms380 module supports the Madge Smart 16/4 PCI Mk2
168 cards (51-02).
169
170 To compile this driver as a module, choose M here: the module will be
171 called abyss.
172
173config MADGEMC
174 tristate "Madge Smart 16/4 Ringnode MicroChannel"
175 depends on TMS380TR && MCA
176 help
177 This tms380 module supports the Madge Smart 16/4 MC16 and MC32
178 MicroChannel adapters.
179
180 To compile this driver as a module, choose M here: the module will be
181 called madgemc.
182
183config SMCTR
184 tristate "SMC ISA/MCA adapter support"
185 depends on (ISA || MCA_LEGACY) && (BROKEN || !64BIT)
186 ---help---
187 This is support for the ISA and MCA SMC Token Ring cards,
188 specifically SMC TokenCard Elite (8115T) and SMC TokenCard Elite/A
189 (8115T/A) adapters.
190
191 If you have such an adapter and would like to use it, say Y or M and
192 read the Token-Ring mini-HOWTO, available from
193 <http://www.tldp.org/docs.html#howto> and the file
194 <file:Documentation/networking/smctr.txt>.
195
196 To compile this driver as a module, choose M here: the module will be
197 called smctr.
198
199endif # TR
diff --git a/drivers/net/tokenring/Makefile b/drivers/net/tokenring/Makefile
deleted file mode 100644
index f1be8d97b7a8..000000000000
--- a/drivers/net/tokenring/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
1#
2# Makefile for drivers/net/tokenring
3#
4
5obj-$(CONFIG_PCMCIA_IBMTR) += ibmtr_cs.o
6obj-$(CONFIG_IBMTR) += ibmtr.o
7obj-$(CONFIG_IBMOL) += olympic.o
8obj-$(CONFIG_IBMLS) += lanstreamer.o
9obj-$(CONFIG_TMS380TR) += tms380tr.o
10obj-$(CONFIG_ABYSS) += abyss.o
11obj-$(CONFIG_MADGEMC) += madgemc.o
12obj-$(CONFIG_PROTEON) += proteon.o
13obj-$(CONFIG_TMSPCI) += tmspci.o
14obj-$(CONFIG_SKISA) += skisa.o
15obj-$(CONFIG_SMCTR) += smctr.o
16obj-$(CONFIG_3C359) += 3c359.o
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
deleted file mode 100644
index b715e6b444da..000000000000
--- a/drivers/net/tokenring/abyss.c
+++ /dev/null
@@ -1,468 +0,0 @@
1/*
2 * abyss.c: Network driver for the Madge Smart 16/4 PCI Mk2 token ring card.
3 *
4 * Written 1999-2000 by Adam Fritzler
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 * This driver module supports the following cards:
10 * - Madge Smart 16/4 PCI Mk2
11 *
12 * Maintainer(s):
13 * AF Adam Fritzler
14 *
15 * Modification History:
16 * 30-Dec-99 AF Split off from the tms380tr driver.
17 * 22-Jan-00 AF Updated to use indirect read/writes
18 * 23-Nov-00 JG New PCI API, cleanups
19 *
20 *
21 * TODO:
22 * 1. See if we can use MMIO instead of inb/outb/inw/outw
23 * 2. Add support for Mk1 (has AT24 attached to the PCI
24 * config registers)
25 *
26 */
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/errno.h>
31#include <linux/pci.h>
32#include <linux/init.h>
33#include <linux/netdevice.h>
34#include <linux/trdevice.h>
35
36#include <asm/io.h>
37#include <asm/irq.h>
38
39#include "tms380tr.h"
40#include "abyss.h" /* Madge-specific constants */
41
42static char version[] __devinitdata =
43"abyss.c: v1.02 23/11/2000 by Adam Fritzler\n";
44
45#define ABYSS_IO_EXTENT 64
46
47static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
48 { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
49 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
50 { } /* Terminating entry */
51};
52MODULE_DEVICE_TABLE(pci, abyss_pci_tbl);
53
54MODULE_LICENSE("GPL");
55
56static int abyss_open(struct net_device *dev);
57static int abyss_close(struct net_device *dev);
58static void abyss_enable(struct net_device *dev);
59static int abyss_chipset_init(struct net_device *dev);
60static void abyss_read_eeprom(struct net_device *dev);
61static unsigned short abyss_setnselout_pins(struct net_device *dev);
62
63static void at24_writedatabyte(unsigned long regaddr, unsigned char byte);
64static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr);
65static int at24_sendcmd(unsigned long regaddr, unsigned char cmd);
66static unsigned char at24_readdatabit(unsigned long regaddr);
67static unsigned char at24_readdatabyte(unsigned long regaddr);
68static int at24_waitforack(unsigned long regaddr);
69static int at24_waitfornack(unsigned long regaddr);
70static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data);
71static void at24_start(unsigned long regaddr);
72static unsigned char at24_readb(unsigned long regaddr, unsigned char addr);
73
74static unsigned short abyss_sifreadb(struct net_device *dev, unsigned short reg)
75{
76 return inb(dev->base_addr + reg);
77}
78
79static unsigned short abyss_sifreadw(struct net_device *dev, unsigned short reg)
80{
81 return inw(dev->base_addr + reg);
82}
83
84static void abyss_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
85{
86 outb(val, dev->base_addr + reg);
87}
88
89static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
90{
91 outw(val, dev->base_addr + reg);
92}
93
94static struct net_device_ops abyss_netdev_ops;
95
/*
 * PCI probe for the Madge Smart 16/4 PCI Mk2: claims the I/O region
 * and shared IRQ, hooks the Abyss SIF accessors into the generic
 * tms380tr layer, reads the SEEPROM for ring speed and MAC address,
 * and registers the net_device.  On any failure the resources
 * acquired so far are released in reverse order through the goto
 * chain.  Returns 0 on success or a negative errno.
 */
static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int versionprinted;
	struct net_device *dev;
	struct net_local *tp;
	int ret, pci_irq_line;
	unsigned long pci_ioaddr;

	/* Print the driver banner once, on the first probe only. */
	if (versionprinted++ == 0)
		printk("%s", version);

	if (pci_enable_device(pdev))
		return -EIO;

	/* Remove I/O space marker in bit 0. */
	pci_irq_line = pdev->irq;
	pci_ioaddr = pci_resource_start (pdev, 0);

	/* At this point we have found a valid card. */

	dev = alloc_trdev(sizeof(struct net_local));
	if (!dev)
		return -ENOMEM;

	if (!request_region(pci_ioaddr, ABYSS_IO_EXTENT, dev->name)) {
		ret = -EBUSY;
		goto err_out_trdev;
	}

	ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		goto err_out_region;

	dev->base_addr = pci_ioaddr;
	dev->irq = pci_irq_line;

	printk("%s: Madge Smart 16/4 PCI Mk2 (Abyss)\n", dev->name);
	printk("%s: IO: %#4lx IRQ: %d\n",
	       dev->name, pci_ioaddr, dev->irq);
	/*
	 * The TMS SIF registers lay 0x10 above the card base address.
	 */
	dev->base_addr += 0x10;

	ret = tmsdev_init(dev, &pdev->dev);
	if (ret) {
		printk("%s: unable to get memory for dev->priv.\n",
		       dev->name);
		goto err_out_irq;
	}

	abyss_read_eeprom(dev);

	printk("%s: Ring Station Address: %pM\n", dev->name, dev->dev_addr);

	/* Hook the Abyss-specific register accessors into tms380tr. */
	tp = netdev_priv(dev);
	tp->setnselout = abyss_setnselout_pins;
	tp->sifreadb = abyss_sifreadb;
	tp->sifreadw = abyss_sifreadw;
	tp->sifwriteb = abyss_sifwriteb;
	tp->sifwritew = abyss_sifwritew;

	memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1);

	dev->netdev_ops = &abyss_netdev_ops;

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto err_out_tmsdev;
	return 0;

err_out_tmsdev:
	pci_set_drvdata(pdev, NULL);
	tmsdev_term(dev);
err_out_irq:
	free_irq(pdev->irq, dev);
err_out_region:
	release_region(pci_ioaddr, ABYSS_IO_EXTENT);
err_out_trdev:
	free_netdev(dev);
	return ret;
}
182
183static unsigned short abyss_setnselout_pins(struct net_device *dev)
184{
185 unsigned short val = 0;
186 struct net_local *tp = netdev_priv(dev);
187
188 if(tp->DataRate == SPEED_4)
189 val |= 0x01; /* Set 4Mbps */
190 else
191 val |= 0x00; /* Set 16Mbps */
192
193 return val;
194}
195
196/*
197 * The following Madge boards should use this code:
198 * - Smart 16/4 PCI Mk2 (Abyss)
199 * - Smart 16/4 PCI Mk1 (PCI T)
200 * - Smart 16/4 Client Plus PnP (Big Apple)
201 * - Smart 16/4 Cardbus Mk2
202 *
203 * These access an Atmel AT24 SEEPROM using their glue chip registers.
204 *
205 */
/* Clock one byte out to the AT24 SEEPROM, MSB first: the data bit is
 * set up with the clock low, latched on the rising edge, then the
 * clock is returned low. */
static void at24_writedatabyte(unsigned long regaddr, unsigned char byte)
{
	int bit;

	for (bit = 7; bit >= 0; bit--) {
		unsigned char d = (byte >> bit) & 0x01;

		at24_setlines(regaddr, 0, d);
		at24_setlines(regaddr, 1, d);
		at24_setlines(regaddr, 0, d);
	}
}
216
/* Send a command byte followed by an address byte to the SEEPROM.
 * Returns nonzero when both were acknowledged, 0 on failure. */
static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr)
{
	if (!at24_sendcmd(regaddr, cmd))
		return 0;

	at24_writedatabyte(regaddr, addr);
	return at24_waitforack(regaddr);
}
225
/* Issue a START condition and a command byte, retrying up to ten
 * times until the SEEPROM acknowledges.  Returns nonzero on ACK,
 * 0 if the device never answered. */
static int at24_sendcmd(unsigned long regaddr, unsigned char cmd)
{
	int attempt;

	for (attempt = 0; attempt < 10; attempt++) {
		at24_start(regaddr);
		at24_writedatabyte(regaddr, cmd);
		if (at24_waitforack(regaddr))
			return 1;
	}
	return 0;
}
238
/*
 * Read a single data bit from the SEEPROM: release the data line,
 * raise the clock, sample AT24_DATA, then drop the clock again.
 * The exact setlines sequence provides the bit-bang timing.
 */
static unsigned char at24_readdatabit(unsigned long regaddr)
{
	unsigned char val;

	at24_setlines(regaddr, 0, 1);
	at24_setlines(regaddr, 1, 1);
	val = (inb(regaddr) & AT24_DATA)?1:0;
	at24_setlines(regaddr, 1, 1);
	at24_setlines(regaddr, 0, 1);
	return val;
}
250
/* Clock one byte in from the SEEPROM, assembling eight bits MSB
 * first. */
static unsigned char at24_readdatabyte(unsigned long regaddr)
{
	unsigned char value = 0;
	int bit;

	for (bit = 0; bit < 8; bit++)
		value = (unsigned char)((value << 1) | at24_readdatabit(regaddr));

	return value;
}
263
/* Poll for an ACK (data line pulled low) for up to ten bit times.
 * Returns nonzero on ACK, 0 on timeout. */
static int at24_waitforack(unsigned long regaddr)
{
	int tries = 10;

	while (tries--) {
		if (!(at24_readdatabit(regaddr) & 0x01))
			return 1;
	}
	return 0;
}
274
/* Poll for a NACK (data line high) for up to ten bit times.
 * Returns nonzero on NACK, 0 on timeout. */
static int at24_waitfornack(unsigned long regaddr)
{
	int tries = 10;

	while (tries--) {
		if (at24_readdatabit(regaddr) & 0x01)
			return 1;
	}
	return 0;
}
284
285static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data)
286{
287 unsigned char val = AT24_ENABLE;
288 if (clock)
289 val |= AT24_CLOCK;
290 if (data)
291 val |= AT24_DATA;
292
293 outb(val, regaddr);
294 tms380tr_wait(20); /* Very necessary. */
295}
296
/*
 * Generate an I2C START condition: the data line goes high-to-low
 * while the clock is high.  The sequence of setlines calls is fixed;
 * do not reorder.
 */
static void at24_start(unsigned long regaddr)
{
	at24_setlines(regaddr, 0, 1);
	at24_setlines(regaddr, 1, 1);
	at24_setlines(regaddr, 1, 0);
	at24_setlines(regaddr, 0, 1);
}
304
305static unsigned char at24_readb(unsigned long regaddr, unsigned char addr)
306{
307 unsigned char data = 0xff;
308
309 if (at24_sendfullcmd(regaddr, AT24_WRITE, addr)) {
310 if (at24_sendcmd(regaddr, AT24_READ)) {
311 data = at24_readdatabyte(regaddr);
312 if (!at24_waitfornack(regaddr))
313 data = 0xff;
314 }
315 }
316 return data;
317}
318
319
320/*
321 * Enable basic functions of the Madge chipset needed
322 * for initialization.
323 */
324static void abyss_enable(struct net_device *dev)
325{
326 unsigned char reset_reg;
327 unsigned long ioaddr;
328
329 ioaddr = dev->base_addr;
330 reset_reg = inb(ioaddr + PCIBM2_RESET_REG);
331 reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
332 outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
333 tms380tr_wait(100);
334}
335
336/*
337 * Enable the functions of the Madge chipset needed for
338 * full working order.
339 */
/*
 * Bring the Madge glue chip to full working order: pulse everything
 * through reset, release chip, SIF and FIFO one at a time, then
 * enable interrupts and set the FIFO threshold.  The write sequence
 * is order-sensitive.  Always returns 0.
 */
static int abyss_chipset_init(struct net_device *dev)
{
	unsigned char reset_reg;
	unsigned long ioaddr;

	ioaddr = dev->base_addr;

	reset_reg = inb(ioaddr + PCIBM2_RESET_REG);

	/* Make sure the chip is out of reset first. */
	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	/* Assert reset on chip, FIFO and SIF (NRES bits released = running). */
	reset_reg &= ~(PCIBM2_RESET_REG_CHIP_NRES |
		       PCIBM2_RESET_REG_FIFO_NRES |
		       PCIBM2_RESET_REG_SIF_NRES);
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	tms380tr_wait(100);

	/* Release from reset one unit at a time: chip, SIF, then FIFO. */
	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	reset_reg |= PCIBM2_RESET_REG_SIF_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	reset_reg |= PCIBM2_RESET_REG_FIFO_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	/* Enable SIF and PCI-error interrupts from the glue chip. */
	outb(PCIBM2_INT_CONTROL_REG_SINTEN |
	     PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE,
	     ioaddr + PCIBM2_INT_CONTROL_REG);

	outb(30, ioaddr + PCIBM2_FIFO_THRESHOLD);

	return 0;
}
376
377static inline void abyss_chipset_close(struct net_device *dev)
378{
379 unsigned long ioaddr;
380
381 ioaddr = dev->base_addr;
382 outb(0, ioaddr + PCIBM2_RESET_REG);
383}
384
385/*
386 * Read configuration data from the AT24 SEEPROM on Madge cards.
387 *
388 */
/*
 * Read configuration data from the AT24 SEEPROM on Madge cards:
 * ring speed, adapter RAM size and the six-byte burned-in MAC
 * address.  The glue chip is enabled first so the SEEPROM register
 * is accessible.
 */
static void abyss_read_eeprom(struct net_device *dev)
{
	struct net_local *tp;
	unsigned long ioaddr;
	unsigned short val;
	int i;

	tp = netdev_priv(dev);
	ioaddr = dev->base_addr;

	/* Must enable glue chip first */
	abyss_enable(dev);

	val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
			 PCIBM2_SEEPROM_RING_SPEED);
	tp->DataRate = val?SPEED_4:SPEED_16; /* set open speed */
	printk("%s: SEEPROM: ring speed: %dMb/sec\n", dev->name, tp->DataRate);

	/* RAM size is stored in units of 128kb. */
	val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
			 PCIBM2_SEEPROM_RAM_SIZE) * 128;
	printk("%s: SEEPROM: adapter RAM: %dkb\n", dev->name, val);

	/* Burned-in address: six consecutive SEEPROM bytes. */
	dev->addr_len = 6;
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
					      PCIBM2_SEEPROM_BIA+i);
}
416
/* ndo_open: reset the glue chip, then run the generic tms380tr open. */
static int abyss_open(struct net_device *dev)
{
	abyss_chipset_init(dev);
	tms380tr_open(dev);
	return 0;
}
423
/* ndo_stop: generic tms380tr close, then hold the glue chip in reset. */
static int abyss_close(struct net_device *dev)
{
	tms380tr_close(dev);
	abyss_chipset_close(dev);
	return 0;
}
430
/*
 * PCI remove: unregister the interface and release resources in the
 * reverse order of abyss_attach().  base_addr was advanced 0x10 past
 * the start of the claimed I/O region at probe time, hence the -0x10
 * when releasing it.
 */
static void __devexit abyss_detach (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	BUG_ON(!dev);
	unregister_netdev(dev);
	release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT);
	free_irq(dev->irq, dev);
	tmsdev_term(dev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}
443
444static struct pci_driver abyss_driver = {
445 .name = "abyss",
446 .id_table = abyss_pci_tbl,
447 .probe = abyss_attach,
448 .remove = __devexit_p(abyss_detach),
449};
450
/*
 * Module init: copy the shared tms380tr netdev_ops so only open/stop
 * are overridden with the Abyss variants, then register the PCI
 * driver.
 */
static int __init abyss_init (void)
{
	abyss_netdev_ops = tms380tr_netdev_ops;

	abyss_netdev_ops.ndo_open = abyss_open;
	abyss_netdev_ops.ndo_stop = abyss_close;

	return pci_register_driver(&abyss_driver);
}
460
/* Module exit: unregister the PCI driver. */
static void __exit abyss_rmmod (void)
{
	pci_unregister_driver (&abyss_driver);
}
465
466module_init(abyss_init);
467module_exit(abyss_rmmod);
468
diff --git a/drivers/net/tokenring/abyss.h b/drivers/net/tokenring/abyss.h
deleted file mode 100644
index b0a473b89133..000000000000
--- a/drivers/net/tokenring/abyss.h
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * abyss.h: Header for the abyss tms380tr module
3 *
4 * Authors:
5 * - Adam Fritzler
6 */
7
8#ifndef __LINUX_MADGETR_H
9#define __LINUX_MADGETR_H
10
11#ifdef __KERNEL__
12
13/*
14 * For Madge Smart 16/4 PCI Mk2. Since we increment the base address
15 * to get everything correct for the TMS SIF, we do these as negatives
16 * as they fall below the SIF in addressing.
17 */
18#define PCIBM2_INT_STATUS_REG ((short)-15)/* 0x01 */
19#define PCIBM2_INT_CONTROL_REG ((short)-14)/* 0x02 */
20#define PCIBM2_RESET_REG ((short)-12)/* 0x04 */
21#define PCIBM2_SEEPROM_REG ((short)-9) /* 0x07 */
22
23#define PCIBM2_INT_CONTROL_REG_SINTEN 0x02
24#define PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE 0x80
25#define PCIBM2_INT_STATUS_REG_PCI_ERR 0x80
26
27#define PCIBM2_RESET_REG_CHIP_NRES 0x01
28#define PCIBM2_RESET_REG_FIFO_NRES 0x02
29#define PCIBM2_RESET_REG_SIF_NRES 0x04
30
31#define PCIBM2_FIFO_THRESHOLD 0x21
32#define PCIBM2_BURST_LENGTH 0x22
33
34/*
35 * Bits in PCIBM2_SEEPROM_REG.
36 */
37#define AT24_ENABLE 0x04
38#define AT24_DATA 0x02
39#define AT24_CLOCK 0x01
40
41/*
42 * AT24 Commands.
43 */
44#define AT24_WRITE 0xA0
45#define AT24_READ 0xA1
46
47/*
48 * Addresses in AT24 SEEPROM.
49 */
50#define PCIBM2_SEEPROM_BIA 0x12
51#define PCIBM2_SEEPROM_RING_SPEED 0x18
52#define PCIBM2_SEEPROM_RAM_SIZE 0x1A
53#define PCIBM2_SEEPROM_HWF1 0x1C
54#define PCIBM2_SEEPROM_HWF2 0x1E
55
56
57#endif /* __KERNEL__ */
58#endif /* __LINUX_MADGETR_H */
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
deleted file mode 100644
index b5c8c18f5046..000000000000
--- a/drivers/net/tokenring/ibmtr.c
+++ /dev/null
@@ -1,1964 +0,0 @@
1/* ibmtr.c: A shared-memory IBM Token Ring 16/4 driver for linux
2 *
3 * Written 1993 by Mark Swanson and Peter De Schrijver.
4 * This software may be used and distributed according to the terms
5 * of the GNU General Public License, incorporated herein by reference.
6 *
7 * This device driver should work with Any IBM Token Ring Card that does
8 * not use DMA.
9 *
10 * I used Donald Becker's (becker@scyld.com) device driver work
11 * as a base for most of my initial work.
12 *
13 * Changes by Peter De Schrijver
14 * (Peter.Deschrijver@linux.cc.kuleuven.ac.be) :
15 *
16 * + changed name to ibmtr.c in anticipation of other tr boards.
17 * + changed reset code and adapter open code.
18 * + added SAP open code.
19 * + a first attempt to write interrupt, transmit and receive routines.
20 *
21 * Changes by David W. Morris (dwm@shell.portal.com) :
22 * 941003 dwm: - Restructure tok_probe for multiple adapters, devices.
23 * + Add comments, misc reorg for clarity.
24 * + Flatten interrupt handler levels.
25 *
26 * Changes by Farzad Farid (farzy@zen.via.ecp.fr)
27 * and Pascal Andre (andre@chimay.via.ecp.fr) (March 9 1995) :
28 * + multi ring support clean up.
29 * + RFC1042 compliance enhanced.
30 *
31 * Changes by Pascal Andre (andre@chimay.via.ecp.fr) (September 7 1995) :
32 * + bug correction in tr_tx
33 * + removed redundant information display
34 * + some code reworking
35 *
36 * Changes by Michel Lespinasse (walken@via.ecp.fr),
37 * Yann Doussot (doussot@via.ecp.fr) and Pascal Andre (andre@via.ecp.fr)
38 * (February 18, 1996) :
39 * + modified shared memory and mmio access port the driver to
40 * alpha platform (structure access -> readb/writeb)
41 *
42 * Changes by Steve Kipisz (bungy@ibm.net or kipisz@vnet.ibm.com)
43 * (January 18 1996):
44 * + swapped WWOR and WWCR in ibmtr.h
45 * + moved some init code from tok_probe into trdev_init. The
46 * PCMCIA code can call trdev_init to complete initializing
47 * the driver.
48 * + added -DPCMCIA to support PCMCIA
49 * + detecting PCMCIA Card Removal in interrupt handler. If
50 * ISRP is FF, then a PCMCIA card has been removed
51 * 10/2000 Burt needed a new method to avoid crashing the OS
52 *
53 * Changes by Paul Norton (pnorton@cts.com) :
54 * + restructured the READ.LOG logic to prevent the transmit SRB
55 * from being rudely overwritten before the transmit cycle is
56 * complete. (August 15 1996)
57 * + completed multiple adapter support. (November 20 1996)
58 * + implemented csum_partial_copy in tr_rx and increased receive
59 * buffer size and count. Minor fixes. (March 15, 1997)
60 *
61 * Changes by Christopher Turcksin <wabbit@rtfc.demon.co.uk>
62 * + Now compiles ok as a module again.
63 *
64 * Changes by Paul Norton (pnorton@ieee.org) :
65 * + moved the header manipulation code in tr_tx and tr_rx to
66 * net/802/tr.c. (July 12 1997)
67 * + add retry and timeout on open if cable disconnected. (May 5 1998)
68 * + lifted 2000 byte mtu limit. now depends on shared-RAM size.
69 * May 25 1998)
70 * + can't allocate 2k recv buff at 8k shared-RAM. (20 October 1998)
71 *
72 * Changes by Joel Sloan (jjs@c-me.com) :
73 * + disable verbose debug messages by default - to enable verbose
74 * debugging, edit the IBMTR_DEBUG_MESSAGES define below
75 *
76 * Changes by Mike Phillips <phillim@amtrak.com> :
77 * + Added extra #ifdef's to work with new PCMCIA Token Ring Code.
78 * The PCMCIA code now just sets up the card so it can be recognized
79 * by ibmtr_probe. Also checks allocated memory vs. on-board memory
80 * for correct figure to use.
81 *
82 * Changes by Tim Hockin (thockin@isunix.it.ilstu.edu) :
83 * + added spinlocks for SMP sanity (10 March 1999)
84 *
85 * Changes by Jochen Friedrich to enable RFC1469 Option 2 multicasting
86 * i.e. using functional address C0 00 00 04 00 00 to transmit and
87 * receive multicast packets.
88 *
89 * Changes by Mike Sullivan (based on original sram patch by Dave Grothe
90 * to support windowing into on adapter shared ram.
91 * i.e. Use LANAID to setup a PnP configuration with 16K RAM. Paging
92 * will shift this 16K window over the entire available shared RAM.
93 *
94 * Changes by Peter De Schrijver (p2@mind.be) :
95 * + fixed a problem with PCMCIA card removal
96 *
97 * Change by Mike Sullivan et al.:
98 * + added turbo card support. No need to use lanaid to configure
99 * the adapter into isa compatibility mode.
100 *
101 * Changes by Burt Silverman to allow the computer to behave nicely when
102 * a cable is pulled or not in place, or a PCMCIA card is removed hot.
103 */
104
105/* change the define of IBMTR_DEBUG_MESSAGES to a nonzero value
106in the event that chatty debug messages are desired - jjs 12/30/98 */
107
108#define IBMTR_DEBUG_MESSAGES 0
109
110#include <linux/module.h>
111#include <linux/sched.h>
112
113#ifdef PCMCIA /* required for ibmtr_cs.c to build */
114#undef MODULE /* yes, really */
115#undef ENABLE_PAGING
116#else
117#define ENABLE_PAGING 1
118#endif
119
120/* changes the output format of driver initialization */
121#define TR_VERBOSE 0
122
123/* some 95 OS send many non UI frame; this allow removing the warning */
124#define TR_FILTERNONUI 1
125
126#include <linux/interrupt.h>
127#include <linux/ioport.h>
128#include <linux/netdevice.h>
129#include <linux/ip.h>
130#include <linux/trdevice.h>
131#include <linux/ibmtr.h>
132
133#include <net/checksum.h>
134
135#include <asm/io.h>
136
137#define DPRINTK(format, args...) printk("%s: " format, dev->name , ## args)
138#define DPRINTD(format, args...) DummyCall("%s: " format, dev->name , ## args)
139
140/* version and credits */
141#ifndef PCMCIA
142static char version[] __devinitdata =
143 "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n"
144 " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n"
145 " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n"
146 " v2.2.1 02/08/00 Mike Sullivan <sullivam@us.ibm.com>\n"
147 " v2.2.2 07/27/00 Burt Silverman <burts@us.ibm.com>\n"
148 " v2.4.0 03/01/01 Mike Sullivan <sullivan@us.ibm.com>\n";
149#endif
150
151/* this allows displaying full adapter information */
152
153static char *channel_def[] __devinitdata = { "ISA", "MCA", "ISA P&P" };
154
155static char pcchannelid[] __devinitdata = {
156 0x05, 0x00, 0x04, 0x09,
157 0x04, 0x03, 0x04, 0x0f,
158 0x03, 0x06, 0x03, 0x01,
159 0x03, 0x01, 0x03, 0x00,
160 0x03, 0x09, 0x03, 0x09,
161 0x03, 0x00, 0x02, 0x00
162};
163
164static char mcchannelid[] __devinitdata = {
165 0x04, 0x0d, 0x04, 0x01,
166 0x05, 0x02, 0x05, 0x03,
167 0x03, 0x06, 0x03, 0x03,
168 0x05, 0x08, 0x03, 0x04,
169 0x03, 0x05, 0x03, 0x01,
170 0x03, 0x08, 0x02, 0x00
171};
172
173static char __devinit *adapter_def(char type)
174{
175 switch (type) {
176 case 0xF: return "PC Adapter | PC Adapter II | Adapter/A";
177 case 0xE: return "16/4 Adapter | 16/4 Adapter/A (long)";
178 case 0xD: return "16/4 Adapter/A (short) | 16/4 ISA-16 Adapter";
179 case 0xC: return "Auto 16/4 Adapter";
180 default: return "adapter (unknown type)";
181 }
182};
183
184#define TRC_INIT 0x01 /* Trace initialization & PROBEs */
185#define TRC_INITV 0x02 /* verbose init trace points */
186static unsigned char ibmtr_debug_trace = 0;
187
188static int ibmtr_probe1(struct net_device *dev, int ioaddr);
189static unsigned char get_sram_size(struct tok_info *adapt_info);
190static int trdev_init(struct net_device *dev);
191static int tok_open(struct net_device *dev);
192static int tok_init_card(struct net_device *dev);
193static void tok_open_adapter(unsigned long dev_addr);
194static void open_sap(unsigned char type, struct net_device *dev);
195static void tok_set_multicast_list(struct net_device *dev);
196static netdev_tx_t tok_send_packet(struct sk_buff *skb,
197 struct net_device *dev);
198static int tok_close(struct net_device *dev);
199static irqreturn_t tok_interrupt(int irq, void *dev_id);
200static void initial_tok_int(struct net_device *dev);
201static void tr_tx(struct net_device *dev);
202static void tr_rx(struct net_device *dev);
203static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev);
204static void tok_rerun(unsigned long dev_addr);
205static void ibmtr_readlog(struct net_device *dev);
206static int ibmtr_change_mtu(struct net_device *dev, int mtu);
207static void find_turbo_adapters(int *iolist);
208
209static int ibmtr_portlist[IBMTR_MAX_ADAPTERS+1] __devinitdata = {
210 0xa20, 0xa24, 0, 0, 0
211};
212static int __devinitdata turbo_io[IBMTR_MAX_ADAPTERS] = {0};
213static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0};
214static int __devinitdata turbo_searched = 0;
215
216#ifndef PCMCIA
217static __u32 ibmtr_mem_base __devinitdata = 0xd0000;
218#endif
219
220static void __devinit PrtChanID(char *pcid, short stride)
221{
222 short i, j;
223 for (i = 0, j = 0; i < 24; i++, j += stride)
224 printk("%1x", ((int) pcid[j]) & 0x0f);
225 printk("\n");
226}
227
228static void __devinit HWPrtChanID(void __iomem *pcid, short stride)
229{
230 short i, j;
231 for (i = 0, j = 0; i < 24; i++, j += stride)
232 printk("%1x", ((int) readb(pcid + j)) & 0x0f);
233 printk("\n");
234}
235
236/* We have to ioremap every checked address, because isa_readb is
237 * going away.
238 */
239
240static void __devinit find_turbo_adapters(int *iolist)
241{
242 int ram_addr;
243 int index=0;
244 void __iomem *chanid;
245 int found_turbo=0;
246 unsigned char *tchanid, ctemp;
247 int i, j;
248 unsigned long jif;
249 void __iomem *ram_mapped ;
250
251 if (turbo_searched == 1) return;
252 turbo_searched=1;
253 for (ram_addr=0xC0000; ram_addr < 0xE0000; ram_addr+=0x2000) {
254
255 __u32 intf_tbl=0;
256
257 found_turbo=1;
258 ram_mapped = ioremap((u32)ram_addr,0x1fff) ;
259 if (ram_mapped==NULL)
260 continue ;
261 chanid=(CHANNEL_ID + ram_mapped);
262 tchanid=pcchannelid;
263 ctemp=readb(chanid) & 0x0f;
264 if (ctemp != *tchanid) continue;
265 for (i=2,j=1; i<=46; i=i+2,j++) {
266 if ((readb(chanid+i) & 0x0f) != tchanid[j]){
267 found_turbo=0;
268 break;
269 }
270 }
271 if (!found_turbo) continue;
272
273 writeb(0x90, ram_mapped+0x1E01);
274 for(i=2; i<0x0f; i++) {
275 writeb(0x00, ram_mapped+0x1E01+i);
276 }
277 writeb(0x00, ram_mapped+0x1E01);
278 for(jif=jiffies+TR_BUSY_INTERVAL; time_before_eq(jiffies,jif););
279 intf_tbl=ntohs(readw(ram_mapped+ACA_OFFSET+ACA_RW+WRBR_EVEN));
280 if (intf_tbl) {
281#if IBMTR_DEBUG_MESSAGES
282 printk("ibmtr::find_turbo_adapters, Turbo found at "
283 "ram_addr %x\n",ram_addr);
284 printk("ibmtr::find_turbo_adapters, interface_table ");
285 for(i=0; i<6; i++) {
286 printk("%x:",readb(ram_addr+intf_tbl+i));
287 }
288 printk("\n");
289#endif
290 turbo_io[index]=ntohs(readw(ram_mapped+intf_tbl+4));
291 turbo_irq[index]=readb(ram_mapped+intf_tbl+3);
292 outb(0, turbo_io[index] + ADAPTRESET);
293 for(jif=jiffies+TR_RST_TIME;time_before_eq(jiffies,jif););
294 outb(0, turbo_io[index] + ADAPTRESETREL);
295 index++;
296 continue;
297 }
298#if IBMTR_DEBUG_MESSAGES
299 printk("ibmtr::find_turbo_adapters, ibmtr card found at"
300 " %x but not a Turbo model\n",ram_addr);
301#endif
302 iounmap(ram_mapped) ;
303 } /* for */
304 for(i=0; i<IBMTR_MAX_ADAPTERS; i++) {
305 if(!turbo_io[i]) break;
306 for (j=0; j<IBMTR_MAX_ADAPTERS; j++) {
307 if ( iolist[j] && iolist[j] != turbo_io[i]) continue;
308 iolist[j]=turbo_io[i];
309 break;
310 }
311 }
312}
313
/*
 * Quiesce one adapter and release its resources: pulse the hardware
 * reset (50ms hold), then — in non-PCMCIA builds, where this driver
 * owns them — free the IRQ, the PIO region, and both MMIO mappings.
 */
static void ibmtr_cleanup_card(struct net_device *dev)
{
	if (dev->base_addr) {
		outb(0,dev->base_addr+ADAPTRESET);

		schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */

		outb(0,dev->base_addr+ADAPTRESETREL);
	}

#ifndef PCMCIA
	/* in the PCMCIA build these resources belong to the socket driver */
	free_irq(dev->irq, dev);
	release_region(dev->base_addr, IBMTR_IO_EXTENT);

	{
		struct tok_info *ti = netdev_priv(dev);
		iounmap(ti->mmio);
		iounmap(ti->sram_virt);
	}
#endif
}
335
336/****************************************************************************
337 * ibmtr_probe(): Routine specified in the network device structure
338 * to probe for an IBM Token Ring Adapter. Routine outline:
339 * I. Interrogate hardware to determine if an adapter exists
340 * and what the speeds and feeds are
341 * II. Setup data structures to control execution based upon
342 * adapter characteristics.
343 *
344 * We expect ibmtr_probe to be called once for each device entry
345 * which references it.
346 ****************************************************************************/
347
348static int __devinit ibmtr_probe(struct net_device *dev)
349{
350 int i;
351 int base_addr = dev->base_addr;
352
353 if (base_addr && base_addr <= 0x1ff) /* Don't probe at all. */
354 return -ENXIO;
355 if (base_addr > 0x1ff) { /* Check a single specified location. */
356 if (!ibmtr_probe1(dev, base_addr)) return 0;
357 return -ENODEV;
358 }
359 find_turbo_adapters(ibmtr_portlist);
360 for (i = 0; ibmtr_portlist[i]; i++) {
361 int ioaddr = ibmtr_portlist[i];
362
363 if (!ibmtr_probe1(dev, ioaddr)) return 0;
364 }
365 return -ENODEV;
366}
367
368int __devinit ibmtr_probe_card(struct net_device *dev)
369{
370 int err = ibmtr_probe(dev);
371 if (!err) {
372 err = register_netdev(dev);
373 if (err)
374 ibmtr_cleanup_card(dev);
375 }
376 return err;
377}
378
379/*****************************************************************************/
380
/*
 * Probe one candidate PIO port for an IBM Token Ring adapter.
 *
 * Reads the MMIO segment byte from the PIO port, maps the MMIO window,
 * matches the channel-ID string to classify the card (ISA / MCA /
 * ISA-PnP), derives the IRQ and shared-RAM geometry from adapter
 * registers, claims IRQ and PIO region (non-PCMCIA builds), sizes the
 * DHB/receive buffers from available shared RAM, and fills in the
 * net_device fields via trdev_init().
 *
 * Returns 0 when an adapter was found and set up; -ENODEV or -EBUSY on
 * failure, releasing everything acquired here.
 */
static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
{

	unsigned char segment, intr=0, irq=0, i, j, cardpresent=NOTOK, temp=0;
	void __iomem * t_mmio = NULL;
	struct tok_info *ti = netdev_priv(dev);
	void __iomem *cd_chanid;
	unsigned char *tchanid, ctemp;
#ifndef PCMCIA
	unsigned char t_irq=0;
	unsigned long timeout;
	static int version_printed;
#endif

	/* Query the adapter PIO base port which will return
	 * indication of where MMIO was placed. We also have a
	 * coded interrupt number.
	 */
	segment = inb(PIOaddr);
	if (segment < 0x40 || segment > 0xe0) {
		/* Out of range values so we'll assume non-existent IO device
		 * but this is not necessarily a problem, esp if a turbo
		 * adapter is being used. */
#if IBMTR_DEBUG_MESSAGES
		DPRINTK("ibmtr_probe1(): unhappy that inb(0x%X) == 0x%X, "
			"Hardware Problem?\n",PIOaddr,segment);
#endif
		return -ENODEV;
	}
	/*
	 * Compute the linear base address of the MMIO area
	 * as LINUX doesn't care about segments
	 */
	t_mmio = ioremap(((__u32) (segment & 0xfc) << 11) + 0x80000,2048);
	if (!t_mmio) {
		DPRINTK("Cannot remap mmiobase memory area") ;
		return -ENODEV ;
	}
	intr = segment & 0x03;	/* low bits is coded interrupt # */
	if (ibmtr_debug_trace & TRC_INIT)
		DPRINTK("PIOaddr: %4hx seg/intr: %2x mmio base: %p intr: %d\n"
				, PIOaddr, (int) segment, t_mmio, (int) intr);

	/*
	 * Now compare the expected 'channelid' strings against what is
	 * actually there, to learn whether this is an ISA or MCA TR card
	 * (or no TR card at all).
	 */
#ifdef PCMCIA
	iounmap(t_mmio);
	t_mmio = ti->mmio;	/*BMS to get virtual address */
	irq = ti->irq;		/*BMS to display the irq! */
#endif
	cd_chanid = (CHANNEL_ID + t_mmio);	/* for efficiency */
	tchanid = pcchannelid;
	cardpresent = TR_ISA;	/* try ISA */

	/* Suboptimize knowing first byte different */
	ctemp = readb(cd_chanid) & 0x0f;
	if (ctemp != *tchanid) {	/* NOT ISA card, try MCA */
		tchanid = mcchannelid;
		cardpresent = TR_MCA;
		if (ctemp != *tchanid)	/* Neither ISA nor MCA */
			cardpresent = NOTOK;
	}
	if (cardpresent != NOTOK) {
		/* Know presumed type, try rest of ID */
		for (i = 2, j = 1; i <= 46; i = i + 2, j++) {
			if( (readb(cd_chanid+i)&0x0f) == tchanid[j]) continue;
			/* match failed, not TR card */
			cardpresent = NOTOK;
			break;
		}
	}
	/*
	 * If we have an ISA board check for the ISA P&P version,
	 * as it has different IRQ settings
	 */
	if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio) == 0x0e))
		cardpresent = TR_ISAPNP;
	if (cardpresent == NOTOK) {	/* "channel_id" did not match, report */
		if (!(ibmtr_debug_trace & TRC_INIT)) {
#ifndef PCMCIA
			iounmap(t_mmio);
#endif
			return -ENODEV;
		}
		DPRINTK( "Channel ID string not found for PIOaddr: %4hx\n",
			PIOaddr);
		DPRINTK("Expected for ISA: ");
		PrtChanID(pcchannelid, 1);
		DPRINTK(" found: ");
/* BMS Note that this can be misleading, when hardware is flaky, because you
   are reading it a second time here. So with my flaky hardware, I'll see my-
   self in this block, with the HW ID matching the ISA ID exactly! */
		HWPrtChanID(cd_chanid, 2);
		DPRINTK("Expected for MCA: ");
		PrtChanID(mcchannelid, 1);
	}
	/* NOTE(review): with TRC_INIT tracing enabled, a NOTOK card falls
	 * through here and probing continues; channel_def[cardpresent - 1]
	 * near the end would then index out of bounds — confirm whether
	 * this debug path is ever expected to reach that point. */
	/* Now, setup some of the pl0 buffers for this driver.. */
	/* If called from PCMCIA, it is already set up, so no need to
	   waste the memory, just use the existing structure */
#ifndef PCMCIA
	ti->mmio = t_mmio;
	/* mark this port Turbo if find_turbo_adapters() recorded it */
	for (i = 0; i < IBMTR_MAX_ADAPTERS; i++) {
		if (turbo_io[i] != PIOaddr)
			continue;
#if IBMTR_DEBUG_MESSAGES
		printk("ibmtr::tr_probe1, setting PIOaddr %x to Turbo\n",
		       PIOaddr);
#endif
		ti->turbo = 1;
		t_irq = turbo_irq[i];
	}
#endif /* !PCMCIA */
	ti->readlog_pending = 0;
	init_waitqueue_head(&ti->wait_for_reset);

	/* if PCMCIA, the card can be recognized as either TR_ISA or TR_ISAPNP
	 * depending which card is inserted. */

#ifndef PCMCIA
	/* decode the 2-bit interrupt code into a real IRQ per bus type */
	switch (cardpresent) {
	case TR_ISA:
		if (intr == 0) irq = 9;	/* irq2 really is irq9 */
		if (intr == 1) irq = 3;
		if (intr == 2) irq = 6;
		if (intr == 3) irq = 7;
		ti->adapter_int_enable = PIOaddr + ADAPTINTREL;
		break;
	case TR_MCA:
		if (intr == 0) irq = 9;
		if (intr == 1) irq = 3;
		if (intr == 2) irq = 10;
		if (intr == 3) irq = 11;
		ti->global_int_enable = 0;
		ti->adapter_int_enable = 0;
		/* MCA reports its shared-RAM page via the reset-release port */
		ti->sram_phys=(__u32)(inb(PIOaddr+ADAPTRESETREL) & 0xfe) << 12;
		break;
	case TR_ISAPNP:
		if (!t_irq) {
			if (intr == 0) irq = 9;
			if (intr == 1) irq = 3;
			if (intr == 2) irq = 10;
			if (intr == 3) irq = 11;
		} else
			irq=t_irq;
		/* spin until the adapter publishes its RAM page in RRR */
		timeout = jiffies + TR_SPIN_INTERVAL;
		while (!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)){
			if (!time_after(jiffies, timeout)) continue;
			DPRINTK( "Hardware timeout during initialization.\n");
			iounmap(t_mmio);
			return -ENODEV;
		}
		ti->sram_phys =
		     ((__u32)readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_EVEN)<<12);
		ti->adapter_int_enable = PIOaddr + ADAPTINTREL;
		break;
	} /*end switch (cardpresent) */
#endif	/*not PCMCIA */

	if (ibmtr_debug_trace & TRC_INIT) {	/* just report int */
		DPRINTK("irq=%d", irq);
		printk(", sram_phys=0x%x", ti->sram_phys);
		if(ibmtr_debug_trace&TRC_INITV){ /* full chat in verbose only */
			DPRINTK(", ti->mmio=%p", ti->mmio);
			printk(", segment=%02X", segment);
		}
		printk(".\n");
	}

	/* Get hw address of token ring card: the MAC is stored one nibble
	 * per AIP register pair, reassembled two nibbles per byte */
	j = 0;
	for (i = 0; i < 0x18; i = i + 2) {
		/* technical reference states to do this */
		temp = readb(ti->mmio + AIP + i) & 0x0f;
		ti->hw_address[j] = temp;
		if (j & 1)
			dev->dev_addr[(j / 2)] =
				ti->hw_address[j]+ (ti->hw_address[j - 1] << 4);
		++j;
	}
	/* get Adapter type: 'F' = Adapter/A, 'E' = 16/4 Adapter II,... */
	ti->adapter_type = readb(ti->mmio + AIPADAPTYPE);

	/* get Data Rate: F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */
	ti->data_rate = readb(ti->mmio + AIPDATARATE);

	/* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */
	ti->token_release = readb(ti->mmio + AIPEARLYTOKEN);

	/* How much shared RAM is on adapter ? */
	if (ti->turbo) {
		ti->avail_shared_ram=127;
	} else {
		ti->avail_shared_ram = get_sram_size(ti);/*in 512 byte units */
	}
	/* We need to set or do a bunch of work here based on previous results*/
	/* Support paging? What sizes?: F=no, E=16k, D=32k, C=16 & 32k */
	ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE);

	/* Available DHB 4Mb size: F=2048, E=4096, D=4464 */
	switch (readb(ti->mmio + AIP4MBDHB)) {
	case 0xe: ti->dhb_size4mb = 4096; break;
	case 0xd: ti->dhb_size4mb = 4464; break;
	default: ti->dhb_size4mb = 2048; break;
	}

	/* Available DHB 16Mb size: F=2048, E=4096, D=8192, C=16384, B=17960 */
	switch (readb(ti->mmio + AIP16MBDHB)) {
	case 0xe: ti->dhb_size16mb = 4096; break;
	case 0xd: ti->dhb_size16mb = 8192; break;
	case 0xc: ti->dhb_size16mb = 16384; break;
	case 0xb: ti->dhb_size16mb = 17960; break;
	default: ti->dhb_size16mb = 2048; break;
	}

	/* We must figure out how much shared memory space this adapter
	 * will occupy so that if there are two adapters we can fit both
	 * in. Given a choice, we will limit this adapter to 32K. The
	 * maximum space we will use for two adapters is 64K so if the
	 * adapter we are working on demands 64K (it also doesn't support
	 * paging), then only one adapter can be supported.
	 */

	/*
	 * determine how much of total RAM is mapped into PC space
	 */
	ti->mapped_ram_size= /*sixteen to onehundredtwentyeight 512byte blocks*/
	    1<< ((readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03) + 4);
	ti->page_mask = 0;
	if (ti->turbo) ti->page_mask=0xf0;
	else if (ti->shared_ram_paging == 0xf); /* No paging in adapter */
	else {
#ifdef ENABLE_PAGING
		unsigned char pg_size = 0;
		/* BMS: page size: PCMCIA, use configuration register;
		   ISAPNP, use LANAIDC config tool from www.ibm.com */
		switch (ti->shared_ram_paging) {
		case 0xf:
			break;
		case 0xe:
			ti->page_mask = (ti->mapped_ram_size == 32) ? 0xc0 : 0;
			pg_size = 32;	/* 16KB page size */
			break;
		case 0xd:
			ti->page_mask = (ti->mapped_ram_size == 64) ? 0x80 : 0;
			pg_size = 64;	/* 32KB page size */
			break;
		case 0xc:
			switch (ti->mapped_ram_size) {
			case 32:
				ti->page_mask = 0xc0;
				pg_size = 32;
				break;
			case 64:
				ti->page_mask = 0x80;
				pg_size = 64;
				break;
			}
			break;
		default:
			DPRINTK("Unknown shared ram paging info %01X\n",
				ti->shared_ram_paging);
			iounmap(t_mmio);
			return -ENODEV;
			break;
		} /*end switch shared_ram_paging */

		if (ibmtr_debug_trace & TRC_INIT)
			DPRINTK("Shared RAM paging code: %02X, "
				"mapped RAM size: %dK, shared RAM size: %dK, "
				"page mask: %02X\n:",
				ti->shared_ram_paging, ti->mapped_ram_size / 2,
				ti->avail_shared_ram / 2, ti->page_mask);
#endif	/*ENABLE_PAGING */
	}

#ifndef PCMCIA
	/* finish figuring the shared RAM address */
	if (cardpresent == TR_ISA) {
		/* RRR bits 3,2 select the adapter's RAM boundary alignment */
		static const __u32 ram_bndry_mask[] = {
			0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000
		};
		__u32 new_base, rrr_32, chk_base, rbm;

		rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03;
		rbm = ram_bndry_mask[rrr_32];
		new_base = (ibmtr_mem_base + (~rbm)) & rbm;/* up to boundary */
		chk_base = new_base + (ti->mapped_ram_size << 9);
		if (chk_base > (ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE)) {
			DPRINTK("Shared RAM for this adapter (%05x) exceeds "
				"driver limit (%05x), adapter not started.\n",
				chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE);
			iounmap(t_mmio);
			return -ENODEV;
		} else { /* seems cool, record what we have figured out */
			ti->sram_base = new_base >> 12;
			ibmtr_mem_base = chk_base;
		}
	}
	else ti->sram_base = ti->sram_phys >> 12;

	/* The PCMCIA has already got the interrupt line and the io port,
	   so no chance of anybody else getting it - MLP */
	if (request_irq(dev->irq = irq, tok_interrupt, 0, "ibmtr", dev) != 0) {
		DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n",
			irq);
		iounmap(t_mmio);
		return -ENODEV;
	}
	/*?? Now, allocate some of the PIO PORTs for this driver.. */
	/* record PIOaddr range as busy */
	if (!request_region(PIOaddr, IBMTR_IO_EXTENT, "ibmtr")) {
		DPRINTK("Could not grab PIO range. Halting driver.\n");
		free_irq(dev->irq, dev);
		iounmap(t_mmio);
		return -EBUSY;
	}

	if (!version_printed++) {
		printk(version);
	}
#endif /* !PCMCIA */
	DPRINTK("%s %s found\n",
		channel_def[cardpresent - 1], adapter_def(ti->adapter_type));
	DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n",
		irq, PIOaddr, ti->mapped_ram_size / 2);
	DPRINTK("Hardware address : %pM\n", dev->dev_addr);
	if (ti->page_mask)
		DPRINTK("Shared RAM paging enabled. "
			"Page size: %uK Shared Ram size %dK\n",
			((ti->page_mask^0xff)+1) >>2, ti->avail_shared_ram / 2);
	else
		DPRINTK("Shared RAM paging disabled. ti->page_mask %x\n",
			ti->page_mask);

	/* Calculate the maximum DHB we can use */
	/* two cases where avail_shared_ram doesn't equal mapped_ram_size:
	    1. avail_shared_ram is 127 but mapped_ram_size is 128 (typical)
	    2. user has configured adapter for less than avail_shared_ram
	       but is not using paging (she should use paging, I believe)
	*/
	if (!ti->page_mask) {
		ti->avail_shared_ram=
				min(ti->mapped_ram_size,ti->avail_shared_ram);
	}

	/* size DHBs and receive buffers from available shared RAM
	 * (units of 512 bytes) */
	switch (ti->avail_shared_ram) {
	case 16:		/* 8KB shared RAM */
		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)2048);
		ti->rbuf_len4 = 1032;
		ti->rbuf_cnt4=2;
		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)2048);
		ti->rbuf_len16 = 1032;
		ti->rbuf_cnt16=2;
		break;
	case 32:		/* 16KB shared RAM */
		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
		ti->rbuf_len4 = 1032;
		ti->rbuf_cnt4=4;
		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)4096);
		ti->rbuf_len16 = 1032;	/*1024 usable */
		ti->rbuf_cnt16=4;
		break;
	case 64:		/* 32KB shared RAM */
		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
		ti->rbuf_len4 = 1032;
		ti->rbuf_cnt4=6;
		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)10240);
		ti->rbuf_len16 = 1032;
		ti->rbuf_cnt16=6;
		break;
	case 127:		/* 63.5KB shared RAM */
		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
		ti->rbuf_len4 = 1032;
		ti->rbuf_cnt4=6;
		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)16384);
		ti->rbuf_len16 = 1032;
		ti->rbuf_cnt16=16;
		break;
	case 128:		/* 64KB shared RAM */
		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
		ti->rbuf_len4 = 1032;
		ti->rbuf_cnt4=6;
		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)17960);
		ti->rbuf_len16 = 1032;
		ti->rbuf_cnt16=16;
		break;
	default:
		ti->dhb_size4mb = 2048;
		ti->rbuf_len4 = 1032;
		ti->rbuf_cnt4=2;
		ti->dhb_size16mb = 2048;
		ti->rbuf_len16 = 1032;
		ti->rbuf_cnt16=2;
		break;
	}
	/* this formula is not smart enough for the paging case
	ti->rbuf_cnt<x> = (ti->avail_shared_ram * BLOCKSZ - ADAPT_PRIVATE -
			ARBLENGTH - SSBLENGTH - DLC_MAX_SAP * SAPLENGTH -
			DLC_MAX_STA * STALENGTH - ti->dhb_size<x>mb * NUM_DHB -
			SRBLENGTH - ASBLENGTH) / ti->rbuf_len<x>;
	*/
	ti->maxmtu16 = (ti->rbuf_len16 - 8) * ti->rbuf_cnt16 - TR_HLEN;
	ti->maxmtu4 = (ti->rbuf_len4 - 8) * ti->rbuf_cnt4 - TR_HLEN;
	/*BMS assuming 18 bytes of Routing Information (usually works) */
	DPRINTK("Maximum Receive Internet Protocol MTU 16Mbps: %d, 4Mbps: %d\n",
		ti->maxmtu16, ti->maxmtu4);

	dev->base_addr = PIOaddr;	/* set the value for device */
	dev->mem_start = ti->sram_base << 12;
	dev->mem_end = dev->mem_start + (ti->mapped_ram_size << 9) - 1;
	trdev_init(dev);
	return 0;   /* Return 0 to indicate we have found a Token Ring card. */
} /*ibmtr_probe1() */
796
797/*****************************************************************************/
798
799/* query the adapter for the size of shared RAM */
800/* the function returns the RAM size in units of 512 bytes */
801
802static unsigned char __devinit get_sram_size(struct tok_info *adapt_info)
803{
804 unsigned char avail_sram_code;
805 static unsigned char size_code[] = { 0, 16, 32, 64, 127, 128 };
806 /* Adapter gives
807 'F' -- use RRR bits 3,2
808 'E' -- 8kb 'D' -- 16kb
809 'C' -- 32kb 'A' -- 64KB
810 'B' - 64KB less 512 bytes at top
811 (WARNING ... must zero top bytes in INIT */
812
813 avail_sram_code = 0xf - readb(adapt_info->mmio + AIPAVAILSHRAM);
814 if (avail_sram_code) return size_code[avail_sram_code];
815 else /* for code 'F', must compute size from RRR(3,2) bits */
816 return 1 <<
817 ((readb(adapt_info->mmio+ACA_OFFSET+ACA_RW+RRR_ODD)>>2&3)+4);
818}
819
820/*****************************************************************************/
821
/* net_device_ops shared by every ibmtr interface */
static const struct net_device_ops trdev_netdev_ops = {
	.ndo_open = tok_open,
	.ndo_stop = tok_close,
	.ndo_start_xmit = tok_send_packet,
	.ndo_set_rx_mode = tok_set_multicast_list,
	.ndo_change_mtu = ibmtr_change_mtu,
};
829
/*
 * Final net_device setup after a successful probe: select the SRB page,
 * clear the sticky open-failure flag, and install the device ops.
 * Always returns 0.
 */
static int __devinit trdev_init(struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);

	SET_PAGE(ti->srb_page);
	ti->open_failure = NO ;
	dev->netdev_ops = &trdev_netdev_ops;

	return 0;
}
840
841/*****************************************************************************/
842
/*
 * Hardware-reset the adapter and wait for its first interrupt.
 * Interrupts are masked around the reset pulse (50ms hold), paging is
 * re-enabled if configured, then we sleep up to 4s for the reset
 * interrupt to wake us.  Returns 0 on wakeup in time, -EAGAIN on
 * timeout.
 */
static int tok_init_card(struct net_device *dev)
{
	struct tok_info *ti;
	short PIOaddr;
	unsigned long i;

	PIOaddr = dev->base_addr;
	ti = netdev_priv(dev);
	/* Special processing for first interrupt after reset */
	ti->do_tok_int = FIRST_INT;
	/* Reset adapter */
	writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
	outb(0, PIOaddr + ADAPTRESET);

	schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */

	outb(0, PIOaddr + ADAPTRESETREL);
#ifdef ENABLE_PAGING
	if (ti->page_mask)
		writeb(SRPR_ENABLE_PAGING,ti->mmio+ACA_OFFSET+ACA_RW+SRPR_EVEN);
#endif
	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
	/* NOTE(review): sleep_on_timeout() is the old racy wait API — the
	 * interrupt could fire between INT_ENABLE and the sleep; verify
	 * against the wait_event family before modernizing. */
	i = sleep_on_timeout(&ti->wait_for_reset, 4 * HZ);
	return i? 0 : -EAGAIN;
}
868
869/*****************************************************************************/
/*
 * ndo_open handler: reset the card, then repeatedly issue
 * DIR_OPEN_ADAPTER until both the adapter and the SAP report OPEN,
 * sleeping between attempts.  A signal or a zero-timeout wakeup breaks
 * the retry loop; then pending interrupts are killed and -EAGAIN is
 * returned.  Returns 0 once the interface is up and the queue started.
 */
static int tok_open(struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);
	int i;

	/*the case we were left in a failure state during a previous open */
	if (ti->open_failure == YES) {
		DPRINTK("Last time you were disconnected, how about now?\n");
		printk("You can't insert with an ICS connector half-cocked.\n");
	}

	ti->open_status = CLOSED; /* CLOSED or OPEN */
	ti->sap_status = CLOSED; /* CLOSED or OPEN */
	ti->open_failure = NO; /* NO or YES */
	ti->open_mode = MANUAL; /* MANUAL or AUTOMATIC */

	/* bit 0 of sram_phys doubles as the "card closed/removed" flag
	 * tested in tok_interrupt(); clear it again here */
	ti->sram_phys &= ~1; /* to reverse what we do in tok_close */
	/* init the spinlock */
	spin_lock_init(&ti->lock);
	init_timer(&ti->tr_timer);

	i = tok_init_card(dev);
	if (i) return i;

	while (1){
		tok_open_adapter((unsigned long) dev);
		i= interruptible_sleep_on_timeout(&ti->wait_for_reset, 25 * HZ);
		/* sig catch: estimate opening adapter takes more than .5 sec*/
		if (i>(245*HZ)/10) break; /* fancier than if (i==25*HZ) */
		if (i==0) break;
		if (ti->open_status == OPEN && ti->sap_status==OPEN) {
			netif_start_queue(dev);
			DPRINTK("Adapter is up and running\n");
			return 0;
		}
		i=schedule_timeout_interruptible(TR_RETRY_INTERVAL);
		/* wait 30 seconds */
		if(i!=0) break; /*prob. a signal, like the i>24*HZ case above */
	}
	outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/
	DPRINTK("TERMINATED via signal\n"); /*BMS useful */
	return -EAGAIN;
}
913
914/*****************************************************************************/
915
/* Byte offsets of fields inside the DIR_OPEN_ADAPTER SRB */
#define COMMAND_OFST 0
#define OPEN_OPTIONS_OFST 8
#define NUM_RCV_BUF_OFST 24
#define RCV_BUF_LEN_OFST 26
#define DHB_LENGTH_OFST 28
#define NUM_DHB_OFST 30
#define DLC_MAX_SAP_OFST 32
#define DLC_MAX_STA_OFST 33

/*
 * Build and issue a DIR_OPEN_ADAPTER SRB in the adapter's init SRB
 * area, selecting the 4Mb or 16Mb buffer geometry worked out at probe
 * time.  Multi-byte SRB fields are big-endian, hence the htons() before
 * each writew() (reads elsewhere use ntohs(readw(...))).  Finishes by
 * raising CMD_IN_SRB so the adapter executes the command; completion
 * arrives via interrupt.  @dev_addr is the net_device cast to a long
 * (timer-callback calling convention).
 */
static void tok_open_adapter(unsigned long dev_addr)
{
	struct net_device *dev = (struct net_device *) dev_addr;
	struct tok_info *ti;
	int i;

	ti = netdev_priv(dev);
	SET_PAGE(ti->init_srb_page);
	writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
	for (i = 0; i < sizeof(struct dir_open_adapter); i++)
		writeb(0, ti->init_srb + i);
	writeb(DIR_OPEN_ADAPTER, ti->init_srb + COMMAND_OFST);
	writew(htons(OPEN_PASS_BCON_MAC), ti->init_srb + OPEN_OPTIONS_OFST);
	if (ti->ring_speed == 16) {
		writew(htons(ti->dhb_size16mb), ti->init_srb + DHB_LENGTH_OFST);
		writew(htons(ti->rbuf_cnt16), ti->init_srb + NUM_RCV_BUF_OFST);
		writew(htons(ti->rbuf_len16), ti->init_srb + RCV_BUF_LEN_OFST);
	} else {
		writew(htons(ti->dhb_size4mb), ti->init_srb + DHB_LENGTH_OFST);
		writew(htons(ti->rbuf_cnt4), ti->init_srb + NUM_RCV_BUF_OFST);
		writew(htons(ti->rbuf_len4), ti->init_srb + RCV_BUF_LEN_OFST);
	}
	writeb(NUM_DHB, /* always 2 */ ti->init_srb + NUM_DHB_OFST);
	writeb(DLC_MAX_SAP, ti->init_srb + DLC_MAX_SAP_OFST);
	writeb(DLC_MAX_STA, ti->init_srb + DLC_MAX_STA_OFST);
	ti->srb = ti->init_srb; /* We use this one in the interrupt handler */
	ti->srb_page = ti->init_srb_page;
	DPRINTK("Opening adapter: Xmit bfrs: %d X %d, Rcv bfrs: %d X %d\n",
		readb(ti->init_srb + NUM_DHB_OFST),
		ntohs(readw(ti->init_srb + DHB_LENGTH_OFST)),
		ntohs(readw(ti->init_srb + NUM_RCV_BUF_OFST)),
		ntohs(readw(ti->init_srb + RCV_BUF_LEN_OFST)));
	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
}
960
961/*****************************************************************************/
962
/*
 * Issue a DLC_OPEN_SAP SRB for the given SAP value (called with
 * EXTENDED_SAP once DIR_OPEN_ADAPTER succeeds).  Zeroes the SRB,
 * fills in the SAP parameters, and raises CMD_IN_SRB; the response
 * arrives via interrupt.
 */
static void open_sap(unsigned char type, struct net_device *dev)
{
	int i;
	struct tok_info *ti = netdev_priv(dev);

	SET_PAGE(ti->srb_page);
	for (i = 0; i < sizeof(struct dlc_open_sap); i++)
		writeb(0, ti->srb + i);

/* Byte offsets of fields inside the DLC_OPEN_SAP SRB */
#define MAX_I_FIELD_OFST 14
#define SAP_VALUE_OFST 16
#define SAP_OPTIONS_OFST 17
#define STATION_COUNT_OFST 18

	writeb(DLC_OPEN_SAP, ti->srb + COMMAND_OFST);
	writew(htons(MAX_I_FIELD), ti->srb + MAX_I_FIELD_OFST);
	writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, ti->srb+ SAP_OPTIONS_OFST);
	writeb(SAP_OPEN_STATION_CNT, ti->srb + STATION_COUNT_OFST);
	writeb(type, ti->srb + SAP_VALUE_OFST);
	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
}
984
985
986/*****************************************************************************/
987
988static void tok_set_multicast_list(struct net_device *dev)
989{
990 struct tok_info *ti = netdev_priv(dev);
991 struct netdev_hw_addr *ha;
992 unsigned char address[4];
993
994 int i;
995
996 /*BMS the next line is CRUCIAL or you may be sad when you */
997 /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
998 if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
999 address[0] = address[1] = address[2] = address[3] = 0;
1000 netdev_for_each_mc_addr(ha, dev) {
1001 address[0] |= ha->addr[2];
1002 address[1] |= ha->addr[3];
1003 address[2] |= ha->addr[4];
1004 address[3] |= ha->addr[5];
1005 }
1006 SET_PAGE(ti->srb_page);
1007 for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
1008 writeb(0, ti->srb + i);
1009
1010#define FUNCT_ADDRESS_OFST 6
1011
1012 writeb(DIR_SET_FUNC_ADDR, ti->srb + COMMAND_OFST);
1013 for (i = 0; i < 4; i++)
1014 writeb(address[i], ti->srb + FUNCT_ADDRESS_OFST + i);
1015 writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
1016#if TR_VERBOSE
1017 DPRINTK("Setting functional address: ");
1018 for (i=0;i<4;i++) printk("%02X ", address[i]);
1019 printk("\n");
1020#endif
1021}
1022
1023/*****************************************************************************/
1024
/* Offset of the station-id field inside the XMIT_UI_FRAME SRB */
#define STATION_ID_OFST 4

/*
 * ndo_start_xmit handler: stop the queue (one frame in flight at a
 * time), stash the skb, and post an XMIT_UI_FRAME SRB command.  The
 * adapter later interrupts to ask for the frame data, at which point
 * ti->current_skb is consumed; the queue is restarted from that path.
 * Always returns NETDEV_TX_OK — the skb is owned by the driver now.
 */
static netdev_tx_t tok_send_packet(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct tok_info *ti;
	unsigned long flags;
	ti = netdev_priv(dev);

	netif_stop_queue(dev);

	/* lock against other CPUs */
	spin_lock_irqsave(&(ti->lock), flags);

	/* Save skb; we'll need it when the adapter asks for the data */
	ti->current_skb = skb;
	SET_PAGE(ti->srb_page);
	writeb(XMIT_UI_FRAME, ti->srb + COMMAND_OFST);
	writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST);
	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
	spin_unlock_irqrestore(&(ti->lock), flags);
	return NETDEV_TX_OK;
}
1048
1049/*****************************************************************************/
1050
/*
 * ndo_stop handler: kill the retry timer, reset the adapter, and mark
 * it closed.  Setting bit 0 of sram_phys is the flag tok_interrupt()
 * checks to discard interrupts from a closed/removed card; tok_open()
 * clears it again.
 */
static int tok_close(struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);

	/* Important for PCMCIA hot unplug, otherwise, we'll pull the card, */
	/* unloading the module from memory, and then if a timer pops, ouch */
	del_timer_sync(&ti->tr_timer);
	outb(0, dev->base_addr + ADAPTRESET);
	ti->sram_phys |= 1;
	ti->open_status = CLOSED;

	netif_stop_queue(dev);
	DPRINTK("Adapter is closed.\n");
	return 0;
}
1066
1067/*****************************************************************************/
1068
/* Byte offsets of fields inside the DIR_OPEN_ADAPTER response SRB */
#define RETCODE_OFST 2
#define OPEN_ERROR_CODE_OFST 6
#define ASB_ADDRESS_OFST 8
#define SRB_ADDRESS_OFST 10
#define ARB_ADDRESS_OFST 12
#define SSB_ADDRESS_OFST 14

/* Decode tables for open-failure codes: dir_open_adapter() indexes
 * these with err/16 - 1 (phase) and err%16 - 1 (error). */
static char *printphase[]= {"Lobe media test","Physical insertion",
	"Address verification","Roll call poll","Request Parameters"};
static char *printerror[]={"Function failure","Signal loss","Reserved",
	"Frequency error","Timeout","Ring failure","Ring beaconing",
	"Duplicate node address",
	"Parameter request-retry count exceeded","Remove received",
	"IMPL force received","Duplicate modifier",
	"No monitor detected","Monitor contention failed for RPL"};
1084
1085static void __iomem *map_address(struct tok_info *ti, unsigned index, __u8 *page)
1086{
1087 if (ti->page_mask) {
1088 *page = (index >> 8) & ti->page_mask;
1089 index &= ~(ti->page_mask << 8);
1090 }
1091 return ti->sram_virt + index;
1092}
1093
/*
 * Handle the DIR_OPEN_ADAPTER response: latch the SRB/SSB/ARB/ASB
 * addresses the adapter reported, then either proceed to open the SAP
 * (success) or decode the failure, decide between retry (REOPEN via
 * timer, or waking tok_open() for a manual retry) and permanent FAIL.
 */
static void dir_open_adapter (struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);
	unsigned char ret_code;
	__u16 err;

	/* adapter-reported control block offsets -> virtual addresses */
	ti->srb = map_address(ti,
		ntohs(readw(ti->init_srb + SRB_ADDRESS_OFST)),
		&ti->srb_page);
	ti->ssb = map_address(ti,
		ntohs(readw(ti->init_srb + SSB_ADDRESS_OFST)),
		&ti->ssb_page);
	ti->arb = map_address(ti,
		ntohs(readw(ti->init_srb + ARB_ADDRESS_OFST)),
		&ti->arb_page);
	ti->asb = map_address(ti,
		ntohs(readw(ti->init_srb + ASB_ADDRESS_OFST)),
		&ti->asb_page);
	ti->current_skb = NULL;
	ret_code = readb(ti->init_srb + RETCODE_OFST);
	err = ntohs(readw(ti->init_srb + OPEN_ERROR_CODE_OFST));
	if (!ret_code) {
		ti->open_status = OPEN; /* TR adapter is now available */
		if (ti->open_mode == AUTOMATIC) {
			DPRINTK("Adapter reopened.\n");
		}
		writeb(~SRB_RESP_INT, ti->mmio+ACA_OFFSET+ACA_RESET+ISRP_ODD);
		open_sap(EXTENDED_SAP, dev);
		return;
	}
	ti->open_failure = YES;
	/* ret_code 7 carries a phase/error word; decode the known cases */
	if (ret_code == 7){
		if (err == 0x24) {
			if (!ti->auto_speedsave) {
				DPRINTK("Open failed: Adapter speed must match "
					"ring speed if Automatic Ring Speed Save is "
					"disabled.\n");
				ti->open_action = FAIL;
			}else
				DPRINTK("Retrying open to adjust to "
					"ring speed, ");
		} else if (err == 0x2d) {
			DPRINTK("Physical Insertion: No Monitor Detected, ");
			printk("retrying after %ds delay...\n",
					TR_RETRY_INTERVAL/HZ);
		} else if (err == 0x11) {
			DPRINTK("Lobe Media Function Failure (0x11), ");
			printk(" retrying after %ds delay...\n",
					TR_RETRY_INTERVAL/HZ);
		} else {
			char **prphase = printphase;
			char **prerror = printerror;
			int pnr = err / 16 - 1;
			int enr = err % 16 - 1;
			DPRINTK("TR Adapter misc open failure, error code = ");
			if (pnr < 0 || pnr >= ARRAY_SIZE(printphase) ||
					enr < 0 ||
					enr >= ARRAY_SIZE(printerror))
				printk("0x%x, invalid Phase/Error.", err);
			else
				printk("0x%x, Phase: %s, Error: %s\n", err,
						prphase[pnr], prerror[enr]);
			printk(" retrying after %ds delay...\n",
					TR_RETRY_INTERVAL/HZ);
		}
	} else DPRINTK("open failed: ret_code = %02X..., ", ret_code);
	if (ti->open_action != FAIL) {
		if (ti->open_mode==AUTOMATIC){
			ti->open_action = REOPEN;
			ibmtr_reset_timer(&(ti->tr_timer), dev);
			return;
		}
		/* manual open: let tok_open()'s retry loop run again */
		wake_up(&ti->wait_for_reset);
		return;
	}
	DPRINTK("FAILURE, CAPUT\n");
}
1171
1172/******************************************************************************/
1173
/*
 * Adapter interrupt service routine.
 *
 * All control traffic goes through the ACA (Attachment Control Area)
 * MMIO registers; responses live in four shared-RAM mailboxes that are
 * dispatched in turn below: SRB (our command responses), ASB (our reply
 * buffer), ARB (adapter-initiated requests such as received data) and
 * SSB (transmit completion status).  Returns IRQ_NONE only when the
 * PCMCIA extraction flag is set in sram_phys.
 */
static irqreturn_t tok_interrupt(int irq, void *dev_id)
{
	unsigned char status;
	/* unsigned char status_even ; */
	struct tok_info *ti;
	struct net_device *dev;
#ifdef ENABLE_PAGING
	unsigned char save_srpr;	/* shared-RAM page register to restore on exit */
#endif

	dev = dev_id;
#if TR_VERBOSE
	DPRINTK("Int from tok_driver, dev : %p irq%d\n", dev,irq);
#endif
	ti = netdev_priv(dev);
	if (ti->sram_phys & 1)
		return IRQ_NONE;         /* PCMCIA card extraction flag */
	spin_lock(&(ti->lock));
#ifdef ENABLE_PAGING
	save_srpr = readb(ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif

	/* Disable interrupts till processing is finished */
	writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);

	/* Reset interrupt for ISA boards */
	if (ti->adapter_int_enable)
		outb(0, ti->adapter_int_enable);
	else /* used for PCMCIA cards */
		outb(0, ti->global_int_enable);
	/* Very first interrupt after reset: run the bring-up path instead. */
	if (ti->do_tok_int == FIRST_INT){
		initial_tok_int(dev);
#ifdef ENABLE_PAGING
		writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif
		spin_unlock(&(ti->lock));
		return IRQ_HANDLED;
	}
	/* Begin interrupt handler HERE inline to avoid the extra
	   levels of logic and call depth for the original solution. */
	status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD);
	/*BMSstatus_even = readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) */
	/*BMSdebugprintk("tok_interrupt: ISRP_ODD = 0x%x ISRP_EVEN = 0x%x\n", */
	/*BMS status,status_even); */

	/* Fatal adapter check: dump the 8 reason bytes, mark everything
	 * closed and schedule a full reset via the retry timer. */
	if (status & ADAP_CHK_INT) {
		int i;
		void __iomem *check_reason;
		__u8 check_reason_page = 0;
		check_reason = map_address(ti,
			ntohs(readw(ti->mmio+ ACA_OFFSET+ACA_RW + WWCR_EVEN)),
			&check_reason_page);
		SET_PAGE(check_reason_page);

		DPRINTK("Adapter check interrupt\n");
		DPRINTK("8 reason bytes follow: ");
		for (i = 0; i < 8; i++, check_reason++)
			printk("%02X ", (int) readb(check_reason));
		printk("\n");
		writeb(~ADAP_CHK_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
		status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRA_EVEN);
		DPRINTK("ISRA_EVEN == 0x02%x\n",status);
		ti->open_status = CLOSED;
		ti->sap_status = CLOSED;
		ti->open_mode = AUTOMATIC;
		netif_carrier_off(dev);
		netif_stop_queue(dev);
		ti->open_action = RESTART;
		outb(0, dev->base_addr + ADAPTRESET);
		ibmtr_reset_timer(&(ti->tr_timer), dev);/*BMS try to reopen*/
		spin_unlock(&(ti->lock));
		return IRQ_HANDLED;
	}
	/* Bus timeout / error / access interrupts: log, clear, re-enable. */
	if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)
		& (TCR_INT | ERR_INT | ACCESS_INT)) {
		DPRINTK("adapter error: ISRP_EVEN : %02x\n",
			(int)readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRP_EVEN));
		writeb(~(TCR_INT | ERR_INT | ACCESS_INT),
			ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
		status= readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRA_EVEN);/*BMS*/
		DPRINTK("ISRA_EVEN == 0x02%x\n",status);/*BMS*/
		writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
#ifdef ENABLE_PAGING
		writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif
		spin_unlock(&(ti->lock));
		return IRQ_HANDLED;
	}
	if (status & SRB_RESP_INT) {	/* SRB response */
		SET_PAGE(ti->srb_page);
#if TR_VERBOSE
		DPRINTK("SRB resp: cmd=%02X rsp=%02X\n",
			readb(ti->srb), readb(ti->srb + RETCODE_OFST));
#endif
		switch (readb(ti->srb)) {	/* SRB command check */
		case XMIT_DIR_FRAME:{
			unsigned char xmit_ret_code;
			/* 0xff means "in progress", not an error */
			xmit_ret_code = readb(ti->srb + RETCODE_OFST);
			if (xmit_ret_code == 0xff) break;
			DPRINTK("error on xmit_dir_frame request: %02X\n",
				xmit_ret_code);
			if (ti->current_skb) {
				dev_kfree_skb_irq(ti->current_skb);
				ti->current_skb = NULL;
			}
			/*dev->tbusy = 0;*/
			netif_wake_queue(dev);
			if (ti->readlog_pending)
				ibmtr_readlog(dev);
			break;
		}
		case XMIT_UI_FRAME:{
			unsigned char xmit_ret_code;

			xmit_ret_code = readb(ti->srb + RETCODE_OFST);
			if (xmit_ret_code == 0xff) break;
			DPRINTK("error on xmit_ui_frame request: %02X\n",
				xmit_ret_code);
			if (ti->current_skb) {
				dev_kfree_skb_irq(ti->current_skb);
				ti->current_skb = NULL;
			}
			netif_wake_queue(dev);
			if (ti->readlog_pending)
				ibmtr_readlog(dev);
			break;
		}
		case DIR_OPEN_ADAPTER:
			dir_open_adapter(dev);
			break;
		case DLC_OPEN_SAP:
			if (readb(ti->srb + RETCODE_OFST)) {
				DPRINTK("open_sap failed: ret_code = %02X, "
					"retrying\n",
					(int) readb(ti->srb + RETCODE_OFST));
				ti->open_action = REOPEN;
				ibmtr_reset_timer(&(ti->tr_timer), dev);
				break;
			}
			ti->exsap_station_id = readw(ti->srb + STATION_ID_OFST);
			ti->sap_status = OPEN;/* TR adapter is now available */
			if (ti->open_mode==MANUAL){
				wake_up(&ti->wait_for_reset);
				break;
			}
			netif_wake_queue(dev);
			netif_carrier_on(dev);
			break;
		case DIR_INTERRUPT:
		case DIR_MOD_OPEN_PARAMS:
		case DIR_SET_GRP_ADDR:
		case DIR_SET_FUNC_ADDR:
		case DLC_CLOSE_SAP:
			if (readb(ti->srb + RETCODE_OFST))
				DPRINTK("error on %02X: %02X\n",
					(int) readb(ti->srb + COMMAND_OFST),
					(int) readb(ti->srb + RETCODE_OFST));
			break;
		case DIR_READ_LOG:
			if (readb(ti->srb + RETCODE_OFST)){
				DPRINTK("error on dir_read_log: %02X\n",
					(int) readb(ti->srb + RETCODE_OFST));
				netif_wake_queue(dev);
				break;
			}
#if IBMTR_DEBUG_MESSAGES

/* Byte offsets of the DIR_READ_LOG error counters within the SRB. */
#define LINE_ERRORS_OFST   0
#define INTERNAL_ERRORS_OFST   1
#define BURST_ERRORS_OFST   2
#define AC_ERRORS_OFST   3
#define ABORT_DELIMITERS_OFST   4
#define LOST_FRAMES_OFST   6
#define RECV_CONGEST_COUNT_OFST   7
#define FRAME_COPIED_ERRORS_OFST   8
#define FREQUENCY_ERRORS_OFST   9
#define TOKEN_ERRORS_OFST   10

			DPRINTK("Line errors %02X, Internal errors %02X, "
			"Burst errors %02X\n" "A/C errors %02X, "
			"Abort delimiters %02X, Lost frames %02X\n"
			"Receive congestion count %02X, "
			"Frame copied errors %02X\nFrequency errors %02X, "
			"Token errors %02X\n",
			(int) readb(ti->srb + LINE_ERRORS_OFST),
			(int) readb(ti->srb + INTERNAL_ERRORS_OFST),
			(int) readb(ti->srb + BURST_ERRORS_OFST),
			(int) readb(ti->srb + AC_ERRORS_OFST),
			(int) readb(ti->srb + ABORT_DELIMITERS_OFST),
			(int) readb(ti->srb + LOST_FRAMES_OFST),
			(int) readb(ti->srb + RECV_CONGEST_COUNT_OFST),
			(int) readb(ti->srb + FRAME_COPIED_ERRORS_OFST),
			(int) readb(ti->srb + FREQUENCY_ERRORS_OFST),
			(int) readb(ti->srb + TOKEN_ERRORS_OFST));
#endif
			netif_wake_queue(dev);
			break;
		default:
			DPRINTK("Unknown command %02X encountered\n",
				(int) readb(ti->srb));
		}	/* end switch SRB command check */
		writeb(~SRB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
	}	/* if SRB response */
	if (status & ASB_FREE_INT) {	/* ASB response */
		SET_PAGE(ti->asb_page);
#if TR_VERBOSE
		DPRINTK("ASB resp: cmd=%02X\n", readb(ti->asb));
#endif

		switch (readb(ti->asb)) {	/* ASB command check */
		case REC_DATA:
		case XMIT_UI_FRAME:
		case XMIT_DIR_FRAME:
			break;
		default:
			DPRINTK("unknown command in asb %02X\n",
				(int) readb(ti->asb));
		}	/* switch ASB command check */
		if (readb(ti->asb + 2) != 0xff)	/* checks ret_code */
			DPRINTK("ASB error %02X in cmd %02X\n",
				(int) readb(ti->asb + 2), (int) readb(ti->asb));
		writeb(~ASB_FREE_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
	}	/* if ASB response */

/* Byte offsets within the ARB for DLC_STATUS / RING_STAT_CHANGE fields. */
#define STATUS_OFST 6
#define NETW_STATUS_OFST 6

	if (status & ARB_CMD_INT) {	/* ARB response */
		SET_PAGE(ti->arb_page);
#if TR_VERBOSE
		DPRINTK("ARB resp: cmd=%02X\n", readb(ti->arb));
#endif

		switch (readb(ti->arb)) {	/* ARB command check */
		case DLC_STATUS:
			DPRINTK("DLC_STATUS new status: %02X on station %02X\n",
				ntohs(readw(ti->arb + STATUS_OFST)),
				ntohs(readw(ti->arb+ STATION_ID_OFST)));
			break;
		case REC_DATA:
			tr_rx(dev);
			break;
		case RING_STAT_CHANGE:{
			unsigned short ring_status;
			ring_status= ntohs(readw(ti->arb + NETW_STATUS_OFST));
			if (ibmtr_debug_trace & TRC_INIT)
				DPRINTK("Ring Status Change...(0x%x)\n",
					ring_status);
			if(ring_status& (REMOVE_RECV|AUTO_REMOVAL|LOBE_FAULT)){
				netif_stop_queue(dev);
				netif_carrier_off(dev);
				DPRINTK("Remove received, or Auto-removal error"
					", or Lobe fault\n");
				DPRINTK("We'll try to reopen the closed adapter"
					" after a %d second delay.\n",
					TR_RETRY_INTERVAL/HZ);
				/*I was confused: I saw the TR reopening but */
				/*forgot:with an RJ45 in an RJ45/ICS adapter */
				/*but adapter not in the ring, the TR will */
				/* open, and then soon close and come here. */
				ti->open_mode = AUTOMATIC;
				ti->open_status = CLOSED; /*12/2000 BMS*/
				ti->open_action = REOPEN;
				ibmtr_reset_timer(&(ti->tr_timer), dev);
			} else if (ring_status & LOG_OVERFLOW) {
				/* Defer the log read if TX is in flight. */
				if(netif_queue_stopped(dev))
					ti->readlog_pending = 1;
				else
					ibmtr_readlog(dev);
			}
			break;
		}
		case XMIT_DATA_REQ:
			tr_tx(dev);
			break;
		default:
			DPRINTK("Unknown command %02X in arb\n",
				(int) readb(ti->arb));
			break;
		}	/* switch ARB command check */
		writeb(~ARB_CMD_INT, ti->mmio+ ACA_OFFSET+ACA_RESET + ISRP_ODD);
		writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
	}	/* if ARB response */
	if (status & SSB_RESP_INT) {	/* SSB response */
		unsigned char retcode;
		SET_PAGE(ti->ssb_page);
#if TR_VERBOSE
		DPRINTK("SSB resp: cmd=%02X rsp=%02X\n",
			readb(ti->ssb), readb(ti->ssb + 2));
#endif

		switch (readb(ti->ssb)) {	/* SSB command check */
		case XMIT_DIR_FRAME:
		case XMIT_UI_FRAME:
			retcode = readb(ti->ssb + 2);
			if (retcode && (retcode != 0x22))/* checks ret_code */
				DPRINTK("xmit ret_code: %02X xmit error code: "
					"%02X\n",
					(int)retcode, (int)readb(ti->ssb + 6));
			else
				dev->stats.tx_packets++;
			break;
		case XMIT_XID_CMD:
			DPRINTK("xmit xid ret_code: %02X\n",
				(int) readb(ti->ssb + 2));
		/* NOTE(review): no break above — XMIT_XID_CMD falls through
		 * and also logs "Unknown command"; looks like a missing
		 * break, confirm before changing behavior. */
		default:
			DPRINTK("Unknown command %02X in ssb\n",
				(int) readb(ti->ssb));
		}	/* SSB command check */
		writeb(~SSB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
		writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
	}	/* if SSB response */
#ifdef ENABLE_PAGING
	writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif
	/* All mailboxes serviced: re-enable adapter interrupts. */
	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
	spin_unlock(&(ti->lock));
	return IRQ_HANDLED;
}			/*tok_interrupt */
1493
1494/*****************************************************************************/
1495
1496#define INIT_STATUS_OFST 1
1497#define INIT_STATUS_2_OFST 2
1498#define ENCODED_ADDRESS_OFST 8
1499
/*
 * Handle the adapter's very first interrupt after reset.
 *
 * Maps shared RAM, locates the initialization SRB, zeroes the top 512
 * bytes of a 127KB-less-512 configuration, reads the ring speed from
 * the init status byte, and then either wakes the waiting opener
 * (MANUAL mode) or kicks off tok_open_adapter() directly.
 */
static void initial_tok_int(struct net_device *dev)
{

	__u32 encoded_addr, hw_encoded_addr;
	struct tok_info *ti;
	unsigned char init_status;	/*BMS 12/2000*/

	ti = netdev_priv(dev);

	/* Subsequent interrupts take the normal tok_interrupt() path. */
	ti->do_tok_int = NOT_FIRST;

	/* we assign the shared-ram address for ISA devices */
	writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN);
#ifndef PCMCIA
	ti->sram_virt = ioremap(((__u32)ti->sram_base << 12), ti->avail_shared_ram);
#endif
	/* The adapter publishes the init SRB address in WRBR. */
	ti->init_srb = map_address(ti,
		ntohs(readw(ti->mmio + ACA_OFFSET + WRBR_EVEN)),
		&ti->init_srb_page);
	if (ti->page_mask && ti->avail_shared_ram == 127) {
		void __iomem *last_512;
		__u8 last_512_page=0;
		int i;
		last_512 = map_address(ti, 0xfe00, &last_512_page);
		/* initialize high section of ram (if necessary) */
		SET_PAGE(last_512_page);
		for (i = 0; i < 512; i++)
			writeb(0, last_512 + i);
	}
	SET_PAGE(ti->init_srb_page);

#if TR_VERBOSE
	{
	int i;

	DPRINTK("ti->init_srb_page=0x%x\n", ti->init_srb_page);
	DPRINTK("init_srb(%p):", ti->init_srb );
	for (i = 0; i < 20; i++)
		printk("%02X ", (int) readb(ti->init_srb + i));
	printk("\n");
	}
#endif

	hw_encoded_addr = readw(ti->init_srb + ENCODED_ADDRESS_OFST);
	encoded_addr = ntohs(hw_encoded_addr);
	init_status= /*BMS 12/2000 check for shallow mode possibility (Turbo)*/
	readb(ti->init_srb+offsetof(struct srb_init_response,init_status));
	/*printk("Initial interrupt: init_status= 0x%02x\n",init_status);*/
	/* Bit 0 of init_status selects 16 vs 4 Mbps ring speed. */
	ti->ring_speed = init_status & 0x01 ? 16 : 4;
	DPRINTK("Initial interrupt : %d Mbps, shared RAM base %08x.\n",
				ti->ring_speed, (unsigned int)dev->mem_start);
	ti->auto_speedsave = (readb(ti->init_srb+INIT_STATUS_2_OFST) & 4) != 0;

	if (ti->open_mode == MANUAL)	wake_up(&ti->wait_for_reset);
	else				tok_open_adapter((unsigned long)dev);

}			/*initial_tok_int() */
1557
1558/*****************************************************************************/
1559
1560#define CMD_CORRELATE_OFST 1
1561#define DHB_ADDRESS_OFST 6
1562
1563#define FRAME_LENGTH_OFST 6
1564#define HEADER_LENGTH_OFST 8
1565#define RSAP_VALUE_OFST 9
1566
/*
 * Service an XMIT_DATA_REQ from the adapter: copy the pending sk_buff
 * into the adapter-supplied DHB (data holding buffer) in shared RAM,
 * fill in the ASB reply, and signal RESP_IN_ASB.  XID/TEST commands get
 * a synthesized 17-byte broadcast frame instead of the skb payload.
 * Called from tok_interrupt() with ti->lock held.
 */
static void tr_tx(struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);
	struct trh_hdr *trhdr = (struct trh_hdr *) ti->current_skb->data;
	unsigned int hdr_len;
	__u32 dhb=0,dhb_base;
	void __iomem *dhbuf = NULL;
	unsigned char xmit_command;
	int i,dhb_len=0x4000,src_len,src_offset;
	struct trllc *llc;
	struct srb_xmit xsrb;
	__u8 dhb_page = 0;
	__u8 llc_ssap;

	SET_PAGE(ti->asb_page);

	if (readb(ti->asb+RETCODE_OFST) != 0xFF) DPRINTK("ASB not free !!!\n");

	/* The adapter, in raising the transmit interrupt, is telling us it
	   is ready for data and providing a shared memory address for us to
	   stuff with data.  Here we compute the effective address where we
	   will place data.
	 */
	SET_PAGE(ti->arb_page);
	dhb=dhb_base=ntohs(readw(ti->arb + DHB_ADDRESS_OFST));
	if (ti->page_mask) {
		/* Split the DHB address into a page number and in-page offset. */
		dhb_page = (dhb_base >> 8) & ti->page_mask;
		dhb=dhb_base & ~(ti->page_mask << 8);
	}
	dhbuf = ti->sram_virt + dhb;

	/* Figure out the size of the 802.5 header */
	if (!(trhdr->saddr[0] & 0x80))	/* RIF present? */
		hdr_len = sizeof(struct trh_hdr) - TR_MAXRIFLEN;
	else
		hdr_len = ((ntohs(trhdr->rcf) & TR_RCF_LEN_MASK) >> 8)
		    + sizeof(struct trh_hdr) - TR_MAXRIFLEN;

	llc = (struct trllc *) (ti->current_skb->data + hdr_len);

	/* Capture skb-side fields before switching shared-RAM pages. */
	llc_ssap = llc->ssap;
	SET_PAGE(ti->srb_page);
	memcpy_fromio(&xsrb, ti->srb, sizeof(xsrb));
	SET_PAGE(ti->asb_page);
	xmit_command = xsrb.command;

	writeb(xmit_command, ti->asb + COMMAND_OFST);
	writew(xsrb.station_id, ti->asb + STATION_ID_OFST);
	writeb(llc_ssap, ti->asb + RSAP_VALUE_OFST);
	writeb(xsrb.cmd_corr, ti->asb + CMD_CORRELATE_OFST);
	writeb(0, ti->asb + RETCODE_OFST);
	if ((xmit_command == XMIT_XID_CMD) || (xmit_command == XMIT_TEST_CMD)) {
		/* Build a minimal AC/FC + broadcast-dest + null-src frame. */
		writew(htons(0x11), ti->asb + FRAME_LENGTH_OFST);
		writeb(0x0e, ti->asb + HEADER_LENGTH_OFST);
		SET_PAGE(dhb_page);
		writeb(AC, dhbuf);
		writeb(LLC_FRAME, dhbuf + 1);
		for (i = 0; i < TR_ALEN; i++)
			writeb((int) 0x0FF, dhbuf + i + 2);
		for (i = 0; i < TR_ALEN; i++)
			writeb(0, dhbuf + i + TR_ALEN + 2);
		writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
		return;
	}
	/*
	 * the token ring packet is copied from sk_buff to the adapter
	 * buffer identified in the command data received with the interrupt.
	 * The copy is done in page-sized (0x4000) chunks when paging is on.
	 */
	writeb(hdr_len, ti->asb + HEADER_LENGTH_OFST);
	writew(htons(ti->current_skb->len), ti->asb + FRAME_LENGTH_OFST);
	src_len=ti->current_skb->len;
	src_offset=0;
	dhb=dhb_base;
	while(1) {
		if (ti->page_mask) {
			dhb_page=(dhb >> 8) & ti->page_mask;
			dhb=dhb & ~(ti->page_mask << 8);
			dhb_len=0x4000-dhb; /* remaining size of this page */
		}
		dhbuf = ti->sram_virt + dhb;
		SET_PAGE(dhb_page);
		if (src_len > dhb_len) {
			/* Fill the rest of this page and advance to the next. */
			memcpy_toio(dhbuf,&ti->current_skb->data[src_offset],
					dhb_len);
			src_len -= dhb_len;
			src_offset += dhb_len;
			dhb_base+=dhb_len;
			dhb=dhb_base;
			continue;
		}
		memcpy_toio(dhbuf, &ti->current_skb->data[src_offset], src_len);
		break;
	}
	writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
	dev->stats.tx_bytes += ti->current_skb->len;
	dev_kfree_skb_irq(ti->current_skb);
	ti->current_skb = NULL;
	netif_wake_queue(dev);
	if (ti->readlog_pending)
		ibmtr_readlog(dev);
}			/*tr_tx */
1667
1668/*****************************************************************************/
1669
1670
1671#define RECEIVE_BUFFER_OFST 6
1672#define LAN_HDR_LENGTH_OFST 8
1673#define DLC_HDR_LENGTH_OFST 9
1674
1675#define DSAP_OFST 0
1676#define SSAP_OFST 1
1677#define LLC_OFST 2
1678#define PROTID_OFST 3
1679#define ETHERTYPE_OFST 6
1680
/*
 * Service a REC_DATA request: walk the adapter's chained receive
 * buffers in shared RAM, copy the frame into a fresh sk_buff and hand
 * it to the stack.  For IPv4 frames the payload copy also folds in the
 * checksum (CHECKSUM_COMPLETE).  Drops (with DATA_LOST reply) frames
 * with oversized RIFs, non-UI LLC frames, and allocation failures.
 * Called from tok_interrupt() with ti->lock held.
 */
static void tr_rx(struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);
	__u32 rbuffer;
	void __iomem *rbuf, *rbufdata, *llc;
	__u8 rbuffer_page = 0;
	unsigned char *data;
	unsigned int rbuffer_len, lan_hdr_len, hdr_len, ip_len, length;
	unsigned char dlc_hdr_len;
	struct sk_buff *skb;
	unsigned int skb_size = 0;
	int IPv4_p = 0;
	unsigned int chksum = 0;
	struct iphdr *iph;
	struct arb_rec_req rarb;

	SET_PAGE(ti->arb_page);
	memcpy_fromio(&rarb, ti->arb, sizeof(rarb));
	rbuffer = ntohs(rarb.rec_buf_addr) ;
	rbuf = map_address(ti, rbuffer, &rbuffer_page);

	SET_PAGE(ti->asb_page);

	if (readb(ti->asb + RETCODE_OFST) !=0xFF) DPRINTK("ASB not free !!!\n");

	/* Pre-fill the ASB reply; ret_code is written at the end. */
	writeb(REC_DATA, ti->asb + COMMAND_OFST);
	writew(rarb.station_id, ti->asb + STATION_ID_OFST);
	writew(rarb.rec_buf_addr, ti->asb + RECEIVE_BUFFER_OFST);

	lan_hdr_len = rarb.lan_hdr_len;
	if (lan_hdr_len > sizeof(struct trh_hdr)) {
		DPRINTK("Linux cannot handle greater than 18 bytes RIF\n");
		return;
	}	/*BMS I added this above just to be very safe */
	dlc_hdr_len = readb(ti->arb + DLC_HDR_LENGTH_OFST);
	hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr);

	SET_PAGE(rbuffer_page);
	llc = rbuf + offsetof(struct rec_buf, data) + lan_hdr_len;

#if TR_VERBOSE
	DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n",
	(__u32) offsetof(struct rec_buf, data), (unsigned int) lan_hdr_len);
	DPRINTK("llc: %08X rec_buf_addr: %04X dev->mem_start: %lX\n",
		llc, ntohs(rarb.rec_buf_addr), dev->mem_start);
	DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, "
		"ethertype: %04X\n",
		(int) readb(llc + DSAP_OFST), (int) readb(llc + SSAP_OFST),
		(int) readb(llc + LLC_OFST), (int) readb(llc + PROTID_OFST),
		(int) readb(llc+PROTID_OFST+1),(int)readb(llc+PROTID_OFST + 2),
		(int) ntohs(readw(llc + ETHERTYPE_OFST)));
#endif
	/* Only unnumbered-information LLC frames are accepted. */
	if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) {
		SET_PAGE(ti->asb_page);
		writeb(DATA_LOST, ti->asb + RETCODE_OFST);
		dev->stats.rx_dropped++;
		writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
		return;
	}
	length = ntohs(rarb.frame_len);
	/* SNAP (extended SAP) frames long enough to hold an IP header
	 * take the checksumming IPv4 path below. */
	if (readb(llc + DSAP_OFST) == EXTENDED_SAP &&
	    readb(llc + SSAP_OFST) == EXTENDED_SAP &&
	    length >= hdr_len)	IPv4_p = 1;
#if TR_VERBOSE
#define SADDR_OFST	8
#define DADDR_OFST	2

	if (!IPv4_p) {

		void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data);
		u8 saddr[6];
		u8 daddr[6];
		int i;
		for (i = 0 ; i < 6 ; i++)
			saddr[i] = readb(trhhdr + SADDR_OFST + i);
		for (i = 0 ; i < 6 ; i++)
			daddr[i] = readb(trhhdr + DADDR_OFST + i);
		DPRINTK("Probably non-IP frame received.\n");
		DPRINTK("ssap: %02X dsap: %02X "
			"saddr: %pM daddr: %pM\n",
			readb(llc + SSAP_OFST), readb(llc + DSAP_OFST),
			saddr, daddr);
	}
#endif

	/*BMS handle the case she comes in with few hops but leaves with many */
	skb_size=length-lan_hdr_len+sizeof(struct trh_hdr)+sizeof(struct trllc);

	if (!(skb = dev_alloc_skb(skb_size))) {
		DPRINTK("out of memory. frame dropped.\n");
		dev->stats.rx_dropped++;
		SET_PAGE(ti->asb_page);
		writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
		writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
		return;
	}
	/*BMS again, if she comes in with few but leaves with many */
	skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len);
	skb_put(skb, length);
	data = skb->data;
	rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len)));
	rbufdata = rbuf + offsetof(struct rec_buf, data);

	if (IPv4_p) {
		/* Copy the headers without checksumming */
		memcpy_fromio(data, rbufdata, hdr_len);

		/* Watch for padded packets and bogons */
		iph= (struct iphdr *)(data+ lan_hdr_len + sizeof(struct trllc));
		ip_len = ntohs(iph->tot_len) - sizeof(struct iphdr);
		length -= hdr_len;
		if ((ip_len <= length) && (ip_len > 7))
			length = ip_len;
		data += hdr_len;
		rbuffer_len -= hdr_len;
		rbufdata += hdr_len;
	}
	/* Copy the payload, buffer by buffer down the adapter's chain.
	 * A zero next-buffer pointer terminates the chain. */
#define BUFFER_POINTER_OFST	2
#define BUFFER_LENGTH_OFST	6
	for (;;) {
		if (ibmtr_debug_trace&TRC_INITV && length < rbuffer_len)
			DPRINTK("CURIOUS, length=%d < rbuffer_len=%d\n",
						length,rbuffer_len);
		if (IPv4_p)
			chksum=csum_partial_copy_nocheck((void*)rbufdata,
			    data,length<rbuffer_len?length:rbuffer_len,chksum);
		else
			memcpy_fromio(data, rbufdata, rbuffer_len);
		rbuffer = ntohs(readw(rbuf+BUFFER_POINTER_OFST)) ;
		if (!rbuffer)
			break;
		rbuffer -= 2;	/* adapter pointer is offset by the 2-byte link field */
		length -= rbuffer_len;
		data += rbuffer_len;
		rbuf = map_address(ti, rbuffer, &rbuffer_page);
		SET_PAGE(rbuffer_page);
		rbuffer_len = ntohs(readw(rbuf + BUFFER_LENGTH_OFST));
		rbufdata = rbuf + offsetof(struct rec_buf, data);
	}

	/* Success: tell the adapter the buffers may be reused. */
	SET_PAGE(ti->asb_page);
	writeb(0, ti->asb + offsetof(struct asb_rec, ret_code));

	writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->protocol = tr_type_trans(skb, dev);
	if (IPv4_p) {
		skb->csum = chksum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	netif_rx(skb);
}			/*tr_rx */
1837
1838/*****************************************************************************/
1839
/*
 * (Re)arm the driver's retry timer so tok_rerun() fires after
 * TR_RETRY_INTERVAL jiffies, with the net_device as timer data.
 * NOTE(review): init_timer() is called after expires/data/function are
 * assigned; this works only if init_timer() leaves those fields alone
 * (true for the timer API this driver was written against) — confirm
 * when porting to a newer kernel.
 */
static void ibmtr_reset_timer(struct timer_list *tmr, struct net_device *dev)
{
	tmr->expires = jiffies + TR_RETRY_INTERVAL;
	tmr->data = (unsigned long) dev;
	tmr->function = tok_rerun;
	init_timer(tmr);
	add_timer(tmr);
}
1848
1849/*****************************************************************************/
1850
/*
 * Retry-timer callback armed by ibmtr_reset_timer().  Either completes
 * a full adapter reset (open_action == RESTART: release the reset latch
 * and wait for the adapter's first interrupt) or simply retries the
 * open sequence.
 */
static void tok_rerun(unsigned long dev_addr)
{
	struct net_device *dev = (struct net_device *)dev_addr;
	struct tok_info *ti = netdev_priv(dev);

	if ( ti->open_action == RESTART){
		/* Second half of the reset begun in tok_interrupt()'s
		 * adapter-check path: release reset, re-enable paging
		 * and interrupts, then wait for FIRST_INT. */
		ti->do_tok_int = FIRST_INT;
		outb(0, dev->base_addr + ADAPTRESETREL);
#ifdef ENABLE_PAGING
		if (ti->page_mask)
			writeb(SRPR_ENABLE_PAGING,
				ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif

		writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
	} else
		tok_open_adapter(dev_addr);
}
1869
1870/*****************************************************************************/
1871
/*
 * Ask the adapter to dump its error-counter log (DIR_READ_LOG).  The
 * transmit queue stays stopped until the SRB response is handled in
 * tok_interrupt(), which prints the counters and wakes the queue.
 */
static void ibmtr_readlog(struct net_device *dev)
{
	struct tok_info *ti;

	ti = netdev_priv(dev);

	ti->readlog_pending = 0;
	SET_PAGE(ti->srb_page);
	writeb(DIR_READ_LOG, ti->srb);
	/* Re-enable adapter interrupts, then signal "command in SRB". */
	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);

	netif_stop_queue(dev);

}
1887
1888/*****************************************************************************/
1889
1890static int ibmtr_change_mtu(struct net_device *dev, int mtu)
1891{
1892 struct tok_info *ti = netdev_priv(dev);
1893
1894 if (ti->ring_speed == 16 && mtu > ti->maxmtu16)
1895 return -EINVAL;
1896 if (ti->ring_speed == 4 && mtu > ti->maxmtu4)
1897 return -EINVAL;
1898 dev->mtu = mtu;
1899 return 0;
1900}
1901
1902/*****************************************************************************/
1903#ifdef MODULE
1904
/* 3COM 3C619C supports 8 interrupts, 32 I/O ports */
static struct net_device *dev_ibmtr[IBMTR_MAX_ADAPTERS];
/* Default probe I/O bases: PRIMARY at 0xa20, ALTERNATE at 0xa24;
 * irq/mem default to 0 and are auto-detected per card. */
static int io[IBMTR_MAX_ADAPTERS] = { 0xa20, 0xa24 };
static int irq[IBMTR_MAX_ADAPTERS];
static int mem[IBMTR_MAX_ADAPTERS];

MODULE_LICENSE("GPL");

module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
1916
1917static int __init ibmtr_init(void)
1918{
1919 int i;
1920 int count=0;
1921
1922 find_turbo_adapters(io);
1923
1924 for (i = 0; i < IBMTR_MAX_ADAPTERS && io[i]; i++) {
1925 struct net_device *dev;
1926 irq[i] = 0;
1927 mem[i] = 0;
1928 dev = alloc_trdev(sizeof(struct tok_info));
1929 if (dev == NULL) {
1930 if (i == 0)
1931 return -ENOMEM;
1932 break;
1933 }
1934 dev->base_addr = io[i];
1935 dev->irq = irq[i];
1936 dev->mem_start = mem[i];
1937
1938 if (ibmtr_probe_card(dev)) {
1939 free_netdev(dev);
1940 continue;
1941 }
1942 dev_ibmtr[i] = dev;
1943 count++;
1944 }
1945 if (count) return 0;
1946 printk("ibmtr: register_netdev() returned non-zero.\n");
1947 return -EIO;
1948}
1949module_init(ibmtr_init);
1950
1951static void __exit ibmtr_cleanup(void)
1952{
1953 int i;
1954
1955 for (i = 0; i < IBMTR_MAX_ADAPTERS; i++){
1956 if (!dev_ibmtr[i])
1957 continue;
1958 unregister_netdev(dev_ibmtr[i]);
1959 ibmtr_cleanup_card(dev_ibmtr[i]);
1960 free_netdev(dev_ibmtr[i]);
1961 }
1962}
1963module_exit(ibmtr_cleanup);
1964#endif
diff --git a/drivers/net/tokenring/ibmtr_cs.c b/drivers/net/tokenring/ibmtr_cs.c
deleted file mode 100644
index 356e28e4881b..000000000000
--- a/drivers/net/tokenring/ibmtr_cs.c
+++ /dev/null
@@ -1,370 +0,0 @@
1/*======================================================================
2
3 A PCMCIA token-ring driver for IBM-based cards
4
5 This driver supports the IBM PCMCIA Token-Ring Card.
6 Written by Steve Kipisz, kipisz@vnet.ibm.com or
7 bungy@ibm.net
8
9 Written 1995,1996.
10
11 This code is based on pcnet_cs.c from David Hinds.
12
13 V2.2.0 February 1999 - Mike Phillips phillim@amtrak.com
14
15 Linux V2.2.x presented significant changes to the underlying
16 ibmtr.c code. Mainly the code became a lot more organized and
17 modular.
18
19 This caused the old PCMCIA Token Ring driver to give up and go
20 home early. Instead of just patching the old code to make it
21 work, the PCMCIA code has been streamlined, updated and possibly
22 improved.
23
24 This code now only contains code required for the Card Services.
25 All we do here is set the card up enough so that the real ibmtr.c
26 driver can find it and work with it properly.
27
28 i.e. We set up the io port, irq, mmio memory and shared ram
29 memory. This enables ibmtr_probe in ibmtr.c to find the card and
30 configure it as though it was a normal ISA and/or PnP card.
31
32 CHANGES
33
34 v2.2.5 April 1999 Mike Phillips (phillim@amtrak.com)
    Obscure bug fix; required changes to ibmtr.c, not ibmtr_cs.c
36
37 v2.2.7 May 1999 Mike Phillips (phillim@amtrak.com)
    Updated to version 2.2.7 to match the first version of the kernel
    that the modifications to ibmtr.c were incorporated into.
40
41 v2.2.17 July 2000 Burt Silverman (burts@us.ibm.com)
42 Address translation feature of PCMCIA controller is usable so
43 memory windows can be placed in High memory (meaning above
44 0xFFFFF.)
45
46======================================================================*/
47
48#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
49
50#include <linux/kernel.h>
51#include <linux/init.h>
52#include <linux/ptrace.h>
53#include <linux/slab.h>
54#include <linux/string.h>
55#include <linux/timer.h>
56#include <linux/module.h>
57#include <linux/netdevice.h>
58#include <linux/trdevice.h>
59#include <linux/ibmtr.h>
60
61#include <pcmcia/cistpl.h>
62#include <pcmcia/ds.h>
63
64#include <asm/uaccess.h>
65#include <asm/io.h>
66
67#define PCMCIA
68#include "ibmtr.c"
69
70
71/*====================================================================*/
72
/* Parameters that can be set with 'insmod' */

/* MMIO base address */
static u_long mmiobase = 0xce000;

/* SRAM base address (must be 4K-aligned; shifted >>12 into sram_base) */
static u_long srambase = 0xd0000;

/* SRAM size in KB: 8,16,32,64 */
static u_long sramsize = 64;

/* Ringspeed 4,16 (Mbps) */
static int ringspeed = 16;

module_param(mmiobase, ulong, 0);
module_param(srambase, ulong, 0);
module_param(sramsize, ulong, 0);
module_param(ringspeed, int, 0);
MODULE_LICENSE("GPL");
91MODULE_LICENSE("GPL");
92
93/*====================================================================*/
94
95static int ibmtr_config(struct pcmcia_device *link);
96static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase);
97static void ibmtr_release(struct pcmcia_device *link);
98static void ibmtr_detach(struct pcmcia_device *p_dev);
99
100/*====================================================================*/
101
/* Per-socket driver state tying the PCMCIA device to its net_device. */
typedef struct ibmtr_dev_t {
	struct pcmcia_device	*p_dev;
	struct net_device	*dev;
	struct tok_info		*ti;	/* == netdev_priv(dev) */
} ibmtr_dev_t;
107
108static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) {
109 ibmtr_dev_t *info = dev_id;
110 struct net_device *dev = info->dev;
111 return tok_interrupt(irq, dev);
112};
113
/*
 * PCMCIA probe: allocate the per-socket state and a token-ring
 * net_device, describe the card's I/O needs to card services, then
 * hand off to ibmtr_config() to claim resources and register.
 * Returns 0 on success or a negative errno.
 */
static int __devinit ibmtr_attach(struct pcmcia_device *link)
{
    ibmtr_dev_t *info;
    struct net_device *dev;

    dev_dbg(&link->dev, "ibmtr_attach()\n");

    /* Create new token-ring device */
    info = kzalloc(sizeof(*info), GFP_KERNEL);
    if (!info) return -ENOMEM;
    dev = alloc_trdev(sizeof(struct tok_info));
    if (!dev) {
	kfree(info);
	return -ENOMEM;
    }

    info->p_dev = link;
    link->priv = info;
    info->ti = netdev_priv(dev);

    /* 4 bytes of 8-bit I/O, IRQ-driven, one option register. */
    link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
    link->resource[0]->end = 4;
    link->config_flags |= CONF_ENABLE_IRQ;
    link->config_regs = PRESENT_OPTION;

    info->dev = dev;

    return ibmtr_config(link);
} /* ibmtr_attach */
143
/*
 * PCMCIA removal: flag the card as gone so in-flight interrupts bail,
 * unregister the netdev, kill the retry timer, release card-services
 * resources and free everything.  The ordering here is deliberate.
 */
static void ibmtr_detach(struct pcmcia_device *link)
{
    struct ibmtr_dev_t *info = link->priv;
    struct net_device *dev = info->dev;
    struct tok_info *ti = netdev_priv(dev);

    dev_dbg(&link->dev, "ibmtr_detach\n");
    
    /* 
     * When the card removal interrupt hits tok_interrupt(),
     * bail out early, so we don't crash the machine
     * (tok_interrupt() tests the low bit of sram_phys and
     * returns IRQ_NONE when it is set).
     */
    ti->sram_phys |= 1;

    unregister_netdev(dev);
    
    del_timer_sync(&(ti->tr_timer));
    
    ibmtr_release(link);

    free_netdev(dev);
    kfree(info);
} /* ibmtr_detach */
167
/*
 * Claim I/O, IRQ and the two memory windows (MMIO control area and
 * shared RAM) for the card, program the hardware via ibmtr_hw_setup(),
 * then register the netdev through the common ibmtr.c probe.  Any
 * failure unwinds through ibmtr_release().  Returns 0 or -ENODEV.
 */
static int __devinit ibmtr_config(struct pcmcia_device *link)
{
    ibmtr_dev_t *info = link->priv;
    struct net_device *dev = info->dev;
    struct tok_info *ti = netdev_priv(dev);
    int i, ret;

    dev_dbg(&link->dev, "ibmtr_config\n");

    link->io_lines = 16;
    link->config_index = 0x61;

    /* Determine if this is PRIMARY or ALTERNATE. */

    /* Try PRIMARY card at 0xA20-0xA23 */
    link->resource[0]->start = 0xA20;
    i = pcmcia_request_io(link);
    if (i != 0) {
	/* Couldn't get 0xA20-0xA23.  Try ALTERNATE at 0xA24-0xA27. */
	link->resource[0]->start = 0xA24;
	ret = pcmcia_request_io(link);
	if (ret)
		goto failed;
    }
    dev->base_addr = link->resource[0]->start;

    ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt);
    if (ret)
	    goto failed;
    dev->irq = link->irq;
    ti->irq = link->irq;
    /* IRQ 9 aliases to 2 on the card's global interrupt-enable port. */
    ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);

    /* Allocate the MMIO memory window */
    link->resource[2]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
    link->resource[2]->flags |= WIN_USE_WAIT;
    link->resource[2]->start = 0;
    link->resource[2]->end = 0x2000;
    ret = pcmcia_request_window(link, link->resource[2], 250);
    if (ret)
	    goto failed;

    ret = pcmcia_map_mem_page(link, link->resource[2], mmiobase);
    if (ret)
	    goto failed;
    ti->mmio = ioremap(link->resource[2]->start,
		    resource_size(link->resource[2]));

    /* Allocate the SRAM memory window */
    link->resource[3]->flags = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
    link->resource[3]->flags |= WIN_USE_WAIT;
    link->resource[3]->start = 0;
    link->resource[3]->end = sramsize * 1024;
    ret = pcmcia_request_window(link, link->resource[3], 250);
    if (ret)
	    goto failed;

    ret = pcmcia_map_mem_page(link, link->resource[3], srambase);
    if (ret)
	    goto failed;

    ti->sram_base = srambase >> 12;	/* 4K page number of shared RAM */
    ti->sram_virt = ioremap(link->resource[3]->start,
		    resource_size(link->resource[3]));
    ti->sram_phys = link->resource[3]->start;

    ret = pcmcia_enable_device(link);
    if (ret)
	    goto failed;

    /*  Set up the Token-Ring Controller Configuration Register and
        turn on the card.  Check the "Local Area Network Credit Card
        Adapters Technical Reference"  SC30-3585 for this info.  */
    ibmtr_hw_setup(dev, mmiobase);

    SET_NETDEV_DEV(dev, &link->dev);

    i = ibmtr_probe_card(dev);
    if (i != 0) {
	pr_notice("register_netdev() failed\n");
	goto failed;
    }

    netdev_info(dev, "port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
		dev->base_addr, dev->irq,
		(u_long)ti->mmio, (u_long)(ti->sram_base << 12),
		dev->dev_addr);
    return 0;

failed:
    ibmtr_release(link);
    return -ENODEV;
} /* ibmtr_config */
261
262static void ibmtr_release(struct pcmcia_device *link)
263{
264 ibmtr_dev_t *info = link->priv;
265 struct net_device *dev = info->dev;
266
267 dev_dbg(&link->dev, "ibmtr_release\n");
268
269 if (link->resource[2]->end) {
270 struct tok_info *ti = netdev_priv(dev);
271 iounmap(ti->mmio);
272 }
273 pcmcia_disable_device(link);
274}
275
276static int ibmtr_suspend(struct pcmcia_device *link)
277{
278 ibmtr_dev_t *info = link->priv;
279 struct net_device *dev = info->dev;
280
281 if (link->open)
282 netif_device_detach(dev);
283
284 return 0;
285}
286
/*
 * Power-management resume hook: re-probe the hardware and reattach the
 * interface.  NOTE(review): the original author already questioned the
 * ibmtr_probe() call here ("really?") — verify it is the right level
 * of re-initialization before relying on suspend/resume.
 */
static int __devinit ibmtr_resume(struct pcmcia_device *link)
{
	ibmtr_dev_t *info = link->priv;
	struct net_device *dev = info->dev;

	if (link->open) {
		ibmtr_probe(dev);	/* really? */
		netif_device_attach(dev);
	}

	return 0;
}
299
300
301/*====================================================================*/
302
/*
 * Program the Token-Ring Controller Configuration Register nibble by
 * nibble and release the card for use.  See the "Local Area Network
 * Credit Card Adapters Technical Reference" SC30-3585.
 */
static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase)
{
    int i;

    /* Bizarre IBM behavior, there are 16 bits of information we
       need to set, but the card only allows us to send 4 bits at a
       time.  For each byte sent to base_addr, bits 7-4 tell the
       card which part of the 16 bits we are setting, bits 3-0 contain
       the actual information */

    /* First nibble provides 4 bits of mmio */
    i = (mmiobase >> 16) & 0x0F;
    outb(i, dev->base_addr);

    /* Second nibble provides 3 bits of mmio */
    i = 0x10 | ((mmiobase >> 12) & 0x0E);
    outb(i, dev->base_addr);

    /* Third nibble, hard-coded values */
    i = 0x26;
    outb(i, dev->base_addr);

    /* Fourth nibble sets shared ram page size */

    /* 8 = 00, 16 = 01, 32 = 10, 64 = 11 */
    i = (sramsize >> 4) & 0x07;	/* 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 4 */
    i = ((i == 4) ? 3 : i) << 2;	/* remap 64KB's 4 to encoding 3 */
    i |= 0x30;

    if (ringspeed == 16)
	i |= 2;
    if (dev->base_addr == 0xA24)
	i |= 1;		/* ALTERNATE card */
    outb(i, dev->base_addr);

    /* 0x40 will release the card for use */
    outb(0x40, dev->base_addr);
}
341
/* PCMCIA match table: IBM's own card and 3Com's TokenLink Velocity. */
static const struct pcmcia_device_id ibmtr_ids[] = {
	PCMCIA_DEVICE_PROD_ID12("3Com", "TokenLink Velocity PC Card", 0x41240e5b, 0x82c3734e),
	PCMCIA_DEVICE_PROD_ID12("IBM", "TOKEN RING", 0xb569a6e5, 0xbf8eed47),
	PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, ibmtr_ids);
348
/* Card-services driver glue: probe/remove plus PM callbacks. */
static struct pcmcia_driver ibmtr_cs_driver = {
	.owner		= THIS_MODULE,
	.name		= "ibmtr_cs",
	.probe		= ibmtr_attach,
	.remove		= ibmtr_detach,
	.id_table       = ibmtr_ids,
	.suspend	= ibmtr_suspend,
	.resume		= ibmtr_resume,
};
358
/* Module init: register with the PCMCIA core. */
static int __init init_ibmtr_cs(void)
{
	return pcmcia_register_driver(&ibmtr_cs_driver);
}
363
/* Module exit: unregister from the PCMCIA core. */
static void __exit exit_ibmtr_cs(void)
{
	pcmcia_unregister_driver(&ibmtr_cs_driver);
}
368
369module_init(init_ibmtr_cs);
370module_exit(exit_ibmtr_cs);
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
deleted file mode 100644
index 3e4b4f091113..000000000000
--- a/drivers/net/tokenring/lanstreamer.c
+++ /dev/null
@@ -1,1917 +0,0 @@
1/*
2 * lanstreamer.c -- driver for the IBM Auto LANStreamer PCI Adapter
3 *
4 * Written By: Mike Sullivan, IBM Corporation
5 *
6 * Copyright (C) 1999 IBM Corporation
7 *
8 * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC
9 * chipset.
10 *
11 * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic
12 * chipsets) written by:
13 * 1999 Peter De Schrijver All Rights Reserved
14 * 1999 Mike Phillips (phillim@amtrak.com)
15 *
16 * Base Driver Skeleton:
17 * Written 1993-94 by Donald Becker.
18 *
19 * Copyright 1993 United States Government as represented by the
20 * Director, National Security Agency.
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * NO WARRANTY
33 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
34 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
35 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
36 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
37 * solely responsible for determining the appropriateness of using and
38 * distributing the Program and assumes all risks associated with its
39 * exercise of rights under this Agreement, including but not limited to
40 * the risks and costs of program errors, damage to or loss of data,
41 * programs or equipment, and unavailability or interruption of operations.
42 *
43 * DISCLAIMER OF LIABILITY
44 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
47 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
48 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
49 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
50 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
51 *
52 * You should have received a copy of the GNU General Public License
53 * along with this program; if not, write to the Free Software
54 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
55 *
56 *
57 * 12/10/99 - Alpha Release 0.1.0
58 * First release to the public
59 * 03/03/00 - Merged to kernel, indented -kr -i8 -bri0, fixed some missing
60 * malloc free checks, reviewed code. <alan@redhat.com>
61 * 03/13/00 - Added spinlocks for smp
62 * 03/08/01 - Added support for module_init() and module_exit()
63 * 08/15/01 - Added ioctl() functionality for debugging, changed netif_*_queue
64 * calls and other incorrectness - Kent Yoder <yoder1@us.ibm.com>
65 * 11/05/01 - Restructured the interrupt function, added delays, reduced the
66 * the number of TX descriptors to 1, which together can prevent
67 * the card from locking up the box - <yoder1@us.ibm.com>
68 * 09/27/02 - New PCI interface + bug fix. - <yoder1@us.ibm.com>
69 * 11/13/02 - Removed free_irq calls which could cause a hang, added
70 * netif_carrier_{on|off} - <yoder1@us.ibm.com>
71 *
72 * To Do:
73 *
74 *
75 * If Problems do Occur
76 * Most problems can be rectified by either closing and opening the interface
77 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
78 * if compiled into the kernel).
79 */
80
/* Change STREAMER_DEBUG to 1 to get verbose, and I mean really verbose, messages */

#define STREAMER_DEBUG 0
#define STREAMER_DEBUG_PACKETS 0	/* additionally hex-dump frame contents */

/* Change STREAMER_NETWORK_MONITOR to receive mac frames through the arb channel.
 * Will also create a /proc/net/streamer_tr entry if proc_fs is compiled into the
 * kernel.
 * Intended to be used to create a ring-error reporting network module
 * i.e. it will give you the source address of beaconers on the ring
 */

#define STREAMER_NETWORK_MONITOR 0

/* #define CONFIG_PROC_FS */

/*
 * Allow or disallow ioctl's for debugging
 */

#define STREAMER_IOCTL 0
102
103#include <linux/module.h>
104#include <linux/kernel.h>
105#include <linux/errno.h>
106#include <linux/timer.h>
107#include <linux/in.h>
108#include <linux/ioport.h>
109#include <linux/string.h>
110#include <linux/proc_fs.h>
111#include <linux/ptrace.h>
112#include <linux/skbuff.h>
113#include <linux/interrupt.h>
114#include <linux/delay.h>
115#include <linux/netdevice.h>
116#include <linux/trdevice.h>
117#include <linux/stddef.h>
118#include <linux/init.h>
119#include <linux/pci.h>
120#include <linux/dma-mapping.h>
121#include <linux/spinlock.h>
122#include <linux/bitops.h>
123#include <linux/jiffies.h>
124#include <linux/slab.h>
125
126#include <net/net_namespace.h>
127#include <net/checksum.h>
128
129#include <asm/io.h>
130
131#include "lanstreamer.h"
132
133#if (BITS_PER_LONG == 64)
134#error broken on 64-bit: stores pointer to rx_ring->buffer in 32-bit int
135#endif
136
137
138/* I've got to put some intelligence into the version number so that Peter and I know
139 * which version of the code somebody has got.
140 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
141 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
142 *
143 * Official releases will only have an a.b.c version number format.
144 */
145
/* Version banner, printed once per successfully probed card. */
static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
                        " v0.5.3 11/13/02 - Kent Yoder";

/* PCI match table: IBM tokenring device ID, any subsystem. */
static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
	{}	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci,streamer_pci_tbl);
154
155
/* Major open-failure phase strings, indexed by the high nibble of the
 * error code read back from the adapter in streamer_open(). */
static char *open_maj_error[] = {
	"No error", "Lobe Media Test", "Physical Insertion",
	"Address Verification", "Neighbor Notification (Ring Poll)",
	"Request Parameters", "FDX Registration Request",
	"FDX Lobe Media Test", "FDX Duplicate Address Check",
	"Unknown stage"
};
163
/* Minor open-failure reason strings, indexed by the low nibble of the
 * error code read back from the adapter in streamer_open().
 * Fix: "Monitor Contention failer" -> "failure" (user-visible typo). */
static char *open_min_error[] = {
	"No error", "Function Failure", "Signal Lost", "Wire Fault",
	"Ring Speed Mismatch", "Timeout", "Ring Failure", "Ring Beaconing",
	"Duplicate Node Address", "Request Parameters", "Remove Received",
	"Reserved", "Reserved", "No Monitor Detected for RPL",
	"Monitor Contention failure for RPL", "FDX Protocol Error"
};
171
/* Module parameters -- one entry per adapter, indexed by probe order
 * (card_no in streamer_init_one()). */

/* Ring Speed 0,4,16
 * 0 = Autosense
 * 4,16 = Selected speed only, no autosense
 * This allows the card to be the first on the ring
 * and become the active monitor.
 *
 * WARNING: Some hubs will allow you to insert
 * at the wrong speed
 */

static int ringspeed[STREAMER_MAX_ADAPTERS] = { 0, };

module_param_array(ringspeed, int, NULL, 0);

/* Packet buffer size -- values outside [100, 18000] fall back to PKT_BUF_SZ */

static int pkt_buf_sz[STREAMER_MAX_ADAPTERS] = { 0, };

module_param_array(pkt_buf_sz, int, NULL, 0);

/* Message Level -- non-zero enables informational printk()s */

static int message_level[STREAMER_MAX_ADAPTERS] = { 1, };

module_param_array(message_level, int, NULL, 0);
199
#if STREAMER_IOCTL
static int streamer_ioctl(struct net_device *, struct ifreq *, int);
#endif

/* Forward declarations for the driver entry points defined below. */
static int streamer_reset(struct net_device *dev);
static int streamer_open(struct net_device *dev);
static netdev_tx_t streamer_xmit(struct sk_buff *skb,
				 struct net_device *dev);
static int streamer_close(struct net_device *dev);
static void streamer_set_rx_mode(struct net_device *dev);
static irqreturn_t streamer_interrupt(int irq, void *dev_id);
static int streamer_set_mac_address(struct net_device *dev, void *addr);
static void streamer_arb_cmd(struct net_device *dev);
static int streamer_change_mtu(struct net_device *dev, int mtu);
static void streamer_srb_bh(struct net_device *dev);
static void streamer_asb_bh(struct net_device *dev);
#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
static int streamer_proc_info(char *buffer, char **start, off_t offset,
			      int length, int *eof, void *data);
static int sprintf_info(char *buffer, struct net_device *dev);
/* Head of the singly-linked list of probed cards used by the proc file. */
struct streamer_private *dev_streamer=NULL;
#endif
#endif
224
/* net_device callbacks; installed on the device in streamer_init_one(). */
static const struct net_device_ops streamer_netdev_ops = {
	.ndo_open		= streamer_open,
	.ndo_stop		= streamer_close,
	.ndo_start_xmit		= streamer_xmit,
	.ndo_change_mtu		= streamer_change_mtu,
#if STREAMER_IOCTL
	.ndo_do_ioctl		= streamer_ioctl,
#endif
	.ndo_set_rx_mode	= streamer_set_rx_mode,
	.ndo_set_mac_address	= streamer_set_mac_address,
};
236
/*
 * streamer_init_one -- PCI probe routine.
 *
 * Allocates the token-ring net_device, maps the adapter's PIO (BAR 0)
 * and MMIO (BAR 1) resources, applies the per-card module parameters,
 * resets the hardware and registers the interface.
 *
 * Returns 0 on success or a negative errno; the error path unwinds
 * every step taken so far in reverse order.
 */
static int __devinit streamer_init_one(struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct streamer_private *streamer_priv;
	unsigned long pio_start, pio_end, pio_flags, pio_len;
	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
	int rc = 0;
	static int card_no=-1;	/* probe-order index into the parameter arrays */
	u16 pcr;

#if STREAMER_DEBUG
	printk("lanstreamer::streamer_init_one, entry pdev %p\n",pdev);
#endif

	card_no++;
	dev = alloc_trdev(sizeof(*streamer_priv));
	if (dev==NULL) {
		printk(KERN_ERR "lanstreamer: out of memory.\n");
		return -ENOMEM;
	}

	streamer_priv = netdev_priv(dev);

#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
	/* First card creates the shared /proc/net/streamer_tr entry; each
	 * card is then pushed onto the dev_streamer list the proc handler
	 * walks.  NOTE(review): the error paths below do not unlink from
	 * this list -- verify if monitor mode is ever re-enabled. */
	if (!dev_streamer)
		create_proc_read_entry("streamer_tr", 0, init_net.proc_net,
					streamer_proc_info, NULL);
	streamer_priv->next = dev_streamer;
	dev_streamer = streamer_priv;
#endif
#endif

	/* NOTE(review): the DMA mask is set before pci_enable_device();
	 * the usual ordering is enable first -- confirm against the PCI
	 * core expectations of this kernel version. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc) {
		printk(KERN_ERR "%s: No suitable PCI mapping available.\n",
		       dev->name);
		rc = -ENODEV;
		goto err_out;
	}

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR "lanstreamer: unable to enable pci device\n");
		rc=-EIO;
		goto err_out;
	}

	pci_set_master(pdev);

	rc = pci_set_mwi(pdev);
	if (rc) {
		printk(KERN_ERR "lanstreamer: unable to enable MWI on pci device\n");
		goto err_out_disable;
	}

	/* BAR 0: programmed I/O, BAR 1: memory-mapped registers */
	pio_start = pci_resource_start(pdev, 0);
	pio_end = pci_resource_end(pdev, 0);
	pio_flags = pci_resource_flags(pdev, 0);
	pio_len = pci_resource_len(pdev, 0);

	mmio_start = pci_resource_start(pdev, 1);
	mmio_end = pci_resource_end(pdev, 1);
	mmio_flags = pci_resource_flags(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);

#if STREAMER_DEBUG
	printk("lanstreamer: pio_start %x pio_end %x pio_len %x pio_flags %x\n",
		pio_start, pio_end, pio_len, pio_flags);
	printk("lanstreamer: mmio_start %x mmio_end %x mmio_len %x mmio_flags %x\n",
		mmio_start, mmio_end, mmio_flags, mmio_len);
#endif

	if (!request_region(pio_start, pio_len, "lanstreamer")) {
		printk(KERN_ERR "lanstreamer: unable to get pci io addr %lx\n",
			pio_start);
		rc= -EBUSY;
		goto err_out_mwi;
	}

	if (!request_mem_region(mmio_start, mmio_len, "lanstreamer")) {
		printk(KERN_ERR "lanstreamer: unable to get pci mmio addr %lx\n",
			mmio_start);
		rc= -EBUSY;
		goto err_out_free_pio;
	}

	streamer_priv->streamer_mmio=ioremap(mmio_start, mmio_len);
	if (streamer_priv->streamer_mmio == NULL) {
		printk(KERN_ERR "lanstreamer: unable to remap MMIO %lx\n",
			mmio_start);
		rc= -EIO;
		goto err_out_free_mmio;
	}

	init_waitqueue_head(&streamer_priv->srb_wait);
	init_waitqueue_head(&streamer_priv->trb_wait);

	dev->netdev_ops = &streamer_netdev_ops;
	dev->irq = pdev->irq;
	dev->base_addr=pio_start;
	SET_NETDEV_DEV(dev, &pdev->dev);

	streamer_priv->streamer_card_name = (char *)pdev->resource[0].name;
	streamer_priv->pci_dev = pdev;

	/* Clamp out-of-range per-card buffer sizes to the default. */
	if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000))
		streamer_priv->pkt_buf_sz = PKT_BUF_SZ;
	else
		streamer_priv->pkt_buf_sz = pkt_buf_sz[card_no];

	streamer_priv->streamer_ring_speed = ringspeed[card_no];
	streamer_priv->streamer_message_level = message_level[card_no];

	pci_set_drvdata(pdev, dev);

	spin_lock_init(&streamer_priv->streamer_lock);

	/* Enable SERR reporting on the card. */
	pci_read_config_word (pdev, PCI_COMMAND, &pcr);
	pcr |= PCI_COMMAND_SERR;
	pci_write_config_word (pdev, PCI_COMMAND, pcr);

	printk("%s\n", version);
	/* NOTE(review): dev->name is printed before register_netdev(), so
	 * this still shows the unformatted "tr%d" template -- confirm. */
	printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
		streamer_priv->streamer_card_name,
		(unsigned int) dev->base_addr,
		streamer_priv->streamer_mmio,
		dev->irq);

	if (streamer_reset(dev))
		goto err_out_unmap;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;
	return 0;

err_out_unmap:
	iounmap(streamer_priv->streamer_mmio);
err_out_free_mmio:
	release_mem_region(mmio_start, mmio_len);
err_out_free_pio:
	release_region(pio_start, pio_len);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out:
	free_netdev(dev);
#if STREAMER_DEBUG
	printk("lanstreamer: Exit error %x\n",rc);
#endif
	return rc;
}
392
/*
 * streamer_remove_one -- PCI remove routine.
 *
 * Unwinds streamer_init_one() in reverse: unlinks the card from the
 * proc-monitor list (removing the proc entry once the list is empty),
 * unregisters the netdev, unmaps MMIO, releases both BARs, disables
 * the PCI device and frees the net_device.
 */
static void __devexit streamer_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev=pci_get_drvdata(pdev);
	struct streamer_private *streamer_priv;

#if STREAMER_DEBUG
	printk("lanstreamer::streamer_remove_one entry pdev %p\n",pdev);
#endif

	if (dev == NULL) {
		printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev is NULL\n");
		return;
	}

	streamer_priv=netdev_priv(dev);
	if (streamer_priv == NULL) {
		printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev->priv is NULL\n");
		return;
	}

#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
	{
		/* Unlink this card from the singly-linked dev_streamer list
		 * by walking pointers-to-link rather than nodes. */
		struct streamer_private **p, **next;

		for (p = &dev_streamer; *p; p = next) {
			next = &(*p)->next;
			if (*p == streamer_priv) {
				*p = *next;
				break;
			}
		}
		/* Last card gone: remove the shared proc entry too. */
		if (!dev_streamer)
			remove_proc_entry("streamer_tr", init_net.proc_net);
	}
#endif
#endif

	unregister_netdev(dev);
	iounmap(streamer_priv->streamer_mmio);
	release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev,1));
	release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev,0));
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}
440
441
/*
 * streamer_reset -- soft-reset the adapter and run its self-initialization.
 *
 * Pulses the soft-reset bit, programs the requested ring speed (or
 * autosense), hands the card one diagnostic rx buffer, waits up to 40s
 * for the SRB_REPLY completion, then reads back the init response:
 * the UAA (burned-in MAC address, copied into dev->dev_addr), the
 * address-table offset and the parms offset.
 *
 * Returns 0 on success, -1 on timeout or an adapter-reported error.
 */
static int streamer_reset(struct net_device *dev)
{
	struct streamer_private *streamer_priv;
	__u8 __iomem *streamer_mmio;
	unsigned long t;
	unsigned int uaa_addr;
	struct sk_buff *skb = NULL;
	__u16 misr;

	streamer_priv = netdev_priv(dev);
	streamer_mmio = streamer_priv->streamer_mmio;

	writew(readw(streamer_mmio + BCTL) | BCTL_SOFTRESET, streamer_mmio + BCTL);
	t = jiffies;
	/* Hold soft reset bit for a while */
	ssleep(1);

	writew(readw(streamer_mmio + BCTL) & ~BCTL_SOFTRESET,
	       streamer_mmio + BCTL);

#if STREAMER_DEBUG
	printk("BCTL: %x\n", readw(streamer_mmio + BCTL));
	printk("GPR: %x\n", readw(streamer_mmio + GPR));
	printk("SISRMASK: %x\n", readw(streamer_mmio + SISR_MASK));
#endif
	writew(readw(streamer_mmio + BCTL) | (BCTL_RX_FIFO_8 | BCTL_TX_FIFO_8), streamer_mmio + BCTL );

	/* Program ring speed: autosense, 16 Mbps, or 4 Mbps. */
	if (streamer_priv->streamer_ring_speed == 0) {	/* Autosense */
		writew(readw(streamer_mmio + GPR) | GPR_AUTOSENSE,
		       streamer_mmio + GPR);
		if (streamer_priv->streamer_message_level)
			printk(KERN_INFO "%s: Ringspeed autosense mode on\n",
			       dev->name);
	} else if (streamer_priv->streamer_ring_speed == 16) {
		if (streamer_priv->streamer_message_level)
			printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n",
			       dev->name);
		writew(GPR_16MBPS, streamer_mmio + GPR);
	} else if (streamer_priv->streamer_ring_speed == 4) {
		if (streamer_priv->streamer_message_level)
			printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n",
			       dev->name);
		writew(0, streamer_mmio + GPR);
	}

	/* Give the card one rx descriptor + buffer so its diagnostics can
	 * receive; absence of the buffer is tolerated.
	 * NOTE(review): the two pci_map_single() mappings made here are
	 * never unmapped -- verify whether this is a real leak or covered
	 * by a later reset. */
	skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
	if (!skb) {
		printk(KERN_INFO "%s: skb allocation for diagnostics failed...proceeding\n",
		       dev->name);
	} else {
		struct streamer_rx_desc *rx_ring;
		u8 *data;

		rx_ring=(struct streamer_rx_desc *)skb->data;
		data=((u8 *)skb->data)+sizeof(struct streamer_rx_desc);
		rx_ring->forward=0;
		rx_ring->status=0;
		rx_ring->buffer=cpu_to_le32(pci_map_single(streamer_priv->pci_dev, data,
							512, PCI_DMA_FROMDEVICE));
		rx_ring->framelen_buflen=512;
		writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, rx_ring, 512, PCI_DMA_FROMDEVICE)),
			streamer_mmio+RXBDA);
	}

#if STREAMER_DEBUG
	printk("GPR = %x\n", readw(streamer_mmio + GPR));
#endif
	/* start solo init */
	writew(SISR_MI, streamer_mmio + SISR_MASK_SUM);

	/* Poll (up to 40s from the reset pulse) for the SRB reply. */
	while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) {
		msleep_interruptible(100);
		if (time_after(jiffies, t + 40 * HZ)) {
			printk(KERN_ERR
			       "IBM PCI tokenring card not responding\n");
			/* NOTE(review): this releases the PIO region that the
			 * probe error path also releases -- confirm this does
			 * not double-release when called from probe. */
			release_region(dev->base_addr, STREAMER_IO_SPACE);
			if (skb)
				dev_kfree_skb(skb);
			return -1;
		}
	}
	/* Acknowledge the SRB reply and any pending bus-master events. */
	writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
	misr = readw(streamer_mmio + MISR_RUM);
	writew(~misr, streamer_mmio + MISR_RUM);

	if (skb)
		dev_kfree_skb(skb);	/* release skb used for diagnostics */

#if STREAMER_DEBUG
	printk("LAPWWO: %x, LAPA: %x LAPE: %x\n",
	       readw(streamer_mmio + LAPWWO), readw(streamer_mmio + LAPA),
	       readw(streamer_mmio + LAPE));
#endif

#if STREAMER_DEBUG
	{
		int i;
		writew(readw(streamer_mmio + LAPWWO),
		       streamer_mmio + LAPA);
		printk("initialization response srb dump: ");
		for (i = 0; i < 10; i++)
			printk("%x:",
			       ntohs(readw(streamer_mmio + LAPDINC)));
		printk("\n");
	}
#endif

	/* Non-zero word at SRB offset 6 is the adapter's init error code. */
	writew(readw(streamer_mmio + LAPWWO) + 6, streamer_mmio + LAPA);
	if (readw(streamer_mmio + LAPD)) {
		printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",
		       ntohs(readw(streamer_mmio + LAPD)));
		release_region(dev->base_addr, STREAMER_IO_SPACE);
		return -1;
	}

	/* SRB offset 8 onwards: UAA pointer, level/addr, table pointers. */
	writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
	uaa_addr = ntohs(readw(streamer_mmio + LAPDINC));
	readw(streamer_mmio + LAPDINC);	/* skip over Level.Addr field */
	streamer_priv->streamer_addr_table_addr = ntohs(readw(streamer_mmio + LAPDINC));
	streamer_priv->streamer_parms_addr = ntohs(readw(streamer_mmio + LAPDINC));

#if STREAMER_DEBUG
	printk("UAA resides at %x\n", uaa_addr);
#endif

	/* setup uaa area for access with LAPD */
	{
		int i;
		__u16 addr;
		writew(uaa_addr, streamer_mmio + LAPA);
		for (i = 0; i < 6; i += 2) {
			addr=ntohs(readw(streamer_mmio+LAPDINC));
			dev->dev_addr[i]= (addr >> 8) & 0xff;
			dev->dev_addr[i+1]= addr & 0xff;
		}
#if STREAMER_DEBUG
		printk("Adapter address: %pM\n", dev->dev_addr);
#endif
	}
	return 0;
}
583
584static int streamer_open(struct net_device *dev)
585{
586 struct streamer_private *streamer_priv = netdev_priv(dev);
587 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
588 unsigned long flags;
589 char open_error[255];
590 int i, open_finished = 1;
591 __u16 srb_word;
592 __u16 srb_open;
593 int rc;
594
595 if (readw(streamer_mmio+BMCTL_SUM) & BMCTL_RX_ENABLED) {
596 rc=streamer_reset(dev);
597 }
598
599 if (request_irq(dev->irq, streamer_interrupt, IRQF_SHARED, "lanstreamer", dev)) {
600 return -EAGAIN;
601 }
602#if STREAMER_DEBUG
603 printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
604 printk("pending ints: %x\n", readw(streamer_mmio + SISR));
605#endif
606
607 writew(SISR_MI | SISR_SRB_REPLY, streamer_mmio + SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
608 writew(LISR_LIE, streamer_mmio + LISR); /* more ints later */
609
610 /* adapter is closed, so SRB is pointed to by LAPWWO */
611 writew(readw(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
612
613#if STREAMER_DEBUG
614 printk("LAPWWO: %x, LAPA: %x\n", readw(streamer_mmio + LAPWWO),
615 readw(streamer_mmio + LAPA));
616 printk("LAPE: %x\n", readw(streamer_mmio + LAPE));
617 printk("SISR Mask = %04x\n", readw(streamer_mmio + SISR_MASK));
618#endif
619 do {
620 for (i = 0; i < SRB_COMMAND_SIZE; i += 2) {
621 writew(0, streamer_mmio + LAPDINC);
622 }
623
624 writew(readw(streamer_mmio+LAPWWO),streamer_mmio+LAPA);
625 writew(htons(SRB_OPEN_ADAPTER<<8),streamer_mmio+LAPDINC) ; /* open */
626 writew(htons(STREAMER_CLEAR_RET_CODE<<8),streamer_mmio+LAPDINC);
627 writew(STREAMER_CLEAR_RET_CODE, streamer_mmio + LAPDINC);
628
629 writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
630#if STREAMER_NETWORK_MONITOR
631 /* If Network Monitor, instruct card to copy MAC frames through the ARB */
632 writew(htons(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), streamer_mmio + LAPDINC); /* offset 8 word contains open options */
633#else
634 writew(htons(OPEN_ADAPTER_ENABLE_FDX), streamer_mmio + LAPDINC); /* Offset 8 word contains Open.Options */
635#endif
636
637 if (streamer_priv->streamer_laa[0]) {
638 writew(readw(streamer_mmio + LAPWWO) + 12, streamer_mmio + LAPA);
639 writew(htons((streamer_priv->streamer_laa[0] << 8) |
640 streamer_priv->streamer_laa[1]),streamer_mmio+LAPDINC);
641 writew(htons((streamer_priv->streamer_laa[2] << 8) |
642 streamer_priv->streamer_laa[3]),streamer_mmio+LAPDINC);
643 writew(htons((streamer_priv->streamer_laa[4] << 8) |
644 streamer_priv->streamer_laa[5]),streamer_mmio+LAPDINC);
645 memcpy(dev->dev_addr, streamer_priv->streamer_laa, dev->addr_len);
646 }
647
648 /* save off srb open offset */
649 srb_open = readw(streamer_mmio + LAPWWO);
650#if STREAMER_DEBUG
651 writew(readw(streamer_mmio + LAPWWO),
652 streamer_mmio + LAPA);
653 printk("srb open request:\n");
654 for (i = 0; i < 16; i++) {
655 printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
656 }
657 printk("\n");
658#endif
659 spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
660 streamer_priv->srb_queued = 1;
661
662 /* signal solo that SRB command has been issued */
663 writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
664 spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);
665
666 while (streamer_priv->srb_queued) {
667 interruptible_sleep_on_timeout(&streamer_priv->srb_wait, 5 * HZ);
668 if (signal_pending(current)) {
669 printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
670 printk(KERN_WARNING "SISR=%x MISR=%x, LISR=%x\n",
671 readw(streamer_mmio + SISR),
672 readw(streamer_mmio + MISR_RUM),
673 readw(streamer_mmio + LISR));
674 streamer_priv->srb_queued = 0;
675 break;
676 }
677 }
678
679#if STREAMER_DEBUG
680 printk("SISR_MASK: %x\n", readw(streamer_mmio + SISR_MASK));
681 printk("srb open response:\n");
682 writew(srb_open, streamer_mmio + LAPA);
683 for (i = 0; i < 10; i++) {
684 printk("%x:",
685 ntohs(readw(streamer_mmio + LAPDINC)));
686 }
687#endif
688
689 /* If we get the same return response as we set, the interrupt wasn't raised and the open
690 * timed out.
691 */
692 writew(srb_open + 2, streamer_mmio + LAPA);
693 srb_word = ntohs(readw(streamer_mmio + LAPD)) >> 8;
694 if (srb_word == STREAMER_CLEAR_RET_CODE) {
695 printk(KERN_WARNING "%s: Adapter Open time out or error.\n",
696 dev->name);
697 return -EIO;
698 }
699
700 if (srb_word != 0) {
701 if (srb_word == 0x07) {
702 if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */
703 printk(KERN_WARNING "%s: Retrying at different ring speed\n",
704 dev->name);
705 open_finished = 0;
706 } else {
707 __u16 error_code;
708
709 writew(srb_open + 6, streamer_mmio + LAPA);
710 error_code = ntohs(readw(streamer_mmio + LAPD));
711 strcpy(open_error, open_maj_error[(error_code & 0xf0) >> 4]);
712 strcat(open_error, " - ");
713 strcat(open_error, open_min_error[(error_code & 0x0f)]);
714
715 if (!streamer_priv->streamer_ring_speed &&
716 ((error_code & 0x0f) == 0x0d))
717 {
718 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
719 printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
720 free_irq(dev->irq, dev);
721 return -EIO;
722 }
723
724 printk(KERN_WARNING "%s: %s\n",
725 dev->name, open_error);
726 free_irq(dev->irq, dev);
727 return -EIO;
728
729 } /* if autosense && open_finished */
730 } else {
731 printk(KERN_WARNING "%s: Bad OPEN response: %x\n",
732 dev->name, srb_word);
733 free_irq(dev->irq, dev);
734 return -EIO;
735 }
736 } else
737 open_finished = 1;
738 } while (!(open_finished)); /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
739
740 writew(srb_open + 18, streamer_mmio + LAPA);
741 srb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8;
742 if (srb_word & (1 << 3))
743 if (streamer_priv->streamer_message_level)
744 printk(KERN_INFO "%s: Opened in FDX Mode\n", dev->name);
745
746 if (srb_word & 1)
747 streamer_priv->streamer_ring_speed = 16;
748 else
749 streamer_priv->streamer_ring_speed = 4;
750
751 if (streamer_priv->streamer_message_level)
752 printk(KERN_INFO "%s: Opened in %d Mbps mode\n",
753 dev->name,
754 streamer_priv->streamer_ring_speed);
755
756 writew(srb_open + 8, streamer_mmio + LAPA);
757 streamer_priv->asb = ntohs(readw(streamer_mmio + LAPDINC));
758 streamer_priv->srb = ntohs(readw(streamer_mmio + LAPDINC));
759 streamer_priv->arb = ntohs(readw(streamer_mmio + LAPDINC));
760 readw(streamer_mmio + LAPDINC); /* offset 14 word is rsvd */
761 streamer_priv->trb = ntohs(readw(streamer_mmio + LAPDINC));
762
763 streamer_priv->streamer_receive_options = 0x00;
764 streamer_priv->streamer_copy_all_options = 0;
765
766 /* setup rx ring */
767 /* enable rx channel */
768 writew(~BMCTL_RX_DIS, streamer_mmio + BMCTL_RUM);
769
770 /* setup rx descriptors */
771 streamer_priv->streamer_rx_ring=
772 kmalloc( sizeof(struct streamer_rx_desc)*
773 STREAMER_RX_RING_SIZE,GFP_KERNEL);
774 if (!streamer_priv->streamer_rx_ring) {
775 printk(KERN_WARNING "%s ALLOC of streamer rx ring FAILED!!\n",dev->name);
776 return -EIO;
777 }
778
779 for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
780 struct sk_buff *skb;
781
782 skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
783 if (skb == NULL)
784 break;
785
786 skb->dev = dev;
787
788 streamer_priv->streamer_rx_ring[i].forward =
789 cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[i + 1],
790 sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
791 streamer_priv->streamer_rx_ring[i].status = 0;
792 streamer_priv->streamer_rx_ring[i].buffer =
793 cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data,
794 streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
795 streamer_priv->streamer_rx_ring[i].framelen_buflen = streamer_priv->pkt_buf_sz;
796 streamer_priv->rx_ring_skb[i] = skb;
797 }
798 streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1].forward =
799 cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
800 sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
801
802 if (i == 0) {
803 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n", dev->name);
804 free_irq(dev->irq, dev);
805 return -EIO;
806 }
807
808 streamer_priv->rx_ring_last_received = STREAMER_RX_RING_SIZE - 1; /* last processed rx status */
809
810 writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
811 sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
812 streamer_mmio + RXBDA);
813 writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1],
814 sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
815 streamer_mmio + RXLBDA);
816
817 /* set bus master interrupt event mask */
818 writew(MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
819
820
821 /* setup tx ring */
822 streamer_priv->streamer_tx_ring=kmalloc(sizeof(struct streamer_tx_desc)*
823 STREAMER_TX_RING_SIZE,GFP_KERNEL);
824 if (!streamer_priv->streamer_tx_ring) {
825 printk(KERN_WARNING "%s ALLOC of streamer_tx_ring FAILED\n",dev->name);
826 return -EIO;
827 }
828
829 writew(~BMCTL_TX2_DIS, streamer_mmio + BMCTL_RUM); /* Enables TX channel 2 */
830 for (i = 0; i < STREAMER_TX_RING_SIZE; i++) {
831 streamer_priv->streamer_tx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
832 &streamer_priv->streamer_tx_ring[i + 1],
833 sizeof(struct streamer_tx_desc),
834 PCI_DMA_TODEVICE));
835 streamer_priv->streamer_tx_ring[i].status = 0;
836 streamer_priv->streamer_tx_ring[i].bufcnt_framelen = 0;
837 streamer_priv->streamer_tx_ring[i].buffer = 0;
838 streamer_priv->streamer_tx_ring[i].buflen = 0;
839 streamer_priv->streamer_tx_ring[i].rsvd1 = 0;
840 streamer_priv->streamer_tx_ring[i].rsvd2 = 0;
841 streamer_priv->streamer_tx_ring[i].rsvd3 = 0;
842 }
843 streamer_priv->streamer_tx_ring[STREAMER_TX_RING_SIZE - 1].forward =
844 cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[0],
845 sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE));
846
847 streamer_priv->free_tx_ring_entries = STREAMER_TX_RING_SIZE;
848 streamer_priv->tx_ring_free = 0; /* next entry in tx ring to use */
849 streamer_priv->tx_ring_last_status = STREAMER_TX_RING_SIZE - 1;
850
851 /* set Busmaster interrupt event mask (handle receives on interrupt only */
852 writew(MISR_TX2_EOF | MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
853 /* set system event interrupt mask */
854 writew(SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE, streamer_mmio + SISR_MASK_SUM);
855
856#if STREAMER_DEBUG
857 printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
858 printk("SISR MASK: %x\n", readw(streamer_mmio + SISR_MASK));
859#endif
860
861#if STREAMER_NETWORK_MONITOR
862
863 writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
864 printk("%s: Node Address: %04x:%04x:%04x\n", dev->name,
865 ntohs(readw(streamer_mmio + LAPDINC)),
866 ntohs(readw(streamer_mmio + LAPDINC)),
867 ntohs(readw(streamer_mmio + LAPDINC)));
868 readw(streamer_mmio + LAPDINC);
869 readw(streamer_mmio + LAPDINC);
870 printk("%s: Functional Address: %04x:%04x\n", dev->name,
871 ntohs(readw(streamer_mmio + LAPDINC)),
872 ntohs(readw(streamer_mmio + LAPDINC)));
873
874 writew(streamer_priv->streamer_parms_addr + 4,
875 streamer_mmio + LAPA);
876 printk("%s: NAUN Address: %04x:%04x:%04x\n", dev->name,
877 ntohs(readw(streamer_mmio + LAPDINC)),
878 ntohs(readw(streamer_mmio + LAPDINC)),
879 ntohs(readw(streamer_mmio + LAPDINC)));
880#endif
881
882 netif_start_queue(dev);
883 netif_carrier_on(dev);
884 return 0;
885}
886
887/*
888 * When we enter the rx routine we do not know how many frames have been
889 * queued on the rx channel. Therefore we start at the next rx status
890 * position and travel around the receive ring until we have completed
891 * all the frames.
892 *
893 * This means that we may process the frame before we receive the end
894 * of frame interrupt. This is why we always test the status instead
895 * of blindly processing the next frame.
896 *
897 */
/*
 * streamer_rx - drain all completed descriptors from the receive ring.
 *
 * Walks the rx ring starting at the slot after rx_ring_last_received,
 * processing every descriptor whose completion bit (0x01000000) is set.
 * Single-buffer frames are handed up zero-copy (the ring skb is passed to
 * the stack and replaced with a freshly allocated one); multi-buffer
 * frames are copied fragment by fragment into one new skb.
 */
static void streamer_rx(struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	struct streamer_rx_desc *rx_desc;
	int rx_ring_last_received, length, frame_length, buffer_cnt = 0;
	struct sk_buff *skb, *skb2;

	/* setup the next rx descriptor to be received */
	rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
	rx_ring_last_received = streamer_priv->rx_ring_last_received;

	while (rx_desc->status & 0x01000000) { /* While processed descriptors are available */
		/* Sanity check: our local ring cursor should still track the
		 * one in streamer_priv; a mismatch means someone else moved it. */
		if (rx_ring_last_received != streamer_priv->rx_ring_last_received)
		{
			printk(KERN_WARNING "RX Error 1 rx_ring_last_received not the same %x %x\n",
				rx_ring_last_received, streamer_priv->rx_ring_last_received);
		}
		streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
		rx_ring_last_received = streamer_priv->rx_ring_last_received;

		/* framelen_buflen packs buffer length (low 16) and total
		 * frame length (high 16) into one 32-bit field. */
		length = rx_desc->framelen_buflen & 0xffff;	/* buffer length */
		frame_length = (rx_desc->framelen_buflen >> 16) & 0xffff;

		if (rx_desc->status & 0x7E830000) {	/* errors */
			if (streamer_priv->streamer_message_level) {
				printk(KERN_WARNING "%s: Rx Error %x\n",
				       dev->name, rx_desc->status);
			}
		} else {	/* received without errors */
			/* Status bit 31 set on the first descriptor seen means
			 * the whole frame fits in this single buffer; take the
			 * zero-copy path and pre-allocate a replacement skb. */
			if (rx_desc->status & 0x80000000) {	/* frame complete */
				buffer_cnt = 1;
				skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
			} else {
				skb = dev_alloc_skb(frame_length);
			}

			if (skb == NULL)
			{
				printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name);
				dev->stats.rx_dropped++;
			} else {	/* we allocated an skb OK */
				if (buffer_cnt == 1) {
					/* release the DMA mapping */
					pci_unmap_single(streamer_priv->pci_dev,
						le32_to_cpu(streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer),
						streamer_priv->pkt_buf_sz,
						PCI_DMA_FROMDEVICE);
					/* Hand the ring's existing skb (skb2) up the
					 * stack and recycle the new skb into the ring. */
					skb2 = streamer_priv->rx_ring_skb[rx_ring_last_received];
#if STREAMER_DEBUG_PACKETS
					{
						int i;
						printk("streamer_rx packet print: skb->data2 %p  skb->head %p\n", skb2->data, skb2->head);
						for (i = 0; i < frame_length; i++)
						{
							printk("%x:", skb2->data[i]);
							if (((i + 1) % 16) == 0)
								printk("\n");
						}
						printk("\n");
					}
#endif
					skb_put(skb2, length);
					skb2->protocol = tr_type_trans(skb2, dev);
					/* recycle this descriptor */
					streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
					streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
					streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer =
						cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz,
								PCI_DMA_FROMDEVICE));
					streamer_priv->rx_ring_skb[rx_ring_last_received] = skb;
					/* place recycled descriptor back on the adapter */
					writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
									&streamer_priv->streamer_rx_ring[rx_ring_last_received],
									sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)),
						streamer_mmio + RXLBDA);
					/* pass the received skb up to the protocol */
					netif_rx(skb2);
				} else {
					do {	/* Walk the buffers */
						/* NOTE(review): the trailing comma below makes the
						 * unmap and the memcpy a single comma-expression
						 * statement; almost certainly meant to be ';'.
						 * Also, rx_desc->buffer holds a (little-endian)
						 * bus address, so memcpy'ing from it as a virtual
						 * pointer looks wrong — TODO confirm on hardware. */
						pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(rx_desc->buffer), length, PCI_DMA_FROMDEVICE),
						memcpy(skb_put(skb, length), (void *)rx_desc->buffer, length);	/* copy this fragment */
						streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
						streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;

						/* give descriptor back to the adapter */
						writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
										&streamer_priv->streamer_rx_ring[rx_ring_last_received],
										length, PCI_DMA_FROMDEVICE)),
							streamer_mmio + RXLBDA);

						if (rx_desc->status & 0x80000000)
							break;	/* this descriptor completes the frame */

						/* else get the next pending descriptor */
						if (rx_ring_last_received!= streamer_priv->rx_ring_last_received)
						{
							printk("RX Error rx_ring_last_received not the same %x %x\n",
								rx_ring_last_received,
								streamer_priv->rx_ring_last_received);
						}
						rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE-1)];

						length = rx_desc->framelen_buflen & 0xffff;	/* buffer length */
						streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE - 1);
						rx_ring_last_received = streamer_priv->rx_ring_last_received;
					} while (1);

					skb->protocol = tr_type_trans(skb, dev);
					/* send up to the protocol */
					netif_rx(skb);
				}
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += length;
			}	/* if skb == null */
		}		/* end received without errors */

		/* try the next one */
		rx_desc = &streamer_priv->streamer_rx_ring[(rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
	}			/* end for all completed rx descriptors */
}
1020
/*
 * streamer_interrupt - top-half interrupt handler.
 *
 * Re-reads SISR in a loop (bounded by MAX_INTR to avoid livelock) and
 * services, in priority order, one condition per pass: bus parity/system
 * errors, the "MI" summary interrupt (tx-complete / rx / no-buffer),
 * SRB replies, adapter checks, ASB-free, ARB commands and TRB replies.
 * Each condition is acknowledged by writing its complemented bit to the
 * *_RUM register, followed by a read-back (presumably to flush the
 * posted PCI write — typical MMIO idiom).
 */
static irqreturn_t streamer_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u16 sisr;
	__u16 misr;
	u8 max_intr = MAX_INTR;	/* upper bound on loop iterations */

	spin_lock(&streamer_priv->streamer_lock);
	sisr = readw(streamer_mmio + SISR);

	while((sisr & (SISR_MI | SISR_SRB_REPLY | SISR_ADAPTER_CHECK | SISR_ASB_FREE |
		       SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR)) &&
	      (max_intr > 0)) {

		if(sisr & SISR_PAR_ERR) {
			/* PCI parity error: just acknowledge it */
			writew(~SISR_PAR_ERR, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}

		else if(sisr & SISR_SERR_ERR) {
			/* PCI system error: just acknowledge it */
			writew(~SISR_SERR_ERR, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}

		else if(sisr & SISR_MI) {
			misr = readw(streamer_mmio + MISR_RUM);

			if (misr & MISR_TX2_EOF) {
				/* Reap every tx descriptor the adapter has
				 * completed: free the skb, account stats and
				 * scrub the descriptor for reuse. */
				while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) {
					streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1);
					streamer_priv->free_tx_ring_entries++;
					dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len;
					dev->stats.tx_packets++;
					dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]);
					/* poison value marks the slot as reaped */
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].bufcnt_framelen = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buflen = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd1 = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd2 = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd3 = 0;
				}
				netif_wake_queue(dev);
			}

			if (misr & MISR_RX_EOF) {
				streamer_rx(dev);
			}
			/* MISR_RX_EOF */

			if (misr & MISR_RX_NOBUF) {
				/* According to the documentation, we don't have to do anything,
				 * but trapping it keeps it out of /var/log/messages.
				 */
			}	/* SISR_RX_NOBUF */

			writew(~misr, streamer_mmio + MISR_RUM);
			(void)readw(streamer_mmio + MISR_RUM);
		}

		else if (sisr & SISR_SRB_REPLY) {
			/* srb_queued == 1: a sleeper in streamer_close/open is
			 * waiting; == 2: handle asynchronously in the bh. */
			if (streamer_priv->srb_queued == 1) {
				wake_up_interruptible(&streamer_priv->srb_wait);
			} else if (streamer_priv->srb_queued == 2) {
				streamer_srb_bh(dev);
			}
			streamer_priv->srb_queued = 0;

			writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}

		else if (sisr & SISR_ADAPTER_CHECK) {
			/* Fatal adapter error: dump the diagnostic words and
			 * take the interface down; recovery requires a reset. */
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
			printk(KERN_WARNING "%s: Words %x:%x:%x:%x:\n",
			       dev->name, readw(streamer_mmio + LAPDINC),
			       ntohs(readw(streamer_mmio + LAPDINC)),
			       ntohs(readw(streamer_mmio + LAPDINC)),
			       ntohs(readw(streamer_mmio + LAPDINC)));
			netif_stop_queue(dev);
			netif_carrier_off(dev);
			printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name);
		}

		/* SISR_ADAPTER_CHECK */
		else if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (streamer_priv->asb_queued) {
				streamer_asb_bh(dev);
			}
			writew(~SISR_ASB_FREE, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		/* SISR_ASB_FREE */
		else if (sisr & SISR_ARB_CMD) {
			streamer_arb_cmd(dev);
			writew(~SISR_ARB_CMD, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		/* SISR_ARB_CMD */
		else if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (streamer_priv->trb_queued) {
				wake_up_interruptible(&streamer_priv->
						      trb_wait);
			}
			streamer_priv->trb_queued = 0;
			writew(~SISR_TRB_REPLY, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		/* SISR_TRB_REPLY */

		sisr = readw(streamer_mmio + SISR);
		max_intr--;
	} /* while() */

	spin_unlock(&streamer_priv->streamer_lock) ;
	return IRQ_HANDLED;
}
1144
/*
 * streamer_xmit - queue one skb for transmission.
 *
 * Fills the next free tx descriptor under the driver spinlock, DMA-maps
 * the skb data, then posts the descriptor's bus address to the TX2LFDA
 * register to kick the adapter.  Returns NETDEV_TX_BUSY (after stopping
 * the queue) when the ring is full; the tx-complete interrupt wakes the
 * queue again.
 */
static netdev_tx_t streamer_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	unsigned long flags ;

	spin_lock_irqsave(&streamer_priv->streamer_lock, flags);

	if (streamer_priv->free_tx_ring_entries) {
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].status = 0;
		/* 0x00020000 in the high half presumably encodes the buffer
		 * count (2) for the adapter — TODO confirm against the
		 * LanStreamer MPC documentation. */
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].bufcnt_framelen = 0x00020000 | skb->len;
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buffer =
			cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE));
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd1 = skb->len;
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd2 = 0;
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd3 = 0;
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buflen = skb->len;

		/* remember the skb so the tx-complete path can free it */
		streamer_priv->tx_ring_skb[streamer_priv->tx_ring_free] = skb;
		streamer_priv->free_tx_ring_entries--;
#if STREAMER_DEBUG_PACKETS
		{
			int i;
			printk("streamer_xmit packet print:\n");
			for (i = 0; i < skb->len; i++) {
				printk("%x:", skb->data[i]);
				if (((i + 1) % 16) == 0)
					printk("\n");
			}
			printk("\n");
		}
#endif

		/* hand the descriptor to the adapter; the read-back
		 * presumably flushes the posted write */
		writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
					&streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free],
					sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)),
			streamer_mmio + TX2LFDA);
		(void)readl(streamer_mmio + TX2LFDA);

		streamer_priv->tx_ring_free = (streamer_priv->tx_ring_free + 1) & (STREAMER_TX_RING_SIZE - 1);
		spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
		return NETDEV_TX_OK;
	} else {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
		return NETDEV_TX_BUSY;
	}
}
1195
1196
/*
 * streamer_close - ndo_stop handler: shut the adapter down.
 *
 * Issues a CLOSE.ADAPTER SRB command, waits (up to 60s) for the SRB
 * reply interrupt, then frees every skb still parked in the rx ring and
 * releases the IRQ.
 */
static int streamer_close(struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	unsigned long flags;
	int i;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	/* Build the CLOSE.ADAPTER SRB in adapter memory via the
	 * auto-incrementing LAPDINC data port. */
	writew(streamer_priv->srb, streamer_mmio + LAPA);
	writew(htons(SRB_CLOSE_ADAPTER << 8),streamer_mmio+LAPDINC);
	writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);

	spin_lock_irqsave(&streamer_priv->streamer_lock, flags);

	streamer_priv->srb_queued = 1;	/* 1 => a sleeper is waiting for the reply */
	writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);

	spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);

	while (streamer_priv->srb_queued)
	{
		/* NOTE(review): interruptible_sleep_on_timeout() is racy and
		 * was removed from the kernel; wait_event_interruptible_timeout()
		 * on srb_queued == 0 would be the modern replacement. */
		interruptible_sleep_on_timeout(&streamer_priv->srb_wait,
					       jiffies + 60 * HZ);
		if (signal_pending(current))
		{
			printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
			printk(KERN_WARNING "SISR=%x MISR=%x LISR=%x\n",
			       readw(streamer_mmio + SISR),
			       readw(streamer_mmio + MISR_RUM),
			       readw(streamer_mmio + LISR));
			streamer_priv->srb_queued = 0;
			break;
		}
	}

	streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);

	/* Free every skb still owned by the rx ring, walking all
	 * STREAMER_RX_RING_SIZE slots. */
	for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
		if (streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]) {
			dev_kfree_skb(streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]);
		}
		streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
	}

	/* reset tx/rx fifo's and busmaster logic */

	/* TBD. Add graceful way to reset the LLC channel without doing a soft reset.
	   writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
	   udelay(1);
	   writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL);
	 */

#if STREAMER_DEBUG
	writew(streamer_priv->srb, streamer_mmio + LAPA);
	printk("srb): ");
	for (i = 0; i < 2; i++) {
		printk("%x ", ntohs(readw(streamer_mmio + LAPDINC)));
	}
	printk("\n");
#endif
	free_irq(dev->irq, dev);
	return 0;
}
1262
/*
 * streamer_set_rx_mode - ndo_set_rx_mode handler.
 *
 * If the promiscuity bits changed, issues a MODIFY.RECEIVE.OPTIONS SRB
 * and returns (only one SRB can be outstanding).  Otherwise ORs all
 * multicast addresses into a 4-byte functional-address mask and issues
 * a SET.FUNC.ADDRESS SRB.  Replies are handled by streamer_srb_bh
 * (srb_queued == 2 marks the async path).
 */
static void streamer_set_rx_mode(struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u8 options = 0;
	struct netdev_hw_addr *ha;
	unsigned char dev_mc_address[5];

	/* NOTE(review): writel here vs. writew for LAPA everywhere else in
	 * this file — looks inconsistent; confirm the register width. */
	writel(streamer_priv->srb, streamer_mmio + LAPA);
	options = streamer_priv->streamer_copy_all_options;

	if (dev->flags & IFF_PROMISC)
		options |= (3 << 5);	/* All LLC and MAC frames, all through the main rx channel */
	else
		options &= ~(3 << 5);

	/* Only issue the srb if there is a change in options */

	if ((options ^ streamer_priv->streamer_copy_all_options))
	{
		/* Now to issue the srb command to alter the copy.all.options */
		writew(htons(SRB_MODIFY_RECEIVE_OPTIONS << 8), streamer_mmio+LAPDINC);
		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
		writew(htons((streamer_priv->streamer_receive_options << 8) | options),streamer_mmio+LAPDINC);
		/* the next four words spell "JAMES   " in ASCII — adapter
		 * scratch/signature words kept from the original driver */
		writew(htons(0x4a41),streamer_mmio+LAPDINC);
		writew(htons(0x4d45),streamer_mmio+LAPDINC);
		writew(htons(0x5320),streamer_mmio+LAPDINC);
		writew(0x2020, streamer_mmio + LAPDINC);

		streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */

		writel(LISR_SRB_CMD, streamer_mmio + LISR_SUM);

		streamer_priv->streamer_copy_all_options = options;
		return;
	}

	/* Set the functional addresses we need for multicast */
	writel(streamer_priv->srb,streamer_mmio+LAPA);
	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;

	/* Token-ring functional addresses: OR bytes 2-5 of every multicast
	 * address into the mask. */
	netdev_for_each_mc_addr(ha, dev) {
		dev_mc_address[0] |= ha->addr[2];
		dev_mc_address[1] |= ha->addr[3];
		dev_mc_address[2] |= ha->addr[4];
		dev_mc_address[3] |= ha->addr[5];
	}

	writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
	writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
	writew(0,streamer_mmio+LAPDINC);
	writew(htons( (dev_mc_address[0] << 8) | dev_mc_address[1]),streamer_mmio+LAPDINC);
	writew(htons( (dev_mc_address[2] << 8) | dev_mc_address[3]),streamer_mmio+LAPDINC);
	streamer_priv->srb_queued = 2 ;
	writel(LISR_SRB_CMD,streamer_mmio+LISR_SUM);
}
1320
/*
 * streamer_srb_bh - bottom half for asynchronous SRB replies.
 *
 * Reads the SRB command byte back from adapter memory, then the return
 * code for that command, and logs the outcome.  Called from interrupt
 * context when srb_queued == 2 (the issuer could not sleep).
 */
static void streamer_srb_bh(struct net_device *dev)
{
	struct streamer_private *streamer_priv = netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u16 srb_word;

	/* first word read: high byte is the SRB command code */
	writew(streamer_priv->srb, streamer_mmio + LAPA);
	srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;

	switch (srb_word) {

		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
		 * At some point we should do something if we get an error, such as
		 * resetting the IFF_PROMISC flag in dev
		 */

	case SRB_MODIFY_RECEIVE_OPTIONS:
		/* second word read: high byte is the return code */
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;

		switch (srb_word) {
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			if (streamer_priv->streamer_message_level)
				printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",
				       dev->name,
				       streamer_priv->streamer_copy_all_options,
				       streamer_priv->streamer_receive_options);
			break;
		}		/* switch srb[2] */
		break;


		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
		 */
	case SRB_SET_GROUP_ADDRESS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
		switch (srb_word) {
		case 0x00:
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		case 0x3c:
			printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n", dev->name);
			break;
		case 0x3e:	/* If we ever implement individual multicast addresses, will need to deal with this */
			printk(KERN_WARNING "%s: Group address registers full\n", dev->name);
			break;
		case 0x55:
			printk(KERN_INFO "%s: Group Address already set.\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;


		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
		 */
	case SRB_RESET_GROUP_ADDRESS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
		switch (srb_word) {
		case 0x00:
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		case 0x39:	/* Must deal with this if individual multicast addresses used */
			printk(KERN_INFO "%s: Group address not found\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;


		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
		 */

	case SRB_SET_FUNC_ADDRESS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
		switch (srb_word) {
		case 0x00:
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;

		/* SRB_READ_LOG - Read and reset the adapter error counters
		 */

	case SRB_READ_LOG:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
		switch (srb_word) {
		case 0x00:
			{
				int i;
				if (streamer_priv->streamer_message_level)
					printk(KERN_INFO "%s: Read Log command complete\n", dev->name);
				printk("Read Log statistics: ");
				/* log data starts 6 bytes into the SRB */
				writew(streamer_priv->srb + 6,
				       streamer_mmio + LAPA);
				for (i = 0; i < 5; i++) {
					printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
				}
				printk("\n");
			}
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;

		}		/* switch srb[2] */
		break;

		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */

	case SRB_READ_SR_COUNTERS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
		switch (srb_word) {
		case 0x00:
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;

	default:
		printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n", dev->name);
		break;
	}			/* switch srb[0] */
}
1484
1485static int streamer_set_mac_address(struct net_device *dev, void *addr)
1486{
1487 struct sockaddr *saddr = addr;
1488 struct streamer_private *streamer_priv = netdev_priv(dev);
1489
1490 if (netif_running(dev))
1491 {
1492 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name);
1493 return -EIO;
1494 }
1495
1496 memcpy(streamer_priv->streamer_laa, saddr->sa_data, dev->addr_len);
1497
1498 if (streamer_priv->streamer_message_level) {
1499 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",
1500 dev->name, streamer_priv->streamer_laa[0],
1501 streamer_priv->streamer_laa[1],
1502 streamer_priv->streamer_laa[2],
1503 streamer_priv->streamer_laa[3],
1504 streamer_priv->streamer_laa[4],
1505 streamer_priv->streamer_laa[5]);
1506 }
1507 return 0;
1508}
1509
/*
 * streamer_arb_cmd - handle an adapter-to-host ARB command.
 *
 * Two commands are understood:
 *   ARB_RECEIVE_DATA     - a MAC frame has arrived; assemble it from the
 *                          adapter's buffer chain and pass it up, then
 *                          acknowledge via the ASB (or defer to the ASB
 *                          bottom half if the ASB is busy).
 *   ARB_LAN_CHANGE_STATUS - ring status changed; log the transitions and,
 *                          on serious errors, take the interface down.
 */
static void streamer_arb_cmd(struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u8 header_len;
	__u16 frame_len, buffer_len;
	struct sk_buff *mac_frame;
	__u8 frame_data[256];
	__u16 buff_off;
	__u16 lan_status = 0, lan_status_diff;	/* Initialize to stop compiler warning */
	__u8 fdx_prot_error;
	__u16 next_ptr;
	__u16 arb_word;

#if STREAMER_NETWORK_MONITOR
	struct trh_hdr *mac_hdr;
#endif

	/* high byte of the first ARB word is the command code */
	writew(streamer_priv->arb, streamer_mmio + LAPA);
	arb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8;

	if (arb_word == ARB_RECEIVE_DATA) {	/* Receive.data, MAC frames */
		/* ARB payload at offset 6: buffer offset, header length,
		 * total frame length */
		writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
		streamer_priv->mac_rx_buffer = buff_off = ntohs(readw(streamer_mmio + LAPDINC));
		header_len=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* 802.5 Token-Ring Header Length */
		frame_len = ntohs(readw(streamer_mmio + LAPDINC));

#if STREAMER_DEBUG
		{
			int i;
			__u16 next;
			__u8 status;
			__u16 len;

			/* NOTE(review): ntohs(buff_off) here vs. htons(buff_off)
			 * in the assembly loop below — one of the two byte
			 * orders is presumably wrong; confirm on hardware. */
			writew(ntohs(buff_off), streamer_mmio + LAPA);	/*setup window to frame data */
			next = htons(readw(streamer_mmio + LAPDINC));
			status =
			    ntohs(readw(streamer_mmio + LAPDINC)) & 0xff;
			len = ntohs(readw(streamer_mmio + LAPDINC));

			/* print out 1st 14 bytes of frame data */
			for (i = 0; i < 7; i++) {
				printk("Loc %d = %04x\n", i,
				       ntohs(readw
					     (streamer_mmio + LAPDINC)));
			}

			printk("next %04x, fs %02x, len %04x\n", next,
			       status, len);
		}
#endif
		if (!(mac_frame = dev_alloc_skb(frame_len))) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n",
			       dev->name);
			goto drop_frame;
		}
		/* Walk the buffer chain, creating the frame */

		do {
			int i;
			__u16 rx_word;

			/* Each buffer starts with: next-buffer pointer,
			 * status word, buffer length, then the data. */
			writew(htons(buff_off), streamer_mmio + LAPA);	/* setup window to frame data */
			next_ptr = ntohs(readw(streamer_mmio + LAPDINC));
			readw(streamer_mmio + LAPDINC);	/* read thru status word */
			buffer_len = ntohs(readw(streamer_mmio + LAPDINC));

			/* guard against overrunning frame_data[256] */
			if (buffer_len > 256)
				break;

			/* copy the buffer out of adapter memory a word at
			 * a time, splitting each word into two bytes */
			i = 0;
			while (i < buffer_len) {
				rx_word=ntohs(readw(streamer_mmio+LAPDINC));
				frame_data[i]=rx_word >> 8;
				frame_data[i+1]=rx_word & 0xff;
				i += 2;
			}

			memcpy(skb_put(mac_frame, buffer_len),
			       frame_data, buffer_len);
		} while (next_ptr && (buff_off = next_ptr));

		mac_frame->protocol = tr_type_trans(mac_frame, dev);
#if STREAMER_NETWORK_MONITOR
		printk(KERN_WARNING "%s: Received MAC Frame, details:\n",
		       dev->name);
		mac_hdr = tr_hdr(mac_frame);
		printk(KERN_WARNING
		       "%s: MAC Frame Dest. Addr: %pM\n",
		       dev->name, mac_hdr->daddr);
		printk(KERN_WARNING
		       "%s: MAC Frame Srce. Addr: %pM\n",
		       dev->name, mac_hdr->saddr);
#endif
		netif_rx(mac_frame);

		/* Now tell the card we have dealt with the received frame */
drop_frame:
		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);

		/* Is the ASB free ? */

		if (!(readl(streamer_priv->streamer_mmio + SISR) & SISR_ASB_FREE))
		{
			/* ASB busy: defer the acknowledge to streamer_asb_bh,
			 * which runs when the ASB-free interrupt fires. */
			streamer_priv->asb_queued = 1;
			writel(LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
			return;
			/* Drop out and wait for the bottom half to be run */
		}


		/* ASB free: acknowledge the RECEIVE.DATA immediately */
		writew(streamer_priv->asb, streamer_mmio + LAPA);
		writew(htons(ASB_RECEIVE_DATA << 8), streamer_mmio+LAPDINC);
		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
		writew(0, streamer_mmio + LAPDINC);
		writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);

		streamer_priv->asb_queued = 2;	/* expect a reply return code */
		return;

	} else if (arb_word == ARB_LAN_CHANGE_STATUS) {	/* Lan.change.status */
		writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
		lan_status = ntohs(readw(streamer_mmio + LAPDINC));
		fdx_prot_error = ntohs(readw(streamer_mmio+LAPD)) >> 8;

		/* Issue ARB Free */
		writew(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);

		/* bits that newly turned ON since the last status report */
		lan_status_diff = (streamer_priv->streamer_lan_status ^ lan_status) &
		    lan_status;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR))
		{
			if (lan_status_diff & LSC_LWF)
				printk(KERN_WARNING "%s: Short circuit detected on the lobe\n", dev->name);
			if (lan_status_diff & LSC_ARW)
				printk(KERN_WARNING "%s: Auto removal error\n", dev->name);
			if (lan_status_diff & LSC_FPE)
				printk(KERN_WARNING "%s: FDX Protocol Error\n", dev->name);
			if (lan_status_diff & LSC_RR)
				printk(KERN_WARNING "%s: Force remove MAC frame received\n", dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */

			/* @TBD. no llc reset on autostreamer writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
			   udelay(1);
			   writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL); */

			netif_stop_queue(dev);
			netif_carrier_off(dev);
			printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name);
		}
		/* If serious error */
		if (streamer_priv->streamer_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
				printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
			if (lan_status_diff & LSC_HARD_ERR)
				printk(KERN_INFO "%s: Beaconing\n", dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
				printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
				printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
			if (lan_status_diff & LSC_SS)
				printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
				printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
				printk(KERN_INFO "%s: Operating in FDX mode\n", dev->name);
		}

		if (lan_status_diff & LSC_CO) {
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Counter Overflow\n", dev->name);

			/* Issue READ.LOG command */

			writew(streamer_priv->srb, streamer_mmio + LAPA);
			writew(htons(SRB_READ_LOG << 8),streamer_mmio+LAPDINC);
			writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
			writew(0, streamer_mmio + LAPDINC);
			streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */

			writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
		}

		if (lan_status_diff & LSC_SR_CO) {
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

			/* Issue a READ.SR.COUNTERS */
			writew(streamer_priv->srb, streamer_mmio + LAPA);
			writew(htons(SRB_READ_SR_COUNTERS << 8),
			       streamer_mmio+LAPDINC);
			writew(htons(STREAMER_CLEAR_RET_CODE << 8),
			       streamer_mmio+LAPDINC);
			streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */
			writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);

		}
		streamer_priv->streamer_lan_status = lan_status;
	}			/* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
1720
/*
 * streamer_asb_bh - bottom half for the ASB (adapter status block).
 *
 * asb_queued == 1: an earlier RECEIVE.DATA ack was deferred because the
 * ASB was busy; send it now.  asb_queued == 2: the ack was sent and the
 * adapter has replied; read and check the return code.
 */
static void streamer_asb_bh(struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;

	if (streamer_priv->asb_queued == 1)
	{
		/* Dropped through the first time */

		/* same ASB_RECEIVE_DATA sequence streamer_arb_cmd would
		 * have issued had the ASB been free */
		writew(streamer_priv->asb, streamer_mmio + LAPA);
		writew(htons(ASB_RECEIVE_DATA << 8),streamer_mmio+LAPDINC);
		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
		writew(0, streamer_mmio + LAPDINC);
		writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
		streamer_priv->asb_queued = 2;

		return;
	}

	if (streamer_priv->asb_queued == 2) {
		__u8 rc;
		/* return code lives in the high byte at asb + 2 */
		writew(streamer_priv->asb + 2, streamer_mmio + LAPA);
		rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
		switch (rc) {
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
			break;
		case 0x26:
			printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
			break;
		case 0xFF:
			/* Valid response, everything should be ok again */
			break;
		default:
			printk(KERN_WARNING "%s: Invalid return code in asb\n", dev->name);
			break;
		}
	}
	streamer_priv->asb_queued = 0;
}
1764
1765static int streamer_change_mtu(struct net_device *dev, int mtu)
1766{
1767 struct streamer_private *streamer_priv =
1768 netdev_priv(dev);
1769 __u16 max_mtu;
1770
1771 if (streamer_priv->streamer_ring_speed == 4)
1772 max_mtu = 4500;
1773 else
1774 max_mtu = 18000;
1775
1776 if (mtu > max_mtu)
1777 return -EINVAL;
1778 if (mtu < 100)
1779 return -EINVAL;
1780
1781 dev->mtu = mtu;
1782 streamer_priv->pkt_buf_sz = mtu + TR_HLEN;
1783
1784 return 0;
1785}
1786
1787#if STREAMER_NETWORK_MONITOR
1788#ifdef CONFIG_PROC_FS
/*
 * streamer_proc_info - legacy procfs read callback.
 *
 * Emits a header line followed by one sprintf_info() report per adapter
 * on the dev_streamer list, using the classic pre-seq_file offset/begin
 * bookkeeping to return only the [offset, offset+length) window of the
 * generated text.
 */
static int streamer_proc_info(char *buffer, char **start, off_t offset,
			      int length, int *eof, void *data)
{
	struct streamer_private *sdev=NULL;
	struct pci_dev *pci_device = NULL;
	int len = 0;		/* bytes currently held in buffer */
	off_t begin = 0;	/* file offset of buffer[0] */
	off_t pos = 0;		/* file offset just past generated text */
	int size;

	struct net_device *dev;

	size = sprintf(buffer, "IBM LanStreamer/MPC Chipset Token Ring Adapters\n");

	pos += size;
	len += size;

	for(sdev=dev_streamer; sdev; sdev=sdev->next) {
		pci_device=sdev->pci_dev;
		dev=pci_get_drvdata(pci_device);

		size = sprintf_info(buffer + len, dev);
		len += size;
		pos = begin + len;

		/* everything so far is before the requested window:
		 * discard it and restart the buffer here */
		if (pos < offset) {
			len = 0;
			begin = pos;
		}
		/* generated past the end of the window: stop early */
		if (pos > offset + length)
			break;
	}			/* for */

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);	/* Start slop */
	if (len > length)
		len = length;	/* Ending slop */
	return len;
}
1828
1829static int sprintf_info(char *buffer, struct net_device *dev)
1830{
1831 struct streamer_private *streamer_priv =
1832 netdev_priv(dev);
1833 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
1834 struct streamer_adapter_addr_table sat;
1835 struct streamer_parameters_table spt;
1836 int size = 0;
1837 int i;
1838
1839 writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
1840 for (i = 0; i < 14; i += 2) {
1841 __u16 io_word;
1842 __u8 *datap = (__u8 *) & sat;
1843 io_word=ntohs(readw(streamer_mmio+LAPDINC));
1844 datap[size]=io_word >> 8;
1845 datap[size+1]=io_word & 0xff;
1846 }
1847 writew(streamer_priv->streamer_parms_addr, streamer_mmio + LAPA);
1848 for (i = 0; i < 68; i += 2) {
1849 __u16 io_word;
1850 __u8 *datap = (__u8 *) & spt;
1851 io_word=ntohs(readw(streamer_mmio+LAPDINC));
1852 datap[size]=io_word >> 8;
1853 datap[size+1]=io_word & 0xff;
1854 }
1855
1856 size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name);
1857
1858 size += sprintf(buffer + size,
1859 "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
1860 dev->name, dev->dev_addr, sat.node_addr,
1861 sat.func_addr[0], sat.func_addr[1],
1862 sat.func_addr[2], sat.func_addr[3]);
1863
1864 size += sprintf(buffer + size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
1865
1866 size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name);
1867
1868 size += sprintf(buffer + size,
1869 "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
1870 dev->name, spt.phys_addr[0], spt.phys_addr[1],
1871 spt.phys_addr[2], spt.phys_addr[3],
1872 spt.up_node_addr, spt.poll_addr,
1873 ntohs(spt.acc_priority), ntohs(spt.auth_source_class),
1874 ntohs(spt.att_code));
1875
1876 size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name);
1877
1878 size += sprintf(buffer + size,
1879 "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
1880 dev->name, spt.source_addr,
1881 ntohs(spt.beacon_type), ntohs(spt.major_vector),
1882 ntohs(spt.lan_status), ntohs(spt.local_ring),
1883 ntohs(spt.mon_error), ntohs(spt.frame_correl));
1884
1885 size += sprintf(buffer + size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
1886 dev->name);
1887
1888 size += sprintf(buffer + size,
1889 "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
1890 dev->name, ntohs(spt.beacon_transmit),
1891 ntohs(spt.beacon_receive),
1892 spt.beacon_naun,
1893 spt.beacon_phys[0], spt.beacon_phys[1],
1894 spt.beacon_phys[2], spt.beacon_phys[3]);
1895 return size;
1896}
1897#endif
1898#endif
1899
/* PCI driver descriptor: binds streamer_init_one/streamer_remove_one to
 * the device IDs listed in streamer_pci_tbl. */
static struct pci_driver streamer_pci_driver = {
	.name     = "lanstreamer",
	.id_table = streamer_pci_tbl,
	.probe    = streamer_init_one,
	.remove   = __devexit_p(streamer_remove_one),
};
1906
1907static int __init streamer_init_module(void) {
1908 return pci_register_driver(&streamer_pci_driver);
1909}
1910
1911static void __exit streamer_cleanup_module(void) {
1912 pci_unregister_driver(&streamer_pci_driver);
1913}
1914
module_init(streamer_init_module);	/* run at insmod / boot */
module_exit(streamer_cleanup_module);	/* run at rmmod */
MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h
deleted file mode 100644
index 3c58d6a3fbc9..000000000000
--- a/drivers/net/tokenring/lanstreamer.h
+++ /dev/null
@@ -1,343 +0,0 @@
1/*
2 * lanstreamer.h -- driver for the IBM Auto LANStreamer PCI Adapter
3 *
4 * Written By: Mike Sullivan, IBM Corporation
5 *
6 * Copyright (C) 1999 IBM Corporation
7 *
8 * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC
9 * chipset.
10 *
11 * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic
12 * chipsets) written by:
13 * 1999 Peter De Schrijver All Rights Reserved
14 * 1999 Mike Phillips (phillim@amtrak.com)
15 *
16 * Base Driver Skeleton:
17 * Written 1993-94 by Donald Becker.
18 *
19 * Copyright 1993 United States Government as represented by the
20 * Director, National Security Agency.
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * NO WARRANTY
33 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
34 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
35 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
36 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
37 * solely responsible for determining the appropriateness of using and
38 * distributing the Program and assumes all risks associated with its
39 * exercise of rights under this Agreement, including but not limited to
40 * the risks and costs of program errors, damage to or loss of data,
41 * programs or equipment, and unavailability or interruption of operations.
42 *
43 * DISCLAIMER OF LIABILITY
44 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
47 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
48 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
49 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
50 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
51 *
52 * You should have received a copy of the GNU General Public License
53 * along with this program; if not, write to the Free Software
54 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
55 *
56 *
57 * 12/10/99 - Alpha Release 0.1.0
58 * First release to the public
59 * 08/15/01 - Added ioctl() definitions and others - Kent Yoder <yoder1@us.ibm.com>
60 *
61 */
62
63/* MAX_INTR - the maximum number of times we can loop
64 * inside the interrupt function before returning
65 * control to the OS (maximum value is 256)
66 */
67#define MAX_INTR 5
68
69#define CLS 0x0C
70#define MLR 0x86
71#define LTR 0x0D
72
73#define BCTL 0x60
74#define BCTL_SOFTRESET (1<<15)
75#define BCTL_RX_FIFO_8 (1<<1)
76#define BCTL_TX_FIFO_8 (1<<3)
77
78#define GPR 0x4a
79#define GPR_AUTOSENSE (1<<2)
80#define GPR_16MBPS (1<<3)
81
82#define LISR 0x10
83#define LISR_SUM 0x12
84#define LISR_RUM 0x14
85
86#define LISR_LIE (1<<15)
87#define LISR_SLIM (1<<13)
88#define LISR_SLI (1<<12)
89#define LISR_BPEI (1<<9)
90#define LISR_BPE (1<<8)
91#define LISR_SRB_CMD (1<<5)
92#define LISR_ASB_REPLY (1<<4)
93#define LISR_ASB_FREE_REQ (1<<2)
94#define LISR_ARB_FREE (1<<1)
95#define LISR_TRB_FRAME (1<<0)
96
97#define SISR 0x16
98#define SISR_SUM 0x18
99#define SISR_RUM 0x1A
100#define SISR_MASK 0x54
101#define SISR_MASK_SUM 0x56
102#define SISR_MASK_RUM 0x58
103
104#define SISR_MI (1<<15)
105#define SISR_SERR_ERR (1<<14)
106#define SISR_TIMER (1<<11)
107#define SISR_LAP_PAR_ERR (1<<10)
108#define SISR_LAP_ACC_ERR (1<<9)
109#define SISR_PAR_ERR (1<<8)
110#define SISR_ADAPTER_CHECK (1<<6)
111#define SISR_SRB_REPLY (1<<5)
112#define SISR_ASB_FREE (1<<4)
113#define SISR_ARB_CMD (1<<3)
114#define SISR_TRB_REPLY (1<<2)
115
116#define MISR_RUM 0x5A
117#define MISR_MASK 0x5C
118#define MISR_MASK_RUM 0x5E
119
120#define MISR_TX2_IDLE (1<<15)
121#define MISR_TX2_NO_STATUS (1<<14)
122#define MISR_TX2_HALT (1<<13)
123#define MISR_TX2_EOF (1<<12)
124#define MISR_TX1_IDLE (1<<11)
125#define MISR_TX1_NO_STATUS (1<<10)
126#define MISR_TX1_HALT (1<<9)
127#define MISR_TX1_EOF (1<<8)
128#define MISR_RX_NOBUF (1<<5)
129#define MISR_RX_EOB (1<<4)
130#define MISR_RX_NO_STATUS (1<<2)
131#define MISR_RX_HALT (1<<1)
132#define MISR_RX_EOF (1<<0)
133
134#define LAPA 0x62
135#define LAPE 0x64
136#define LAPD 0x66
137#define LAPDINC 0x68
138#define LAPWWO 0x6A
139#define LAPWWC 0x6C
140#define LAPCTL 0x6E
141
142#define TIMER 0x4E4
143
144#define BMCTL_SUM 0x50
145#define BMCTL_RUM 0x52
146#define BMCTL_TX1_DIS (1<<14)
147#define BMCTL_TX2_DIS (1<<10)
148#define BMCTL_RX_DIS (1<<6)
149#define BMCTL_RX_ENABLED (1<<5)
150
151#define RXLBDA 0x90
152#define RXBDA 0x94
153#define RXSTAT 0x98
154#define RXDBA 0x9C
155
156#define TX1LFDA 0xA0
157#define TX1FDA 0xA4
158#define TX1STAT 0xA8
159#define TX1DBA 0xAC
160#define TX2LFDA 0xB0
161#define TX2FDA 0xB4
162#define TX2STAT 0xB8
163#define TX2DBA 0xBC
164
165#define STREAMER_IO_SPACE 256
166
167#define SRB_COMMAND_SIZE 50
168
169#define STREAMER_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't hand 0xnn */
170
171/* Defines for LAN STATUS CHANGE reports */
172#define LSC_SIG_LOSS 0x8000
173#define LSC_HARD_ERR 0x4000
174#define LSC_SOFT_ERR 0x2000
175#define LSC_TRAN_BCN 0x1000
176#define LSC_LWF 0x0800
177#define LSC_ARW 0x0400
178#define LSC_FPE 0x0200
179#define LSC_RR 0x0100
180#define LSC_CO 0x0080
181#define LSC_SS 0x0040
182#define LSC_RING_REC 0x0020
183#define LSC_SR_CO 0x0010
184#define LSC_FDX_MODE 0x0004
185
186/* Defines for OPEN ADAPTER command */
187
188#define OPEN_ADAPTER_EXT_WRAP (1<<15)
189#define OPEN_ADAPTER_DIS_HARDEE (1<<14)
190#define OPEN_ADAPTER_DIS_SOFTERR (1<<13)
191#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12)
192#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11)
193#define OPEN_ADAPTER_ENABLE_EC (1<<10)
194#define OPEN_ADAPTER_CONTENDER (1<<8)
195#define OPEN_ADAPTER_PASS_BEACON (1<<7)
196#define OPEN_ADAPTER_ENABLE_FDX (1<<6)
197#define OPEN_ADAPTER_ENABLE_RPL (1<<5)
198#define OPEN_ADAPTER_INHIBIT_ETR (1<<4)
199#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3)
200
201
202/* Defines for SRB Commands */
203#define SRB_CLOSE_ADAPTER 0x04
204#define SRB_CONFIGURE_BRIDGE 0x0c
205#define SRB_CONFIGURE_HP_CHANNEL 0x13
206#define SRB_MODIFY_BRIDGE_PARMS 0x15
207#define SRB_MODIFY_OPEN_OPTIONS 0x01
208#define SRB_MODIFY_RECEIVE_OPTIONS 0x17
209#define SRB_NO_OPERATION 0x00
210#define SRB_OPEN_ADAPTER 0x03
211#define SRB_READ_LOG 0x08
212#define SRB_READ_SR_COUNTERS 0x16
213#define SRB_RESET_GROUP_ADDRESS 0x02
214#define SRB_RESET_TARGET_SEGMETN 0x14
215#define SRB_SAVE_CONFIGURATION 0x1b
216#define SRB_SET_BRIDGE_PARMS 0x09
217#define SRB_SET_FUNC_ADDRESS 0x07
218#define SRB_SET_GROUP_ADDRESS 0x06
219#define SRB_SET_TARGET_SEGMENT 0x05
220
221/* Clear return code */
222#define STREAMER_CLEAR_RET_CODE 0xfe
223
224/* ARB Commands */
225#define ARB_RECEIVE_DATA 0x81
226#define ARB_LAN_CHANGE_STATUS 0x84
227
228/* ASB Response commands */
229#define ASB_RECEIVE_DATA 0x81
230
231
232/* Streamer defaults for buffers */
233
234#define STREAMER_RX_RING_SIZE 16 /* should be a power of 2 */
235/* Setting the number of TX descriptors to 1 is a workaround for an
236 * undocumented hardware problem with the lanstreamer board. Setting
237 * this to something higher may slightly increase the throughput you
238 * can get from the card, but at the risk of locking up the box. -
239 * <yoder1@us.ibm.com>
240 */
241#define STREAMER_TX_RING_SIZE 1 /* should be a power of 2 */
242
243#define PKT_BUF_SZ 4096 /* Default packet size */
244
245/* Streamer data structures */
246
247struct streamer_tx_desc {
248 __u32 forward;
249 __u32 status;
250 __u32 bufcnt_framelen;
251 __u32 buffer;
252 __u32 buflen;
253 __u32 rsvd1;
254 __u32 rsvd2;
255 __u32 rsvd3;
256};
257
258struct streamer_rx_desc {
259 __u32 forward;
260 __u32 status;
261 __u32 buffer;
262 __u32 framelen_buflen;
263};
264
265struct mac_receive_buffer {
266 __u16 next;
267 __u8 padding;
268 __u8 frame_status;
269 __u16 buffer_length;
270 __u8 frame_data;
271};
272
273struct streamer_private {
274
275 __u16 srb;
276 __u16 trb;
277 __u16 arb;
278 __u16 asb;
279
280 struct streamer_private *next;
281 struct pci_dev *pci_dev;
282 __u8 __iomem *streamer_mmio;
283 char *streamer_card_name;
284
285 spinlock_t streamer_lock;
286
287 volatile int srb_queued; /* True if an SRB is still posted */
288 wait_queue_head_t srb_wait;
289
290 volatile int asb_queued; /* True if an ASB is posted */
291
292 volatile int trb_queued; /* True if a TRB is posted */
293 wait_queue_head_t trb_wait;
294
295 struct streamer_rx_desc *streamer_rx_ring;
296 struct streamer_tx_desc *streamer_tx_ring;
297 struct sk_buff *tx_ring_skb[STREAMER_TX_RING_SIZE],
298 *rx_ring_skb[STREAMER_RX_RING_SIZE];
299 int tx_ring_free, tx_ring_last_status, rx_ring_last_received,
300 free_tx_ring_entries;
301
302 __u16 streamer_lan_status;
303 __u8 streamer_ring_speed;
304 __u16 pkt_buf_sz;
305 __u8 streamer_receive_options, streamer_copy_all_options,
306 streamer_message_level;
307 __u16 streamer_addr_table_addr, streamer_parms_addr;
308 __u16 mac_rx_buffer;
309 __u8 streamer_laa[6];
310};
311
312struct streamer_adapter_addr_table {
313
314 __u8 node_addr[6];
315 __u8 reserved[4];
316 __u8 func_addr[4];
317};
318
319struct streamer_parameters_table {
320
321 __u8 phys_addr[4];
322 __u8 up_node_addr[6];
323 __u8 up_phys_addr[4];
324 __u8 poll_addr[6];
325 __u16 reserved;
326 __u16 acc_priority;
327 __u16 auth_source_class;
328 __u16 att_code;
329 __u8 source_addr[6];
330 __u16 beacon_type;
331 __u16 major_vector;
332 __u16 lan_status;
333 __u16 soft_error_time;
334 __u16 reserved1;
335 __u16 local_ring;
336 __u16 mon_error;
337 __u16 beacon_transmit;
338 __u16 beacon_receive;
339 __u16 frame_correl;
340 __u8 beacon_naun[6];
341 __u32 reserved2;
342 __u8 beacon_phys[4];
343};
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
deleted file mode 100644
index 28adcdf3b14c..000000000000
--- a/drivers/net/tokenring/madgemc.c
+++ /dev/null
@@ -1,761 +0,0 @@
1/*
2 * madgemc.c: Driver for the Madge Smart 16/4 MC16 MCA token ring card.
3 *
4 * Written 2000 by Adam Fritzler
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 * This driver module supports the following cards:
10 * - Madge Smart 16/4 Ringnode MC16
11 * - Madge Smart 16/4 Ringnode MC32 (??)
12 *
13 * Maintainer(s):
14 * AF Adam Fritzler
15 *
16 * Modification History:
17 * 16-Jan-00 AF Created
18 *
19 */
20static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
21
22#include <linux/module.h>
23#include <linux/mca.h>
24#include <linux/slab.h>
25#include <linux/kernel.h>
26#include <linux/errno.h>
27#include <linux/init.h>
28#include <linux/netdevice.h>
29#include <linux/trdevice.h>
30
31#include <asm/io.h>
32#include <asm/irq.h>
33
34#include "tms380tr.h"
35#include "madgemc.h" /* Madge-specific constants */
36
37#define MADGEMC_IO_EXTENT 32
38#define MADGEMC_SIF_OFFSET 0x08
39
40struct card_info {
41 /*
42 * These are read from the BIA ROM.
43 */
44 unsigned int manid;
45 unsigned int cardtype;
46 unsigned int cardrev;
47 unsigned int ramsize;
48
49 /*
50 * These are read from the MCA POS registers.
51 */
52 unsigned int burstmode:2;
53 unsigned int fairness:1; /* 0 = Fair, 1 = Unfair */
54 unsigned int arblevel:4;
55 unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */
56 unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */
57};
58
59static int madgemc_open(struct net_device *dev);
60static int madgemc_close(struct net_device *dev);
61static int madgemc_chipset_init(struct net_device *dev);
62static void madgemc_read_rom(struct net_device *dev, struct card_info *card);
63static unsigned short madgemc_setnselout_pins(struct net_device *dev);
64static void madgemc_setcabletype(struct net_device *dev, int type);
65
66static int madgemc_mcaproc(char *buf, int slot, void *d);
67
68static void madgemc_setregpage(struct net_device *dev, int page);
69static void madgemc_setsifsel(struct net_device *dev, int val);
70static void madgemc_setint(struct net_device *dev, int val);
71
72static irqreturn_t madgemc_interrupt(int irq, void *dev_id);
73
74/*
75 * These work around paging, however they don't guarantee you're on the
76 * right page.
77 */
78#define SIFREADB(reg) (inb(dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
79#define SIFWRITEB(val, reg) (outb(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
80#define SIFREADW(reg) (inw(dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
81#define SIFWRITEW(val, reg) (outw(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
82
83/*
84 * Read a byte-length value from the register.
85 */
86static unsigned short madgemc_sifreadb(struct net_device *dev, unsigned short reg)
87{
88 unsigned short ret;
89 if (reg<0x8)
90 ret = SIFREADB(reg);
91 else {
92 madgemc_setregpage(dev, 1);
93 ret = SIFREADB(reg);
94 madgemc_setregpage(dev, 0);
95 }
96 return ret;
97}
98
99/*
100 * Write a byte-length value to a register.
101 */
102static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
103{
104 if (reg<0x8)
105 SIFWRITEB(val, reg);
106 else {
107 madgemc_setregpage(dev, 1);
108 SIFWRITEB(val, reg);
109 madgemc_setregpage(dev, 0);
110 }
111}
112
113/*
114 * Read a word-length value from a register
115 */
116static unsigned short madgemc_sifreadw(struct net_device *dev, unsigned short reg)
117{
118 unsigned short ret;
119 if (reg<0x8)
120 ret = SIFREADW(reg);
121 else {
122 madgemc_setregpage(dev, 1);
123 ret = SIFREADW(reg);
124 madgemc_setregpage(dev, 0);
125 }
126 return ret;
127}
128
129/*
130 * Write a word-length value to a register.
131 */
132static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
133{
134 if (reg<0x8)
135 SIFWRITEW(val, reg);
136 else {
137 madgemc_setregpage(dev, 1);
138 SIFWRITEW(val, reg);
139 madgemc_setregpage(dev, 0);
140 }
141}
142
143static struct net_device_ops madgemc_netdev_ops __read_mostly;
144
145static int __devinit madgemc_probe(struct device *device)
146{
147 static int versionprinted;
148 struct net_device *dev;
149 struct net_local *tp;
150 struct card_info *card;
151 struct mca_device *mdev = to_mca_device(device);
152 int ret = 0;
153
154 if (versionprinted++ == 0)
155 printk("%s", version);
156
157 if(mca_device_claimed(mdev))
158 return -EBUSY;
159 mca_device_set_claim(mdev, 1);
160
161 dev = alloc_trdev(sizeof(struct net_local));
162 if (!dev) {
163 printk("madgemc: unable to allocate dev space\n");
164 mca_device_set_claim(mdev, 0);
165 ret = -ENOMEM;
166 goto getout;
167 }
168
169 dev->netdev_ops = &madgemc_netdev_ops;
170
171 card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
172 if (card==NULL) {
173 ret = -ENOMEM;
174 goto getout1;
175 }
176
177 /*
178 * Parse configuration information. This all comes
179 * directly from the publicly available @002d.ADF.
180 * Get it from Madge or your local ADF library.
181 */
182
183 /*
184 * Base address
185 */
186 dev->base_addr = 0x0a20 +
187 ((mdev->pos[2] & MC16_POS2_ADDR2)?0x0400:0) +
188 ((mdev->pos[0] & MC16_POS0_ADDR1)?0x1000:0) +
189 ((mdev->pos[3] & MC16_POS3_ADDR3)?0x2000:0);
190
191 /*
192 * Interrupt line
193 */
194 switch(mdev->pos[0] >> 6) { /* upper two bits */
195 case 0x1: dev->irq = 3; break;
196 case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */
197 case 0x3: dev->irq = 10; break;
198 default: dev->irq = 0; break;
199 }
200
201 if (dev->irq == 0) {
202 printk("%s: invalid IRQ\n", dev->name);
203 ret = -EBUSY;
204 goto getout2;
205 }
206
207 if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT,
208 "madgemc")) {
209 printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", mdev->slot, dev->base_addr);
210 dev->base_addr += MADGEMC_SIF_OFFSET;
211 ret = -EBUSY;
212 goto getout2;
213 }
214 dev->base_addr += MADGEMC_SIF_OFFSET;
215
216 /*
217 * Arbitration Level
218 */
219 card->arblevel = ((mdev->pos[0] >> 1) & 0x7) + 8;
220
221 /*
222 * Burst mode and Fairness
223 */
224 card->burstmode = ((mdev->pos[2] >> 6) & 0x3);
225 card->fairness = ((mdev->pos[2] >> 4) & 0x1);
226
227 /*
228 * Ring Speed
229 */
230 if ((mdev->pos[1] >> 2)&0x1)
231 card->ringspeed = 2; /* not selected */
232 else if ((mdev->pos[2] >> 5) & 0x1)
233 card->ringspeed = 1; /* 16Mb */
234 else
235 card->ringspeed = 0; /* 4Mb */
236
237 /*
238 * Cable type
239 */
240 if ((mdev->pos[1] >> 6)&0x1)
241 card->cabletype = 1; /* STP/DB9 */
242 else
243 card->cabletype = 0; /* UTP/RJ-45 */
244
245
246 /*
247 * ROM Info. This requires us to actually twiddle
248 * bits on the card, so we must ensure above that
249 * the base address is free of conflict (request_region above).
250 */
251 madgemc_read_rom(dev, card);
252
253 if (card->manid != 0x4d) { /* something went wrong */
254 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid);
255 goto getout3;
256 }
257
258 if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) {
259 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype);
260 ret = -EIO;
261 goto getout3;
262 }
263
264 /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */
265 if ((card->cardtype == 0x08) && (card->cardrev <= 0x01))
266 card->ramsize = 128;
267 else
268 card->ramsize = 256;
269
270 printk("%s: %s Rev %d at 0x%04lx IRQ %d\n",
271 dev->name,
272 (card->cardtype == 0x08)?MADGEMC16_CARDNAME:
273 MADGEMC32_CARDNAME, card->cardrev,
274 dev->base_addr, dev->irq);
275
276 if (card->cardtype == 0x0d)
277 printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name);
278
279 if (card->ringspeed==2) { /* Unknown */
280 printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name);
281 card->ringspeed = 1; /* default to 16mb */
282 }
283
284 printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize);
285
286 printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name,
287 (card->ringspeed)?16:4,
288 card->cabletype?"STP/DB9":"UTP/RJ-45");
289 printk("%s: Arbitration Level: %d\n", dev->name,
290 card->arblevel);
291
292 printk("%s: Burst Mode: ", dev->name);
293 switch(card->burstmode) {
294 case 0: printk("Cycle steal"); break;
295 case 1: printk("Limited burst"); break;
296 case 2: printk("Delayed release"); break;
297 case 3: printk("Immediate release"); break;
298 }
299 printk(" (%s)\n", (card->fairness)?"Unfair":"Fair");
300
301
302 /*
303 * Enable SIF before we assign the interrupt handler,
304 * just in case we get spurious interrupts that need
305 * handling.
306 */
307 outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
308 madgemc_setsifsel(dev, 1);
309 if (request_irq(dev->irq, madgemc_interrupt, IRQF_SHARED,
310 "madgemc", dev)) {
311 ret = -EBUSY;
312 goto getout3;
313 }
314
315 madgemc_chipset_init(dev); /* enables interrupts! */
316 madgemc_setcabletype(dev, card->cabletype);
317
318 /* Setup MCA structures */
319 mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
320 mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev);
321
322 printk("%s: Ring Station Address: %pM\n",
323 dev->name, dev->dev_addr);
324
325 if (tmsdev_init(dev, device)) {
326 printk("%s: unable to get memory for dev->priv.\n",
327 dev->name);
328 ret = -ENOMEM;
329 goto getout4;
330 }
331 tp = netdev_priv(dev);
332
333 /*
334 * The MC16 is physically a 32bit card. However, Madge
335 * insists on calling it 16bit, so I'll assume here that
336 * they know what they're talking about. Cut off DMA
337 * at 16mb.
338 */
339 tp->setnselout = madgemc_setnselout_pins;
340 tp->sifwriteb = madgemc_sifwriteb;
341 tp->sifreadb = madgemc_sifreadb;
342 tp->sifwritew = madgemc_sifwritew;
343 tp->sifreadw = madgemc_sifreadw;
344 tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4;
345
346 memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
347
348 tp->tmspriv = card;
349 dev_set_drvdata(device, dev);
350
351 if (register_netdev(dev) == 0)
352 return 0;
353
354 dev_set_drvdata(device, NULL);
355 ret = -ENOMEM;
356getout4:
357 free_irq(dev->irq, dev);
358getout3:
359 release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
360 MADGEMC_IO_EXTENT);
361getout2:
362 kfree(card);
363getout1:
364 free_netdev(dev);
365getout:
366 mca_device_set_claim(mdev, 0);
367 return ret;
368}
369
370/*
371 * Handle interrupts generated by the card
372 *
373 * The MicroChannel Madge cards need slightly more handling
374 * after an interrupt than other TMS380 cards do.
375 *
376 * First we must make sure it was this card that generated the
377 * interrupt (since interrupt sharing is allowed). Then,
378 * because we're using level-triggered interrupts (as is
379 * standard on MCA), we must toggle the interrupt line
380 * on the card in order to claim and acknowledge the interrupt.
381 * Once that is done, the interrupt should be handlable in
382 * the normal tms380tr_interrupt() routine.
383 *
384 * There's two ways we can check to see if the interrupt is ours,
385 * both with their own disadvantages...
386 *
387 * 1) Read in the SIFSTS register from the TMS controller. This
388 * is guaranteed to be accurate, however, there's a fairly
389 * large performance penalty for doing so: the Madge chips
390 * must request the register from the Eagle, the Eagle must
391 * read them from its internal bus, and then take the route
392 * back out again, for a 16bit read.
393 *
394 * 2) Use the MC_CONTROL_REG0_SINTR bit from the Madge ASICs.
395 * The major disadvantage here is that the accuracy of the
396 * bit is in question. However, it cuts out the extra read
397 * cycles it takes to read the Eagle's SIF, as its only an
398 * 8bit read, and theoretically the Madge bit is directly
399 * connected to the interrupt latch coming out of the Eagle
400 * hardware (that statement is not verified).
401 *
402 * I can't determine which of these methods has the best win. For now,
403 * we make a compromise. Use the Madge way for the first interrupt,
404 * which should be the fast-path, and then once we hit the first
405 * interrupt, keep on trying using the SIF method until we've
406 * exhausted all contiguous interrupts.
407 *
408 */
409static irqreturn_t madgemc_interrupt(int irq, void *dev_id)
410{
411 int pending,reg1;
412 struct net_device *dev;
413
414 if (!dev_id) {
415 printk("madgemc_interrupt: was not passed a dev_id!\n");
416 return IRQ_NONE;
417 }
418
419 dev = dev_id;
420
421 /* Make sure its really us. -- the Madge way */
422 pending = inb(dev->base_addr + MC_CONTROL_REG0);
423 if (!(pending & MC_CONTROL_REG0_SINTR))
424 return IRQ_NONE; /* not our interrupt */
425
426 /*
427 * Since we're level-triggered, we may miss the rising edge
428 * of the next interrupt while we're off handling this one,
429 * so keep checking until the SIF verifies that it has nothing
430 * left for us to do.
431 */
432 pending = STS_SYSTEM_IRQ;
433 do {
434 if (pending & STS_SYSTEM_IRQ) {
435
436 /* Toggle the interrupt to reset the latch on card */
437 reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
438 outb(reg1 ^ MC_CONTROL_REG1_SINTEN,
439 dev->base_addr + MC_CONTROL_REG1);
440 outb(reg1, dev->base_addr + MC_CONTROL_REG1);
441
442 /* Continue handling as normal */
443 tms380tr_interrupt(irq, dev_id);
444
445 pending = SIFREADW(SIFSTS); /* restart - the SIF way */
446
447 } else
448 return IRQ_HANDLED;
449 } while (1);
450
451 return IRQ_HANDLED; /* not reachable */
452}
453
454/*
455 * Set the card to the preferred ring speed.
456 *
457 * Unlike newer cards, the MC16/32 have their speed selection
458 * circuit connected to the Madge ASICs and not to the TMS380
459 * NSELOUT pins. Set the ASIC bits correctly here, and return
460 * zero to leave the TMS NSELOUT bits unaffected.
461 *
462 */
463static unsigned short madgemc_setnselout_pins(struct net_device *dev)
464{
465 unsigned char reg1;
466 struct net_local *tp = netdev_priv(dev);
467
468 reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
469
470 if(tp->DataRate == SPEED_16)
471 reg1 |= MC_CONTROL_REG1_SPEED_SEL; /* add for 16mb */
472 else if (reg1 & MC_CONTROL_REG1_SPEED_SEL)
473 reg1 ^= MC_CONTROL_REG1_SPEED_SEL; /* remove for 4mb */
474 outb(reg1, dev->base_addr + MC_CONTROL_REG1);
475
476 return 0; /* no change */
477}
478
479/*
480 * Set the register page. This equates to the SRSX line
481 * on the TMS380Cx6.
482 *
483 * Register selection is normally done via three contiguous
484 * bits. However, some boards (such as the MC16/32) use only
485 * two bits, plus a separate bit in the glue chip. This
486 * sets the SRSX bit (the top bit). See page 4-17 in the
487 * Yellow Book for which registers are affected.
488 *
489 */
490static void madgemc_setregpage(struct net_device *dev, int page)
491{
492 static int reg1;
493
494 reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
495 if ((page == 0) && (reg1 & MC_CONTROL_REG1_SRSX)) {
496 outb(reg1 ^ MC_CONTROL_REG1_SRSX,
497 dev->base_addr + MC_CONTROL_REG1);
498 }
499 else if (page == 1) {
500 outb(reg1 | MC_CONTROL_REG1_SRSX,
501 dev->base_addr + MC_CONTROL_REG1);
502 }
503 reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
504}
505
506/*
507 * The SIF registers are not mapped into register space by default
508 * Set this to 1 to map them, 0 to map the BIA ROM.
509 *
510 */
511static void madgemc_setsifsel(struct net_device *dev, int val)
512{
513 unsigned int reg0;
514
515 reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
516 if ((val == 0) && (reg0 & MC_CONTROL_REG0_SIFSEL)) {
517 outb(reg0 ^ MC_CONTROL_REG0_SIFSEL,
518 dev->base_addr + MC_CONTROL_REG0);
519 } else if (val == 1) {
520 outb(reg0 | MC_CONTROL_REG0_SIFSEL,
521 dev->base_addr + MC_CONTROL_REG0);
522 }
523 reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
524}
525
526/*
527 * Enable SIF interrupts
528 *
529 * This does not enable interrupts in the SIF, but rather
530 * enables SIF interrupts to be passed onto the host.
531 *
532 */
533static void madgemc_setint(struct net_device *dev, int val)
534{
535 unsigned int reg1;
536
537 reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
538 if ((val == 0) && (reg1 & MC_CONTROL_REG1_SINTEN)) {
539 outb(reg1 ^ MC_CONTROL_REG1_SINTEN,
540 dev->base_addr + MC_CONTROL_REG1);
541 } else if (val == 1) {
542 outb(reg1 | MC_CONTROL_REG1_SINTEN,
543 dev->base_addr + MC_CONTROL_REG1);
544 }
545}
546
547/*
548 * Cable type is set via control register 7. Bit zero high
549 * for UTP, low for STP.
550 */
551static void madgemc_setcabletype(struct net_device *dev, int type)
552{
553 outb((type==0)?MC_CONTROL_REG7_CABLEUTP:MC_CONTROL_REG7_CABLESTP,
554 dev->base_addr + MC_CONTROL_REG7);
555}
556
557/*
558 * Enable the functions of the Madge chipset needed for
559 * full working order.
560 */
561static int madgemc_chipset_init(struct net_device *dev)
562{
563 outb(0, dev->base_addr + MC_CONTROL_REG1); /* pull SRESET low */
564 tms380tr_wait(100); /* wait for card to reset */
565
566 /* bring back into normal operating mode */
567 outb(MC_CONTROL_REG1_NSRESET, dev->base_addr + MC_CONTROL_REG1);
568
569 /* map SIF registers */
570 madgemc_setsifsel(dev, 1);
571
572 /* enable SIF interrupts */
573 madgemc_setint(dev, 1);
574
575 return 0;
576}
577
578/*
579 * Disable the board, and put back into power-up state.
580 */
581static void madgemc_chipset_close(struct net_device *dev)
582{
583 /* disable interrupts */
584 madgemc_setint(dev, 0);
585 /* unmap SIF registers */
586 madgemc_setsifsel(dev, 0);
587}
588
589/*
590 * Read the card type (MC16 or MC32) from the card.
591 *
592 * The configuration registers are stored in two separate
593 * pages. Pages are flipped by clearing bit 3 of CONTROL_REG0 (PAGE)
594 * for page zero, or setting bit 3 for page one.
595 *
596 * Page zero contains the following data:
597 * Byte 0: Manufacturer ID (0x4D -- ASCII "M")
598 * Byte 1: Card type:
599 * 0x08 for MC16
600 * 0x0D for MC32
601 * Byte 2: Card revision
602 * Byte 3: Mirror of POS config register 0
603 * Byte 4: Mirror of POS 1
604 * Byte 5: Mirror of POS 2
605 *
606 * Page one contains the following data:
607 * Byte 0: Unused
608 * Byte 1-6: BIA, MSB to LSB.
609 *
610 * Note that to read the BIA, we must unmap the SIF registers
611 * by clearing bit 2 of CONTROL_REG0 (SIFSEL), as the data
612 * will reside in the same logical location. For this reason,
613 * _never_ read the BIA while the Eagle processor is running!
614 * The SIF will be completely inaccessible until the BIA operation
615 * is complete.
616 *
617 */
618static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
619{
620 unsigned long ioaddr;
621 unsigned char reg0, reg1, tmpreg0, i;
622
623 ioaddr = dev->base_addr;
624
625 reg0 = inb(ioaddr + MC_CONTROL_REG0);
626 reg1 = inb(ioaddr + MC_CONTROL_REG1);
627
628 /* Switch to page zero and unmap SIF */
629 tmpreg0 = reg0 & ~(MC_CONTROL_REG0_PAGE + MC_CONTROL_REG0_SIFSEL);
630 outb(tmpreg0, ioaddr + MC_CONTROL_REG0);
631
632 card->manid = inb(ioaddr + MC_ROM_MANUFACTURERID);
633 card->cardtype = inb(ioaddr + MC_ROM_ADAPTERID);
634 card->cardrev = inb(ioaddr + MC_ROM_REVISION);
635
636 /* Switch to rom page one */
637 outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0);
638
639 /* Read BIA */
640 dev->addr_len = 6;
641 for (i = 0; i < 6; i++)
642 dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i);
643
644 /* Restore original register values */
645 outb(reg0, ioaddr + MC_CONTROL_REG0);
646 outb(reg1, ioaddr + MC_CONTROL_REG1);
647}
648
649static int madgemc_open(struct net_device *dev)
650{
651 /*
652 * Go ahead and reinitialize the chipset again, just to
653 * make sure we didn't get left in a bad state.
654 */
655 madgemc_chipset_init(dev);
656 tms380tr_open(dev);
657 return 0;
658}
659
660static int madgemc_close(struct net_device *dev)
661{
662 tms380tr_close(dev);
663 madgemc_chipset_close(dev);
664 return 0;
665}
666
667/*
668 * Give some details available from /proc/mca/slotX
669 */
670static int madgemc_mcaproc(char *buf, int slot, void *d)
671{
672 struct net_device *dev = (struct net_device *)d;
673 struct net_local *tp = netdev_priv(dev);
674 struct card_info *curcard = tp->tmspriv;
675 int len = 0;
676
677 len += sprintf(buf+len, "-------\n");
678 if (curcard) {
679 len += sprintf(buf+len, "Card Revision: %d\n", curcard->cardrev);
680 len += sprintf(buf+len, "RAM Size: %dkb\n", curcard->ramsize);
681 len += sprintf(buf+len, "Cable type: %s\n", (curcard->cabletype)?"STP/DB9":"UTP/RJ-45");
682 len += sprintf(buf+len, "Configured ring speed: %dMb/sec\n", (curcard->ringspeed)?16:4);
683 len += sprintf(buf+len, "Running ring speed: %dMb/sec\n", (tp->DataRate==SPEED_16)?16:4);
684 len += sprintf(buf+len, "Device: %s\n", dev->name);
685 len += sprintf(buf+len, "IO Port: 0x%04lx\n", dev->base_addr);
686 len += sprintf(buf+len, "IRQ: %d\n", dev->irq);
687 len += sprintf(buf+len, "Arbitration Level: %d\n", curcard->arblevel);
688 len += sprintf(buf+len, "Burst Mode: ");
689 switch(curcard->burstmode) {
690 case 0: len += sprintf(buf+len, "Cycle steal"); break;
691 case 1: len += sprintf(buf+len, "Limited burst"); break;
692 case 2: len += sprintf(buf+len, "Delayed release"); break;
693 case 3: len += sprintf(buf+len, "Immediate release"); break;
694 }
695 len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair");
696
697 len += sprintf(buf+len, "Ring Station Address: %pM\n",
698 dev->dev_addr);
699 } else
700 len += sprintf(buf+len, "Card not configured\n");
701
702 return len;
703}
704
705static int __devexit madgemc_remove(struct device *device)
706{
707 struct net_device *dev = dev_get_drvdata(device);
708 struct net_local *tp;
709 struct card_info *card;
710
711 BUG_ON(!dev);
712
713 tp = netdev_priv(dev);
714 card = tp->tmspriv;
715 kfree(card);
716 tp->tmspriv = NULL;
717
718 unregister_netdev(dev);
719 release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT);
720 free_irq(dev->irq, dev);
721 tmsdev_term(dev);
722 free_netdev(dev);
723 dev_set_drvdata(device, NULL);
724
725 return 0;
726}
727
728static short madgemc_adapter_ids[] __initdata = {
729 0x002d,
730 0x0000
731};
732
733static struct mca_driver madgemc_driver = {
734 .id_table = madgemc_adapter_ids,
735 .driver = {
736 .name = "madgemc",
737 .bus = &mca_bus_type,
738 .probe = madgemc_probe,
739 .remove = __devexit_p(madgemc_remove),
740 },
741};
742
743static int __init madgemc_init (void)
744{
745 madgemc_netdev_ops = tms380tr_netdev_ops;
746 madgemc_netdev_ops.ndo_open = madgemc_open;
747 madgemc_netdev_ops.ndo_stop = madgemc_close;
748
749 return mca_register_driver (&madgemc_driver);
750}
751
752static void __exit madgemc_exit (void)
753{
754 mca_unregister_driver (&madgemc_driver);
755}
756
757module_init(madgemc_init);
758module_exit(madgemc_exit);
759
760MODULE_LICENSE("GPL");
761
diff --git a/drivers/net/tokenring/madgemc.h b/drivers/net/tokenring/madgemc.h
deleted file mode 100644
index fe88e272c531..000000000000
--- a/drivers/net/tokenring/madgemc.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * madgemc.h: Header for the madgemc tms380tr module
3 *
4 * Authors:
5 *  - Adam Fritzler
6 */
7
8#ifndef __LINUX_MADGEMC_H
9#define __LINUX_MADGEMC_H
10
11#ifdef __KERNEL__
12
13#define MADGEMC16_CARDNAME "Madge Smart 16/4 MC16 Ringnode"
14#define MADGEMC32_CARDNAME "Madge Smart 16/4 MC32 Ringnode"
15
16/*
17 * Bit definitions for the POS config registers
18 */
19#define MC16_POS0_ADDR1 0x20
20#define MC16_POS2_ADDR2 0x04
21#define MC16_POS3_ADDR3 0x20
22
/* NOTE(review): offsets are negative -- these registers appear to be
 * addressed relative to the SIF window base (see MADGEMC_SIF_OFFSET in
 * madgemc.c); the hex comments give the absolute register number.
 * Confirm against the .c file's I/O accessors. */
23#define MC_CONTROL_REG0		((long)-8) /* 0x00 */
24#define MC_CONTROL_REG1		((long)-7) /* 0x01 */
25#define MC_ADAPTER_POS_REG0	((long)-6) /* 0x02 */
26#define MC_ADAPTER_POS_REG1	((long)-5) /* 0x03 */
27#define MC_ADAPTER_POS_REG2	((long)-4) /* 0x04 */
28#define MC_ADAPTER_REG5_UNUSED	((long)-3) /* 0x05 */
29#define MC_ADAPTER_REG6_UNUSED	((long)-2) /* 0x06 */
30#define MC_CONTROL_REG7		((long)-1) /* 0x07 */
31
/* Bit masks within MC_CONTROL_REG0. */
32#define MC_CONTROL_REG0_UNKNOWN1	0x01
33#define MC_CONTROL_REG0_UNKNOWN2	0x02
34#define MC_CONTROL_REG0_SIFSEL		0x04
35#define MC_CONTROL_REG0_PAGE		0x08
36#define MC_CONTROL_REG0_TESTINTERRUPT	0x10
37#define MC_CONTROL_REG0_UNKNOWN20	0x20
38#define MC_CONTROL_REG0_SINTR		0x40
39#define MC_CONTROL_REG0_UNKNOWN80	0x80
40
/* Bit masks within MC_CONTROL_REG1. */
41#define MC_CONTROL_REG1_SINTEN		0x01
42#define MC_CONTROL_REG1_BITOFDEATH	0x02
43#define MC_CONTROL_REG1_NSRESET		0x04
44#define MC_CONTROL_REG1_UNKNOWN8	0x08
45#define MC_CONTROL_REG1_UNKNOWN10	0x10
46#define MC_CONTROL_REG1_UNKNOWN20	0x20
47#define MC_CONTROL_REG1_SRSX		0x40
48#define MC_CONTROL_REG1_SPEED_SEL	0x80
49
50#define MC_CONTROL_REG7_CABLESTP	0x00
51#define MC_CONTROL_REG7_CABLEUTP	0x01
52
53/*
54 * ROM Page Zero
55 */
56#define MC_ROM_MANUFACTURERID	0x00
57#define MC_ROM_ADAPTERID	0x01
58#define MC_ROM_REVISION		0x02
59#define MC_ROM_CONFIG0		0x03
60#define MC_ROM_CONFIG1		0x04
61#define MC_ROM_CONFIG2		0x05
62
63/*
64 * ROM Page One
65 */
66#define MC_ROM_UNUSED_BYTE	0x00
67#define MC_ROM_BIA_START	0x01
68
69#endif /* __KERNEL__ */
70#endif /* __LINUX_MADGEMC_H */
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
deleted file mode 100644
index 0e234741cc79..000000000000
--- a/drivers/net/tokenring/olympic.c
+++ /dev/null
@@ -1,1749 +0,0 @@
1/*
2 * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999/2000 Mike Phillips (mikep@linuxtr.net)
4 *
5 * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
6 * chipset.
7 *
8 * Base Driver Skeleton:
9 * Written 1993-94 by Donald Becker.
10 *
11 * Copyright 1993 United States Government as represented by the
12 * Director, National Security Agency.
13 *
14 * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
15 *  assistance and perseverance with the testing of this driver.
16 *
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
19 *
20 * 4/27/99 - Alpha Release 0.1.0
21 * First release to the public
22 *
23 * 6/8/99 - Official Release 0.2.0
24 * Merged into the kernel code
25 * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 * resource. Driver also reports the card name returned by
27 * the pci resource.
28 * 1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_irq
30 * 3/10/00 - Fixed FDX enable which triggered other bugs also
31 * squashed.
32 * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 * The odd thing about the changes is that the fix for
34 * endian issues with the big-endian data in the arb, asb...
35 * was to always swab() the bytes, no matter what CPU.
36 * That's because the read[wl]() functions always swap the
37 * bytes on the way in on PPC.
38 * Fixing the hardware descriptors was another matter,
39 * because they weren't going through read[wl](), there all
40 * the results had to be in memory in le32 values. kdaaker
41 *
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
43 *
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
45 *
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 * Change proc_fs behaviour, now one entry per adapter.
48 *
49 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
50 * adapter when live does not take the system down with it.
51 *
52 * 06/02/01 - Clean up, copy skb for small packets
53 *
54 * 06/22/01 - Add EISR error handling routines
55 *
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 * into a separate function, its called from 3
58 * different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * silently ignored until the error checking code
65 * went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * Required for strict compliance with pci power mgmt specs.
68 * To Do:
69 *
70 * Wake on lan
71 *
72 * If Problems do Occur
73 * Most problems can be rectified by either closing and opening the interface
74 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 * if compiled into the kernel).
76 */
77
78/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
79
80#define OLYMPIC_DEBUG 0
81
82
83#include <linux/module.h>
84#include <linux/kernel.h>
85#include <linux/errno.h>
86#include <linux/timer.h>
87#include <linux/in.h>
88#include <linux/ioport.h>
89#include <linux/seq_file.h>
90#include <linux/string.h>
91#include <linux/proc_fs.h>
92#include <linux/ptrace.h>
93#include <linux/skbuff.h>
94#include <linux/interrupt.h>
95#include <linux/delay.h>
96#include <linux/netdevice.h>
97#include <linux/trdevice.h>
98#include <linux/stddef.h>
99#include <linux/init.h>
100#include <linux/pci.h>
101#include <linux/spinlock.h>
102#include <linux/bitops.h>
103#include <linux/jiffies.h>
104
105#include <net/checksum.h>
106#include <net/net_namespace.h>
107
108#include <asm/io.h>
109
110#include "olympic.h"
111
112/* I've got to put some intelligence into the version number so that Peter and I know
113 * which version of the code somebody has got.
114 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
115 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
116 *
117 * Official releases will only have an a.b.c version number format.
118 */
119
/* Driver banner printed once at init time. */
120static char version[] =
121"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
122
/* Human-readable decode tables for the SRB open-error byte: the major
 * phase lives in the high nibble, the minor reason in the low nibble
 * (see the 0x07 case in olympic_open()).  These are runtime strings --
 * do not edit. */
123static char *open_maj_error[]  = {"No error", "Lobe Media Test", "Physical Insertion",
124				  "Address Verification", "Neighbor Notification (Ring Poll)",
125				  "Request Parameters","FDX Registration Request",
126				  "FDX Duplicate Address Check", "Station registration Query Wait",
127				  "Unknown stage"};
128
129static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
130				 "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
131				 "Duplicate Node Address","Request Parameters","Remove Received",
132				 "Reserved", "Reserved", "No Monitor Detected for RPL",
133				 "Monitor Contention failer for RPL", "FDX Protocol Error"};
134
135/* Module parameters */
136
137MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
138MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
139
140/* Ring Speed 0,4,16,100
141 * 0 = Autosense
142 * 4,16 = Selected speed only, no autosense
143 * This allows the card to be the first on the ring
144 * and become the active monitor.
145 * 100 = Nothing at present, 100mbps is autodetected
146 * if FDX is turned on. May be implemented in the future to
147 *            fail if 100mbps is not detected.
148 *
149 * WARNING: Some hubs will allow you to insert
150 * at the wrong speed
151 */
152
/* All module parameters are per-adapter arrays indexed by probe order
 * (card_no in olympic_probe()); 0 means "use the default". */
153static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
154module_param_array(ringspeed, int, NULL, 0);
155
156/* Packet buffer size */
157
158static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
159module_param_array(pkt_buf_sz, int, NULL, 0) ;
160
161/* Message Level */
162
163static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
164module_param_array(message_level, int, NULL, 0) ;
165
166/* Change network_monitor to receive mac frames through the arb channel.
167 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
168 * device, i.e. tr0, tr1 etc.
169 * Intended to be used to create a ring-error reporting network module
170 * i.e. it will give you the source address of beaconers on the ring
171 */
172static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
173module_param_array(network_monitor, int, NULL, 0);
174
/* PCI IDs this driver claims: a single IBM token-ring device. */
175static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
176	{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
177	{ } 			/* Terminating Entry */
178};
179MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
180
181
182static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
183static int olympic_init(struct net_device *dev);
184static int olympic_open(struct net_device *dev);
185static netdev_tx_t olympic_xmit(struct sk_buff *skb,
186 struct net_device *dev);
187static int olympic_close(struct net_device *dev);
188static void olympic_set_rx_mode(struct net_device *dev);
189static void olympic_freemem(struct net_device *dev) ;
190static irqreturn_t olympic_interrupt(int irq, void *dev_id);
191static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
192static void olympic_arb_cmd(struct net_device *dev);
193static int olympic_change_mtu(struct net_device *dev, int mtu);
194static void olympic_srb_bh(struct net_device *dev) ;
195static void olympic_asb_bh(struct net_device *dev) ;
196static const struct file_operations olympic_proc_ops;
197
/* net_device callbacks shared by every Olympic adapter instance. */
198static const struct net_device_ops olympic_netdev_ops = {
199	.ndo_open		= olympic_open,
200	.ndo_stop		= olympic_close,
201	.ndo_start_xmit		= olympic_xmit,
202	.ndo_change_mtu		= olympic_change_mtu,
203	.ndo_set_rx_mode	= olympic_set_rx_mode,
204	.ndo_set_mac_address	= olympic_set_mac_address,
205};
206
207static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
208{
209 struct net_device *dev ;
210 struct olympic_private *olympic_priv;
211 static int card_no = -1 ;
212 int i ;
213
214 card_no++ ;
215
216 if ((i = pci_enable_device(pdev))) {
217 return i ;
218 }
219
220 pci_set_master(pdev);
221
222 if ((i = pci_request_regions(pdev,"olympic"))) {
223 goto op_disable_dev;
224 }
225
226 dev = alloc_trdev(sizeof(struct olympic_private)) ;
227 if (!dev) {
228 i = -ENOMEM;
229 goto op_release_dev;
230 }
231
232 olympic_priv = netdev_priv(dev) ;
233
234 spin_lock_init(&olympic_priv->olympic_lock) ;
235
236 init_waitqueue_head(&olympic_priv->srb_wait);
237 init_waitqueue_head(&olympic_priv->trb_wait);
238#if OLYMPIC_DEBUG
239 printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev));
240#endif
241 dev->irq=pdev->irq;
242 dev->base_addr=pci_resource_start(pdev, 0);
243 olympic_priv->olympic_card_name = pci_name(pdev);
244 olympic_priv->pdev = pdev;
245 olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
246 olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
247 if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
248 goto op_free_iomap;
249 }
250
251 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
252 olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
253 else
254 olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
255
256 dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
257 olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
258 olympic_priv->olympic_message_level = message_level[card_no] ;
259 olympic_priv->olympic_network_monitor = network_monitor[card_no];
260
261 if ((i = olympic_init(dev))) {
262 goto op_free_iomap;
263 }
264
265 dev->netdev_ops = &olympic_netdev_ops;
266 SET_NETDEV_DEV(dev, &pdev->dev);
267
268 pci_set_drvdata(pdev,dev) ;
269 register_netdev(dev) ;
270 printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
271 if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
272 char proc_name[20] ;
273 strcpy(proc_name,"olympic_") ;
274 strcat(proc_name,dev->name) ;
275 proc_create_data(proc_name, 0, init_net.proc_net, &olympic_proc_ops, dev);
276 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
277 }
278 return 0 ;
279
280op_free_iomap:
281 if (olympic_priv->olympic_mmio)
282 iounmap(olympic_priv->olympic_mmio);
283 if (olympic_priv->olympic_lap)
284 iounmap(olympic_priv->olympic_lap);
285
286 free_netdev(dev);
287op_release_dev:
288 pci_release_regions(pdev);
289
290op_disable_dev:
291 pci_disable_device(pdev);
292 return i;
293}
294
/*
 * olympic_init - soft-reset and bring up the Olympic chipset.
 * @dev: the token-ring net_device whose priv holds the mapped windows.
 *
 * Sequence: soft reset (polled, 40s budget), cardbus interrupt fixup,
 * ring-speed programming via GPR, cardbus "solo" unpause, solo init
 * kick, then parse the init SRB for the burned-in address and the
 * adapter/parameter table offsets.  The register access ORDER here is
 * hardware-mandated, so the code is left byte-for-byte untouched.
 *
 * Returns 0 on success, -ENODEV if the adapter fails to respond or the
 * init SRB reports an error.
 */
295static int olympic_init(struct net_device *dev)
296{
297    	struct olympic_private *olympic_priv;
298	u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
299	unsigned long t;
300	unsigned int uaa_addr;
301
302	olympic_priv=netdev_priv(dev);
303	olympic_mmio=olympic_priv->olympic_mmio;
304
305	printk("%s\n", version);
306	printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
307
	/* Soft reset and busy-wait (with scheduling) for the bit to clear. */
308	writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
309	t=jiffies;
310	while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
311		schedule();
312		if(time_after(jiffies, t + 40*HZ)) {
313			printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
314			return -ENODEV;
315		}
316	}
317
318
319	/* Needed for cardbus */
320	if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
321		writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
322	}
323
324#if OLYMPIC_DEBUG
325	printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
326	printk("GPR: %x\n",readw(olympic_mmio+GPR));
327	printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
328#endif
329	/* Aaaahhh, You have got to be real careful setting GPR, the card
330	   holds the previous values from flash memory, including autosense
331           and ring speed */
332
333	writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
334
	/* Program ring speed per the module parameter: 0=autosense,
	 * 16=force 16Mbps, 4=force 4Mbps (GPR cleared). */
335	if (olympic_priv->olympic_ring_speed  == 0) { /* Autosense */
336		writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
337		if (olympic_priv->olympic_message_level)
338			printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
339	} else if (olympic_priv->olympic_ring_speed == 16) {
340		if (olympic_priv->olympic_message_level)
341			printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
342		writew(GPR_16MBPS, olympic_mmio+GPR);
343	} else if (olympic_priv->olympic_ring_speed == 4) {
344		if (olympic_priv->olympic_message_level)
345			printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
346		writew(0, olympic_mmio+GPR);
347	}
348
349	writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
350
351#if OLYMPIC_DEBUG
352	printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
353#endif
354	/* Solo has been paused to meet the Cardbus power
355	 * specs if the adapter is cardbus. Check to
356	 * see its been paused and then restart solo. The
357	 * adapter should set the pause bit within 1 second.
358	 */
359
360	if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
361		t=jiffies;
362		while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
363			schedule() ;
364			if(time_after(jiffies, t + 2*HZ)) {
365				printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
366				return -ENODEV;
367			}
368		}
369		writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
370	}
371
372	/* start solo init */
373	writel((1<<15),olympic_mmio+SISR_MASK_SUM);
374
	/* Wait (up to 15s) for the init SRB reply to land. */
375	t=jiffies;
376	while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
377		schedule();
378		if(time_after(jiffies, t + 15*HZ)) {
379			printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
380			return -ENODEV;
381		}
382	}
383
384	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
385
386#if OLYMPIC_DEBUG
387	printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
388#endif
389
	/* The SRB lives in the LAP window; mask to the 2K window size. */
390	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
391
392#if OLYMPIC_DEBUG
393{
394	int i;
395	printk("init_srb(%p): ",init_srb);
396	for(i=0;i<20;i++)
397		printk("%x ",readb(init_srb+i));
398	printk("\n");
399}
400#endif
401	if(readw(init_srb+6)) {
402		printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
403		return -ENODEV;
404	}
405
406	if (olympic_priv->olympic_message_level) {
407		if ( readb(init_srb +2) & 0x40) {
408			printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
409		} else {
410			printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
411		}
412	}
413
	/* SRB fields are big-endian on the wire; swab for the CPU. */
414	uaa_addr=swab16(readw(init_srb+8));
415
416#if OLYMPIC_DEBUG
417	printk("UAA resides at %x\n",uaa_addr);
418#endif
419
420	writel(uaa_addr,olympic_mmio+LAPA);
421	adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
422
	/* Burned-in (universal) MAC address, 6 bytes. */
423	memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
424
425#if OLYMPIC_DEBUG
426	printk("adapter address: %pM\n", dev->dev_addr);
427#endif
428
429	olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
430	olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
431
432	return 0;
433
434}
435
/*
 * olympic_open - ndo_open handler: insert into the ring and set up DMA rings.
 * @dev: the net_device being opened.
 *
 * Re-runs olympic_init(), grabs the (shared) IRQ, then issues the
 * SRB_OPEN_ADAPTER command and sleeps on srb_wait until the interrupt
 * handler completes it (10s timeout, interruptible).  On an 0x07 reply
 * with autosense enabled the open is retried once at a different ring
 * speed.  After a successful open the RX and TX descriptor/status rings
 * are allocated, DMA-mapped and programmed into the chip.  The statement
 * order tracks hardware requirements, so the code is left untouched.
 *
 * Returns 0 on success, -EAGAIN if the IRQ is unavailable, -EIO on any
 * open failure (IRQ is released on that path).
 */
436static int olympic_open(struct net_device *dev)
437{
438	struct olympic_private *olympic_priv=netdev_priv(dev);
439	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
440	unsigned long flags, t;
441	int i, open_finished = 1 ;
442	u8 resp, err;
443
444	DECLARE_WAITQUEUE(wait,current) ;
445
446	olympic_init(dev);
447
448	if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic",
449			dev))
450		return -EAGAIN;
451
452#if OLYMPIC_DEBUG
453	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
454	printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
455#endif
456
457	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
458
459	writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
460
461	writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
462
463	/* adapter is closed, so SRB is pointed to by LAPWWO */
464
465	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
466	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
467
468#if OLYMPIC_DEBUG
469	printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
470	printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
471	printk("Before the open command\n");
472#endif
	/* Open-retry loop: executed once normally, twice when autosense
	 * hits a ring-speed mismatch (case 0x07 below). */
473	do {
474		memset_io(init_srb,0,SRB_COMMAND_SIZE);
475
476		writeb(SRB_OPEN_ADAPTER,init_srb) ; 	/* open */
477		writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
478
479		/* If Network Monitor, instruct card to copy MAC frames through the ARB */
480		if (olympic_priv->olympic_network_monitor)
481			writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
482		else
483			writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
484
485		/* Test OR of first 3 bytes as its totally possible for
486		 * someone to set the first 2 bytes to be zero, although this
487		 * is an error, the first byte must have bit 6 set to 1  */
488
489		if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
490			writeb(olympic_priv->olympic_laa[0],init_srb+12);
491			writeb(olympic_priv->olympic_laa[1],init_srb+13);
492			writeb(olympic_priv->olympic_laa[2],init_srb+14);
493			writeb(olympic_priv->olympic_laa[3],init_srb+15);
494			writeb(olympic_priv->olympic_laa[4],init_srb+16);
495			writeb(olympic_priv->olympic_laa[5],init_srb+17);
496			memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
497		}
498		writeb(1,init_srb+30);
499
500		spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
501		olympic_priv->srb_queued=1;
502
503		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
504		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
505
506		t = jiffies ;
507
		/* Sleep until the ISR clears srb_queued, a signal arrives,
		 * or the 10 second SRB timeout expires. */
508		add_wait_queue(&olympic_priv->srb_wait,&wait) ;
509		set_current_state(TASK_INTERRUPTIBLE) ;
510
511		while(olympic_priv->srb_queued) {
512			schedule() ;
513			if(signal_pending(current))	{
514				printk(KERN_WARNING "%s: Signal received in open.\n",
515					dev->name);
516				printk(KERN_WARNING "SISR=%x LISR=%x\n",
517					readl(olympic_mmio+SISR),
518					readl(olympic_mmio+LISR));
519				olympic_priv->srb_queued=0;
520				break;
521			}
522			if (time_after(jiffies, t + 10*HZ)) {
523				printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
524				olympic_priv->srb_queued=0;
525				break ;
526			}
527			set_current_state(TASK_INTERRUPTIBLE) ;
528		}
529		remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
530		set_current_state(TASK_RUNNING) ;
531		olympic_priv->srb_queued = 0 ;
532#if OLYMPIC_DEBUG
533		printk("init_srb(%p): ",init_srb);
534		for(i=0;i<20;i++)
535			printk("%02x ",readb(init_srb+i));
536		printk("\n");
537#endif
538
539		/* If we get the same return response as we set, the interrupt wasn't raised and the open
540                 * timed out.
541		 */
542
543		switch (resp = readb(init_srb+2)) {
544		case OLYMPIC_CLEAR_RET_CODE:
545			printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
546			goto out;
547		case 0:
548			open_finished = 1;
549			break;
550		case 0x07:
551			if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
552				printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
553				open_finished = 0 ;
554				continue;
555			}
556
557			err = readb(init_srb+7);
558
559			if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
560				printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
561				printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
562			} else {
563				printk(KERN_WARNING "%s: %s - %s\n", dev->name,
564					open_maj_error[(err & 0xf0) >> 4],
565					open_min_error[(err & 0x0f)]);
566			}
567			goto out;
568
569		case 0x32:
570			printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
571			       dev->name, olympic_priv->olympic_laa);
572			goto out;
573
574		default:
575			printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
576			goto out;
577
578		}
579	} while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
580
581	if (readb(init_srb+18) & (1<<3))
582		if (olympic_priv->olympic_message_level)
583			printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
584
	/* Decode the actual ring speed the adapter came up at. */
585	if (readb(init_srb+18) & (1<<1))
586		olympic_priv->olympic_ring_speed = 100 ;
587	else if (readb(init_srb+18) & 1)
588		olympic_priv->olympic_ring_speed = 16 ;
589	else
590		olympic_priv->olympic_ring_speed = 4 ;
591
592	if (olympic_priv->olympic_message_level)
593		printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
594
	/* Cache the post-open channel offsets reported in the SRB. */
595	olympic_priv->asb = swab16(readw(init_srb+8));
596	olympic_priv->srb = swab16(readw(init_srb+10));
597	olympic_priv->arb = swab16(readw(init_srb+12));
598	olympic_priv->trb = swab16(readw(init_srb+16));
599
600	olympic_priv->olympic_receive_options = 0x01 ;
601	olympic_priv->olympic_copy_all_options = 0 ;
602
603	/* setup rx ring */
604
605	writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */
606
607	writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this the enables RX channel */
608
	/* Populate the RX ring with DMA-mapped skbs; a partial fill is
	 * tolerated ('i' buffers), zero buffers aborts the open. */
609	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
610
611		struct sk_buff *skb;
612
613		skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
614		if(skb == NULL)
615			break;
616
617		skb->dev = dev;
618
619		olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
620					  skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
621		olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
622		olympic_priv->rx_ring_skb[i]=skb;
623	}
624
625	if (i==0) {
626		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
627		goto out;
628	}
629
630	olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
631					 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
632	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
633	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
634	writew(i, olympic_mmio+RXDESCQCNT);
635
636	olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
637						sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
638	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
639	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
640
641 	olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1;	/* last processed rx status */
642	olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
643
644	writew(i, olympic_mmio+RXSTATQCNT);
645
646#if OLYMPIC_DEBUG
647	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
648	printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
649	printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
650	printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
651	printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );
652
653	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
654	printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
655		olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
656#endif
657
658	writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
659
660#if OLYMPIC_DEBUG
661	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
662	printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
663	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
664#endif
665
666	writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
667
668	/* setup tx ring */
669
670	writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
671	for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
672		olympic_priv->olympic_tx_ring[i].buffer=cpu_to_le32(0xdeadbeef);
673
674	olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
675	olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
676					 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
677	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
678	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
679	writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
680
681	olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
682						sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
683	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
684	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
685	writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
686
687	olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
688	olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
689
	/* Clear stale error state, then unmask the operational interrupts. */
690	writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
691	writel(0,olympic_mmio+EISR) ;
692	writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
693	writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
694
695#if OLYMPIC_DEBUG
696	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
697	printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
698#endif
699
700	if (olympic_priv->olympic_network_monitor) {
701		u8 __iomem *oat;
702		u8 __iomem *opt;
703		u8 addr[6];
704		oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
705		opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);
706
707		for (i = 0; i < 6; i++)
708			addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i);
709		printk("%s: Node Address: %pM\n", dev->name, addr);
710		printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
711			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
712			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
713			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
714			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
715
716		for (i = 0; i < 6; i++)
717			addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
718		printk("%s: NAUN Address: %pM\n", dev->name, addr);
719	}
720
721	netif_start_queue(dev);
722	return 0;
723
724out:
725	free_irq(dev->irq, dev);
726	return -EIO;
727}
728
729/*
730 * When we enter the rx routine we do not know how many frames have been
731 * queued on the rx channel. Therefore we start at the next rx status
732 * position and travel around the receive ring until we have completed
733 * all the frames.
734 *
735 * This means that we may process the frame before we receive the end
736 * of frame interrupt. This is why we always test the status instead
737 * of blindly processing the next frame.
738 *
739 * We also remove the last 4 bytes from the packet as well, these are
740 * just token ring trailer info and upset protocols that don't check
741 * their own length, i.e. SNA.
742 *
743 */
/*
 * olympic_rx - service the adapter's receive status ring.
 *
 * Called from olympic_interrupt() (interrupt context, olympic_lock held)
 * when SISR_RX_STATUS is raised.  Consumes completed entries from the RX
 * status ring until an empty (all-zero) entry is found.  For each good
 * frame an sk_buff is built and passed up with netif_rx(); the consumed
 * buffers are then handed back to the adapter through the RXENQ register.
 *
 * Frames may span several ring buffers (buffer_cnt > 1); single-buffer
 * frames longer than 1500 bytes are handled zero-copy by swapping the
 * ring skb with a freshly allocated one.  The last 4 bytes of each frame
 * are stripped (length - 4 / skb_trim) -- presumably the frame check
 * sequence; TODO confirm against the adapter documentation.
 */
static void olympic_rx(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	struct olympic_rx_status *rx_status;
	struct olympic_rx_desc *rx_desc ;
	int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
	struct sk_buff *skb, *skb2;
	int i;

	/* Peek at the entry after the last one we processed. */
	rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;

	/* A zero status word means the adapter has produced no more frames. */
	while (rx_status->status_buffercnt) {
		u32 l_status_buffercnt;

		olympic_priv->rx_status_last_received++ ;
		olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
		printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
		/* Low 16 bits: total frame length / number of buffers used;
		 * high 16 bits of fragmentcnt_framelen: length of the final fragment. */
		length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
		buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
		i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
		frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;

#if OLYMPIC_DEBUG
		printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
#endif
		l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
		/* Only process entries with the top two (completion) bits set;
		 * otherwise just step the descriptor cursor past the buffers. */
		if(l_status_buffercnt & 0xC0000000) {
			/* Bits 24-29 (mask 0x3B000000) report receive errors. */
			if (l_status_buffercnt & 0x3B000000) {
				if (olympic_priv->olympic_message_level) {
					if (l_status_buffercnt & (1<<29))  /* Rx Frame Truncated */
						printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
					if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
						printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
					if (l_status_buffercnt & (1<<27)) /* No receive buffers */
						printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
					if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
						printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
					if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
						printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
				}
				/* Skip the buffers of the bad frame. */
				olympic_priv->rx_ring_last_received += i ;
				olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
				dev->stats.rx_errors++;
			} else {

				/* Single-buffer frames may be swapped zero-copy, so the
				 * replacement skb must be at least pkt_buf_sz big. */
				if (buffer_cnt == 1) {
					skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
				} else {
					skb = dev_alloc_skb(length) ;
				}

				if (skb == NULL) {
					printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
					dev->stats.rx_dropped++;
					/* Update counters even though we don't transfer the frame */
					olympic_priv->rx_ring_last_received += i ;
					olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
				} else  {
					/* Optimise based upon number of buffers used.
					   If only one buffer is used we can simply swap the buffers around.
					   If more than one then we must use the new buffer and copy the information
					   first. Ideally all frames would be in a single buffer, this can be tuned by
					   altering the buffer size. If the length of the packet is less than
					   1500 bytes we're going to copy it over anyway to stop packets getting
					   dropped from sockets with buffers smaller than our pkt_buf_sz. */

					if (buffer_cnt==1) {
						olympic_priv->rx_ring_last_received++ ;
						olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
						rx_ring_last_received = olympic_priv->rx_ring_last_received ;
						if (length > 1500) {
							/* Zero-copy path: hand the DMA buffer's skb
							 * upstream and install the new skb in the ring. */
							skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
							/* unmap buffer */
							pci_unmap_single(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							skb_put(skb2,length-4);
							skb2->protocol = tr_type_trans(skb2,dev);
							olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
								cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
							olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
								cpu_to_le32(olympic_priv->pkt_buf_sz);
							olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
							netif_rx(skb2) ;
						} else {
							/* Copy path: sync the mapping, copy the frame out,
							 * give the buffer back to the device. */
							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
								      skb_put(skb,length - 4),
								      length - 4);
							pci_dma_sync_single_for_device(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							skb->protocol = tr_type_trans(skb,dev) ;
							netif_rx(skb) ;
						}
					} else {
						do { /* Walk the buffers */
							olympic_priv->rx_ring_last_received++ ;
							olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
							rx_ring_last_received = olympic_priv->rx_ring_last_received ;
							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
							/* Last buffer holds only frag_len bytes; the rest are full. */
							cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
							skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
								      skb_put(skb, cpy_length),
								      cpy_length);
							pci_dma_sync_single_for_device(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
						} while (--i) ;
						skb_trim(skb,skb->len-4) ;
						skb->protocol = tr_type_trans(skb,dev);
						netif_rx(skb) ;
					}
					dev->stats.rx_packets++ ;
					dev->stats.rx_bytes += length ;
				} /* if skb == null */
			} /* If status & 0x3b */

		} else { /*if buffercnt & 0xC */
			/* Incomplete entry: just advance past its buffers. */
			olympic_priv->rx_ring_last_received += i ;
			olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
		}

		/* Clear the consumed status entry and look at the next one. */
		rx_status->fragmentcnt_framelen = 0 ;
		rx_status->status_buffercnt = 0 ;
		rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);

		/* Return buffer_cnt buffers to the adapter, toggling the RXENQ
		 * high bit (the hardware's enqueue handshake). */
		writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
	} /* while */

}
884
885static void olympic_freemem(struct net_device *dev)
886{
887 struct olympic_private *olympic_priv=netdev_priv(dev);
888 int i;
889
890 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
891 if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
892 dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
893 olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
894 }
895 if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) {
896 pci_unmap_single(olympic_priv->pdev,
897 le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
898 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
899 }
900 olympic_priv->rx_status_last_received++;
901 olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
902 }
903 /* unmap rings */
904 pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
905 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
906 pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
907 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
908
909 pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
910 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
911 pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
912 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
913
914 return ;
915}
916
/*
 * olympic_interrupt - top-level interrupt service routine.
 *
 * Reads and dispatches the SISR status bits: fatal bus errors, SRB/ARB/
 * ASB/TRB command completions, TX completions (reaped in a loop) and RX
 * status (delegated to olympic_rx()).  Returns IRQ_NONE for interrupts
 * that are not ours (SISR_MI clear, or 0xffffffff from a hot-removed
 * card), IRQ_HANDLED otherwise.  Runs with olympic_lock held for the
 * duration of the dispatch.
 */
static irqreturn_t olympic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev= (struct net_device *)dev_id;
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u32 sisr;
	u8 __iomem *adapter_check_area ;

	/*
	 *  Read sisr but don't reset it yet.
	 *  The indication bit may have been set but the interrupt latch
	 *  bit may not be set, so we'd lose the interrupt later.
	 */
	sisr=readl(olympic_mmio+SISR) ;
	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
		return IRQ_NONE;
	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */

	spin_lock(&olympic_priv->olympic_lock);

	/* Hotswap gives us this on removal */
	if (sisr == 0xffffffff) {
		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
		spin_unlock(&olympic_priv->olympic_lock) ;
		return IRQ_NONE;
	}

	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {

		/* If we ever get this the adapter is seriously dead. Only a reset is going to
		 * bring it back to life. We're talking pci bus errors and such like :( */
		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
			printk(KERN_ERR "or the linux-tr mailing list.\n") ;
			wake_up_interruptible(&olympic_priv->srb_wait);
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ERR */

		if(sisr & SISR_SRB_REPLY) {
			/* srb_queued==1: a sleeper is waiting; 2: run the bottom half. */
			if(olympic_priv->srb_queued==1) {
				wake_up_interruptible(&olympic_priv->srb_wait);
			} else if (olympic_priv->srb_queued==2) {
				olympic_srb_bh(dev) ;
			}
			olympic_priv->srb_queued=0;
		} /* SISR_SRB_REPLY */

		/* We shouldn't ever miss the Tx interrupt, but the you never know, hence the loop to ensure
			we get all tx completions. */
		if (sisr & SISR_TX1_EOF) {
			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
				olympic_priv->tx_ring_last_status++;
				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
				olympic_priv->free_tx_ring_entries++;
				dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
				dev->stats.tx_packets++ ;
				pci_unmap_single(olympic_priv->pdev,
					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
				/* Mark the descriptor as having no mapping. */
				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=cpu_to_le32(0xdeadbeef);
				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
			}
			netif_wake_queue(dev);
		} /* SISR_TX1_EOF */

		if (sisr & SISR_RX_STATUS) {
			olympic_rx(dev);
		} /* SISR_RX_STATUS */

		if (sisr & SISR_ADAPTER_CHECK) {
			/* Adapter internal failure: dump its 8-byte check area and bail. */
			netif_stop_queue(dev);
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ADAPTER_CHECK */

		if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (olympic_priv->asb_queued) {
				olympic_asb_bh(dev) ;
			}
		} /* SISR_ASB_FREE */

		if (sisr & SISR_ARB_CMD) {
			olympic_arb_cmd(dev) ;
		} /* SISR_ARB_CMD */

		if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (olympic_priv->trb_queued) {
				wake_up_interruptible(&olympic_priv->trb_wait);
			}
			olympic_priv->trb_queued = 0 ;
		} /* SISR_TRB_REPLY */

		if (sisr & SISR_RX_NOBUF) {
			/* According to the documentation, we don't have to do anything, but trapping it keeps it out of
				/var/log/messages. */
		} /* SISR_RX_NOBUF */
	} else {
		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
	} /* One if the interrupts we want */

	/* Re-enable the master interrupt bit before returning. */
	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	spin_unlock(&olympic_priv->olympic_lock) ;
	return IRQ_HANDLED;
}
1033
1034static netdev_tx_t olympic_xmit(struct sk_buff *skb,
1035 struct net_device *dev)
1036{
1037 struct olympic_private *olympic_priv=netdev_priv(dev);
1038 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1039 unsigned long flags ;
1040
1041 spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
1042
1043 netif_stop_queue(dev);
1044
1045 if(olympic_priv->free_tx_ring_entries) {
1046 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
1047 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
1048 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
1049 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
1050 olympic_priv->free_tx_ring_entries--;
1051
1052 olympic_priv->tx_ring_free++;
1053 olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
1054 writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
1055 netif_wake_queue(dev);
1056 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1057 return NETDEV_TX_OK;
1058 } else {
1059 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1060 return NETDEV_TX_BUSY;
1061 }
1062
1063}
1064
1065
/*
 * olympic_close - shut the adapter down (ndo_stop).
 *
 * Issues an SRB_CLOSE_ADAPTER command and sleeps (interruptibly, with a
 * 60 s timeout per wait) until the interrupt handler clears srb_queued.
 * Afterwards it frees all DMA buffers/rings via olympic_freemem(),
 * resets the TX/RX FIFOs and busmaster logic through BCTL, and releases
 * the IRQ.  Always returns 0.
 */
static int olympic_close(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
	unsigned long t,flags;

	DECLARE_WAITQUEUE(wait,current) ;

	netif_stop_queue(dev);

	/* Point the LAP window at the SRB and build the close command. */
	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	writeb(SRB_CLOSE_ADAPTER,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);

	/* Register on the wait queue BEFORE kicking the command so the
	 * SRB-reply interrupt cannot race past us. */
	add_wait_queue(&olympic_priv->srb_wait,&wait) ;
	set_current_state(TASK_INTERRUPTIBLE) ;

	spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
	olympic_priv->srb_queued=1;

	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
	spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

	while(olympic_priv->srb_queued) {

		t = schedule_timeout_interruptible(60*HZ);

		if(signal_pending(current)) {
			printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
			printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
			olympic_priv->srb_queued=0;
			break;
		}

		if (t == 0) {
			/* Timed out without a reply; proceed with teardown anyway. */
			printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
		}
		olympic_priv->srb_queued=0;
	}
	remove_wait_queue(&olympic_priv->srb_wait,&wait) ;

	olympic_priv->rx_status_last_received++;
	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;

	olympic_freemem(dev) ;

	/* reset tx/rx fifo's and busmaster logic */

	writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
	udelay(1);
	writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);

#if OLYMPIC_DEBUG
	{
	int i ;
	printk("srb(%p): ",srb);
	for(i=0;i<4;i++)
		printk("%x ",readb(srb+i));
	printk("\n");
	}
#endif
	free_irq(dev->irq,dev);

	return 0;

}
1135
/*
 * olympic_set_rx_mode - apply promiscuous mode and multicast filtering.
 *
 * Called by the network core (may be in atomic context, hence srb_queued=2
 * so the SRB reply is handled by olympic_srb_bh() instead of a sleeper).
 * If the copy-all options changed, a MODIFY_RECEIVE_OPTIONS SRB is issued
 * and the function returns; otherwise it ORs the multicast list into a
 * 4-byte functional address mask and issues SET_FUNC_ADDRESS.
 */
static void olympic_set_rx_mode(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 options = 0;
	u8 __iomem *srb;
	struct netdev_hw_addr *ha;
	unsigned char dev_mc_address[4] ;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
	options = olympic_priv->olympic_copy_all_options;

	/* 0x61: the adapter's copy-all / promiscuous option bits. */
	if (dev->flags&IFF_PROMISC)
		options |= 0x61 ;
	else
		options &= ~0x61 ;

	/* Only issue the srb if there is a change in options */

	if ((options ^ olympic_priv->olympic_copy_all_options)) {

		/* Now to issue the srb command to alter the copy.all.options */

		writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
		writeb(0,srb+1);
		writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
		writeb(0,srb+3);
		writeb(olympic_priv->olympic_receive_options,srb+4);
		writeb(options,srb+5);

		olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		olympic_priv->olympic_copy_all_options = options ;

		return ;
	}

	/* Set the functional addresses we need for multicast */

	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;

	/* OR bytes 2-5 of every multicast address into the functional mask. */
	netdev_for_each_mc_addr(ha, dev) {
		dev_mc_address[0] |= ha->addr[2];
		dev_mc_address[1] |= ha->addr[3];
		dev_mc_address[2] |= ha->addr[4];
		dev_mc_address[3] |= ha->addr[5];
	}

	writeb(SRB_SET_FUNC_ADDRESS,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
	writeb(0,srb+3);
	writeb(0,srb+4);
	writeb(0,srb+5);
	writeb(dev_mc_address[0],srb+6);
	writeb(dev_mc_address[1],srb+7);
	writeb(dev_mc_address[2],srb+8);
	writeb(dev_mc_address[3],srb+9);

	olympic_priv->srb_queued = 2 ;
	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

}
1202
/*
 * olympic_srb_bh - bottom half for SRB command completions.
 *
 * Called from the interrupt handler when srb_queued was 2 (the command
 * was issued from atomic context and nobody is sleeping on srb_wait).
 * Reads the command byte (srb+0) and return code (srb+2) back from the
 * shared-RAM SRB and logs the outcome; no state is changed beyond the
 * log messages.
 */
static void olympic_srb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 __iomem *srb;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	switch (readb(srb)) {

		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
		 * At some point we should do something if we get an error, such as
		 * resetting the IFF_PROMISC flag in dev
		 */

		case SRB_MODIFY_RECEIVE_OPTIONS:
			switch (readb(srb+2)) {
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				default:
					if (olympic_priv->olympic_message_level)
						printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
		 */

		case SRB_SET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				case 0x3c:
					printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
					break ;
				case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
					printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
					break ;
				case 0x55:
					printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
		 */

		case SRB_RESET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				case 0x39: /* Must deal with this if individual multicast addresses used */
					printk(KERN_INFO "%s: Group address not found\n",dev->name);
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;


		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
		 */

		case SRB_SET_FUNC_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_READ_LOG - Read and reset the adapter error counters
		 */

		case SRB_READ_LOG:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;

			} /* switch srb[2] */
			break ;

		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */

		case SRB_READ_SR_COUNTERS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		default:
			printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
			break ;
	} /* switch srb[0] */

}
1347
1348static int olympic_set_mac_address (struct net_device *dev, void *addr)
1349{
1350 struct sockaddr *saddr = addr ;
1351 struct olympic_private *olympic_priv = netdev_priv(dev);
1352
1353 if (netif_running(dev)) {
1354 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1355 return -EIO ;
1356 }
1357
1358 memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1359
1360 if (olympic_priv->olympic_message_level) {
1361 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1362 olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1363 olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1364 olympic_priv->olympic_laa[5]);
1365 }
1366
1367 return 0 ;
1368}
1369
/*
 * olympic_arb_cmd - handle an adapter-to-host ARB command.
 *
 * Called from the interrupt handler on SISR_ARB_CMD.  Two commands are
 * understood:
 *  - ARB_RECEIVE_DATA: a MAC frame is waiting in shared RAM; walk its
 *    buffer chain into an sk_buff, hand it to the stack, then reply to
 *    the adapter through the ASB (or defer to olympic_asb_bh() if the
 *    ASB is busy).
 *  - ARB_LAN_CHANGE_STATUS: decode and log lan status changes; on fatal
 *    conditions the FIFOs/busmaster are reset and the queue is stopped;
 *    counter overflows trigger READ_LOG / READ_SR_COUNTERS SRBs.
 */
static void olympic_arb_cmd(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u8 __iomem *arb_block, *asb_block, *srb  ;
	u8 header_len ;
	u16 frame_len, buffer_len ;
	struct sk_buff *mac_frame ;
	u8 __iomem *buf_ptr ;
	u8 __iomem *frame_data ;
	u16 buff_off ;
	u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
	u8 fdx_prot_error ;
	u16 next_ptr;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
	srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;

	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */

		header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
		/* Multi-byte fields in the ARB are big-endian, hence swab16. */
		frame_len = swab16(readw(arb_block + 10)) ;

		buff_off = swab16(readw(arb_block + 6)) ;

		buf_ptr = olympic_priv->olympic_lap + buff_off ;

#if OLYMPIC_DEBUG
{
		int i;
		frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;

		for (i=0 ;  i < 14 ; i++) {
			printk("Loc %d = %02x\n",i,readb(frame_data + i));
		}

		printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
}
#endif
		mac_frame = dev_alloc_skb(frame_len) ;
		if (!mac_frame) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
			goto drop_frame;
		}

		/* Walk the buffer chain, creating the frame */

		do {
			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
			next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
		} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr)));

		mac_frame->protocol = tr_type_trans(mac_frame, dev);

		if (olympic_priv->olympic_network_monitor) {
			struct trh_hdr *mac_hdr;
			printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
			mac_hdr = tr_hdr(mac_frame);
			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
			       dev->name, mac_hdr->daddr);
			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n",
			       dev->name, mac_hdr->saddr);
		}
		netif_rx(mac_frame);

drop_frame:
		/* Now tell the card we have dealt with the received frame */

		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);

		/* Is the ASB free ? */

		if (readb(asb_block + 2) != 0xff) {
			/* ASB busy: defer the reply to olympic_asb_bh(). */
			olympic_priv->asb_queued = 1 ;
			writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
			return ;
			/* Drop out and wait for the bottom half to be run */
		}

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);

		olympic_priv->asb_queued = 2 ;

		return ;

	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
		lan_status = swab16(readw(arb_block+6));
		fdx_prot_error = readb(arb_block+8) ;

		/* Issue ARB Free */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);

		/* Only act on the bits that actually changed. */
		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
			if (lan_status_diff & LSC_LWF)
					printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
			if (lan_status_diff & LSC_ARW)
					printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
			if (lan_status_diff & LSC_FPE)
					printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
			if (lan_status_diff & LSC_RR)
					printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */

			writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
			udelay(1);
			writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
			netif_stop_queue(dev);
			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
			printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
		} /* If serious error */

		if (olympic_priv->olympic_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
					printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
			if (lan_status_diff & LSC_HARD_ERR)
					printk(KERN_INFO "%s: Beaconing\n",dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
					printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
					printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
			if (lan_status_diff & LSC_SS)
					printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
					printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
					printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
		}

		if (lan_status_diff & LSC_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Counter Overflow\n", dev->name);

				/* Issue READ.LOG command */

				writeb(SRB_READ_LOG, srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);
				writeb(0,srb+4);
				writeb(0,srb+5);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		if (lan_status_diff & LSC_SR_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

				/* Issue a READ.SR.COUNTERS */

				writeb(SRB_READ_SR_COUNTERS,srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		olympic_priv->olympic_lan_status = lan_status ;

	}  /* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
1556
/*
 * olympic_asb_bh - bottom half for the adapter status block (ASB).
 *
 * asb_queued == 1: olympic_arb_cmd() found the ASB busy and requested a
 * free notification; now send the deferred receive-data reply.
 * asb_queued == 2: the reply has completed; decode the return code and
 * log anything abnormal.  Clears asb_queued on the way out.
 */
static void olympic_asb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *arb_block, *asb_block ;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;

	if (olympic_priv->asb_queued == 1) {   /* Dropped through the first time */

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
		olympic_priv->asb_queued = 2 ;

		return ;
	}

	if (olympic_priv->asb_queued == 2) {
		switch (readb(asb_block+2)) {
			case 0x01:
				printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
				break ;
			case 0x26:
				printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
				break ;
			case 0xFF:
				/* Valid response, everything should be ok again */
				break ;
			default:
				printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
				break ;
		}
	}
	olympic_priv->asb_queued = 0 ;
}
1596
1597static int olympic_change_mtu(struct net_device *dev, int mtu)
1598{
1599 struct olympic_private *olympic_priv = netdev_priv(dev);
1600 u16 max_mtu ;
1601
1602 if (olympic_priv->olympic_ring_speed == 4)
1603 max_mtu = 4500 ;
1604 else
1605 max_mtu = 18000 ;
1606
1607 if (mtu > max_mtu)
1608 return -EINVAL ;
1609 if (mtu < 100)
1610 return -EINVAL ;
1611
1612 dev->mtu = mtu ;
1613 olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
1614
1615 return 0 ;
1616}
1617
/*
 * olympic_proc_show - render the /proc network-monitor report.
 *
 * Reads the adapter address table and token ring parameters table out of
 * the shared RAM window (LAP) and formats them into the seq_file.  Word
 * fields in shared RAM are big-endian, hence the swab16() on each readw.
 * Multi-byte addresses are staged into local arrays so %pM can print them.
 */
static int olympic_proc_show(struct seq_file *m, void *v)
{
	struct net_device *dev = m->private;
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
	u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
	u8 addr[6];
	u8 addr2[6];
	int i;

	seq_printf(m,
		   "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
	seq_printf(m, "\n%6s: Adapter Address   : Node Address      : Functional Addr\n",
		   dev->name);

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);

	seq_printf(m, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
		   dev->name,
		   dev->dev_addr, addr,
		   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
		   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
		   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
		   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

	seq_printf(m, "\n%6s: Token Ring Parameters Table:\n", dev->name);

	seq_printf(m, "%6s: Physical Addr : Up Node Address   : Poll Address      : AccPri : Auth Src : Att Code :\n",
		   dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i);
	for (i = 0 ; i < 6 ; i++)
		addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);

	seq_printf(m, "%6s: %02x:%02x:%02x:%02x   : %pM : %pM : %04x   : %04x     :  %04x    :\n",
		   dev->name,
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
		   addr, addr2,
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));

	seq_printf(m, "%6s: Source Address    : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
		   dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
	seq_printf(m, "%6s: %pM : %04x  : %04x   : %04x   : %04x   : %04x    :     %04x     : \n",
		   dev->name, addr,
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));

	seq_printf(m, "%6s: Beacon Details :  Tx  :  Rx  : NAUN Node Address : NAUN Node Phys : \n",
		   dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
	seq_printf(m, "%6s:                :  %02x  :  %02x  : %pM : %02x:%02x:%02x:%02x    : \n",
		   dev->name,
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
		   addr,
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));

	return 0;
}
1696
1697static int olympic_proc_open(struct inode *inode, struct file *file)
1698{
1699 return single_open(file, olympic_proc_show, PDE(inode)->data);
1700}
1701
/* File operations for the read-only /proc network-monitor entry;
 * all read/seek plumbing is provided by seq_file. */
static const struct file_operations olympic_proc_ops = {
	.open		= olympic_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1708
1709static void __devexit olympic_remove_one(struct pci_dev *pdev)
1710{
1711 struct net_device *dev = pci_get_drvdata(pdev) ;
1712 struct olympic_private *olympic_priv=netdev_priv(dev);
1713
1714 if (olympic_priv->olympic_network_monitor) {
1715 char proc_name[20] ;
1716 strcpy(proc_name,"olympic_") ;
1717 strcat(proc_name,dev->name) ;
1718 remove_proc_entry(proc_name,init_net.proc_net);
1719 }
1720 unregister_netdev(dev) ;
1721 iounmap(olympic_priv->olympic_mmio) ;
1722 iounmap(olympic_priv->olympic_lap) ;
1723 pci_release_regions(pdev) ;
1724 pci_set_drvdata(pdev,NULL) ;
1725 free_netdev(dev) ;
1726}
1727
/* PCI driver glue: matches the IDs in olympic_pci_tbl and wires up the
 * probe/remove callbacks. */
static struct pci_driver olympic_driver = {
	.name		= "olympic",
	.id_table	= olympic_pci_tbl,
	.probe		= olympic_probe,
	.remove		= __devexit_p(olympic_remove_one),
};
1734
/* Module entry point: register the PCI driver with the core. */
static int __init olympic_pci_init(void)
{
	return pci_register_driver(&olympic_driver) ;
}
1739
/* Module exit point: unregister the driver (removes all bound devices). */
static void __exit olympic_pci_cleanup(void)
{
	pci_unregister_driver(&olympic_driver) ;
}
1744
1745
1746module_init(olympic_pci_init) ;
1747module_exit(olympic_pci_cleanup) ;
1748
1749MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h
deleted file mode 100644
index 30631bae4c94..000000000000
--- a/drivers/net/tokenring/olympic.h
+++ /dev/null
@@ -1,321 +0,0 @@
1/*
2 * olympic.h (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999,2000 Mike Phillips (mikep@linuxtr.net)
4 *
5 * Linux driver for IBM PCI tokenring cards based on the olympic and the PIT/PHY chipset.
6 *
7 * Base Driver Skeleton:
8 * Written 1993-94 by Donald Becker.
9 *
10 * Copyright 1993 United States Government as represented by the
11 * Director, National Security Agency.
12 *
13 * This software may be used and distributed according to the terms
14 * of the GNU General Public License, incorporated herein by reference.
15 */
16
/* Chip identification register. */
#define CID			0x4e

/* Bridge control register and bits. */
#define BCTL			0x70
#define BCTL_SOFTRESET		(1<<15)
#define BCTL_MIMREB		(1<<6)
#define BCTL_MODE_INDICATOR	(1<<5)

/* General purpose register and bits. */
#define GPR			0x4a
#define GPR_OPTI_BF		(1<<6)
#define GPR_NEPTUNE_BF		(1<<4)
#define GPR_AUTOSENSE		(1<<2)
#define GPR_16MBPS		(1<<3)

#define PAG			0x85
#define LBC			0x8e

/* Local interrupt status registers (plain / sum / read-with-mask). */
#define LISR			0x10
#define LISR_SUM		0x14
#define LISR_RWM		0x18

#define LISR_LIE		(1<<15)
#define LISR_SLIM		(1<<13)
#define LISR_SLI		(1<<12)
#define LISR_PCMSRMASK		(1<<11)
#define LISR_PCMSRINT		(1<<10)
#define LISR_WOLMASK		(1<<9)
#define LISR_WOL		(1<<8)
#define LISR_SRB_CMD		(1<<5)
#define LISR_ASB_REPLY		(1<<4)
#define LISR_ASB_FREE_REQ	(1<<2)
#define LISR_ARB_FREE		(1<<1)
#define LISR_TRB_FRAME		(1<<0)

/* System interrupt status registers. */
#define SISR			0x20
#define SISR_SUM		0x24
#define SISR_RWM		0x28
#define SISR_RR			0x2C
#define SISR_RESMASK		0x30
#define SISR_MASK		0x54
#define SISR_MASK_SUM		0x58
#define SISR_MASK_RWM		0x5C

/* NOTE(review): (1<<31) on a signed int is formally undefined behavior;
 * kept bit-for-bit as the original to avoid changing register values. */
#define SISR_TX2_IDLE		(1<<31)
#define SISR_TX2_HALT		(1<<29)
#define SISR_TX2_EOF		(1<<28)
#define SISR_TX1_IDLE		(1<<27)
#define SISR_TX1_HALT		(1<<25)
#define SISR_TX1_EOF		(1<<24)
#define SISR_TIMEOUT		(1<<23)
#define SISR_RX_NOBUF		(1<<22)
#define SISR_RX_STATUS		(1<<21)
#define SISR_RX_HALT		(1<<18)
#define SISR_RX_EOF_EARLY	(1<<16)
#define SISR_MI			(1<<15)
#define SISR_PI			(1<<13)
#define SISR_ERR		(1<<9)
#define SISR_ADAPTER_CHECK	(1<<6)
#define SISR_SRB_REPLY		(1<<5)
#define SISR_ASB_FREE		(1<<4)
#define SISR_ARB_CMD		(1<<3)
#define SISR_TRB_REPLY		(1<<2)

/* Error interrupt status registers. */
#define EISR			0x34
#define EISR_RWM		0x38
#define EISR_MASK		0x3c
#define EISR_MASK_OPTIONS	0x001FFF7F

/* LAP (adapter memory window) access registers. */
#define LAPA			0x60
#define LAPWWO			0x64
#define LAPWWC			0x68
#define LAPCTL			0x6C
#define LAIPD			0x78
#define LAIPDDINC		0x7C

#define TIMER			0x50

#define CLKCTL			0x74
#define CLKCTL_PAUSE		(1<<15)

#define PM_CON			0x4

/* Bus-master control registers and channel-disable bits. */
#define BMCTL_SUM		0x40
#define BMCTL_RWM		0x44
#define BMCTL_TX2_DIS		(1<<30)
#define BMCTL_TX1_DIS		(1<<26)
#define BMCTL_RX_DIS		(1<<22)

#define BMASR			0xcc

/* Receive DMA queue registers. */
#define RXDESCQ			0x90
#define RXDESCQCNT		0x94
#define RXCDA			0x98
#define RXENQ			0x9C
#define RXSTATQ			0xA0
#define RXSTATQCNT		0xA4
#define RXCSA			0xA8
#define RXCLEN			0xAC
#define RXHLEN			0xAE

/* Transmit DMA queue registers, two channels. */
#define TXDESCQ_1		0xb0
#define TXDESCQ_2		0xd0
#define TXDESCQCNT_1		0xb4
#define TXDESCQCNT_2		0xd4
#define TXCDA_1			0xb8
#define TXCDA_2			0xd8
#define TXENQ_1			0xbc
#define TXENQ_2			0xdc
#define TXSTATQ_1		0xc0
#define TXSTATQ_2		0xe0
#define TXSTATQCNT_1		0xc4
#define TXSTATQCNT_2		0xe4
#define TXCSA_1			0xc8
#define TXCSA_2			0xe8

/* Cardbus */
#define FERMASK			0xf4
#define FERMASK_INT_BIT		(1<<15)

#define OLYMPIC_IO_SPACE	256

#define SRB_COMMAND_SIZE	50

#define OLYMPIC_MAX_ADAPTERS	8 /* 0x08 __MODULE_STRING can't hand 0xnn */

/* Defines for LAN STATUS CHANGE reports */
#define LSC_SIG_LOSS		0x8000
#define LSC_HARD_ERR		0x4000
#define LSC_SOFT_ERR		0x2000
#define LSC_TRAN_BCN		0x1000
#define LSC_LWF			0x0800
#define LSC_ARW			0x0400
#define LSC_FPE			0x0200
#define LSC_RR			0x0100
#define LSC_CO			0x0080
#define LSC_SS			0x0040
#define LSC_RING_REC		0x0020
#define LSC_SR_CO		0x0010
#define LSC_FDX_MODE		0x0004

/* Defines for OPEN ADAPTER command */
#define OPEN_ADAPTER_EXT_WRAP		(1<<15)
#define OPEN_ADAPTER_DIS_HARDEE		(1<<14)
#define OPEN_ADAPTER_DIS_SOFTERR	(1<<13)
#define OPEN_ADAPTER_PASS_ADC_MAC	(1<<12)
#define OPEN_ADAPTER_PASS_ATT_MAC	(1<<11)
#define OPEN_ADAPTER_ENABLE_EC		(1<<10)
#define OPEN_ADAPTER_CONTENDER		(1<<8)
#define OPEN_ADAPTER_PASS_BEACON	(1<<7)
#define OPEN_ADAPTER_ENABLE_FDX		(1<<6)
#define OPEN_ADAPTER_ENABLE_RPL		(1<<5)
#define OPEN_ADAPTER_INHIBIT_ETR	(1<<4)
#define OPEN_ADAPTER_INTERNAL_WRAP	(1<<3)
#define OPEN_ADAPTER_USE_OPTS2		(1<<0)

#define OPEN_ADAPTER_2_ENABLE_ONNOW	(1<<15)

/* Defines for SRB Commands */
#define SRB_ACCESS_REGISTER		0x1f
#define SRB_CLOSE_ADAPTER		0x04
#define SRB_CONFIGURE_BRIDGE		0x0c
#define SRB_CONFIGURE_WAKEUP_EVENT	0x1a
#define SRB_MODIFY_BRIDGE_PARMS		0x15
#define SRB_MODIFY_OPEN_OPTIONS		0x01
#define SRB_MODIFY_RECEIVE_OPTIONS	0x17
#define SRB_NO_OPERATION		0x00
#define SRB_OPEN_ADAPTER		0x03
#define SRB_READ_LOG			0x08
#define SRB_READ_SR_COUNTERS		0x16
#define SRB_RESET_GROUP_ADDRESS		0x02
#define SRB_SAVE_CONFIGURATION		0x1b
#define SRB_SET_BRIDGE_PARMS		0x09
#define SRB_SET_BRIDGE_TARGETS		0x10
#define SRB_SET_FUNC_ADDRESS		0x07
#define SRB_SET_GROUP_ADDRESS		0x06
#define SRB_SET_GROUP_ADDR_OPTIONS	0x11
#define SRB_UPDATE_WAKEUP_PATTERN	0x19

/* Clear return code */
#define OLYMPIC_CLEAR_RET_CODE	0xfe

/* ARB Commands */
#define ARB_RECEIVE_DATA	0x81
#define ARB_LAN_CHANGE_STATUS	0x84

/* ASB Response commands */
#define ASB_RECEIVE_DATA	0x81

/* Olympic defaults for buffers */
#define OLYMPIC_RX_RING_SIZE	16 /* should be a power of 2 */
#define OLYMPIC_TX_RING_SIZE	8 /* should be a power of 2 */

#define PKT_BUF_SZ		4096 /* Default packet size */
213
214/* Olympic data structures */
215
216/* xxxx These structures are all little endian in hardware. */
217
218struct olympic_tx_desc {
219 __le32 buffer;
220 __le32 status_length;
221};
222
223struct olympic_tx_status {
224 __le32 status;
225};
226
227struct olympic_rx_desc {
228 __le32 buffer;
229 __le32 res_length;
230};
231
232struct olympic_rx_status {
233 __le32 fragmentcnt_framelen;
234 __le32 status_buffercnt;
235};
236/* xxxx END These structures are all little endian in hardware. */
237/* xxxx There may be more, but I'm pretty sure about these */
238
239struct mac_receive_buffer {
240 __le16 next ;
241 u8 padding ;
242 u8 frame_status ;
243 __le16 buffer_length ;
244 u8 frame_data ;
245};
246
247struct olympic_private {
248
249 u16 srb; /* be16 */
250 u16 trb; /* be16 */
251 u16 arb; /* be16 */
252 u16 asb; /* be16 */
253
254 u8 __iomem *olympic_mmio;
255 u8 __iomem *olympic_lap;
256 struct pci_dev *pdev ;
257 const char *olympic_card_name;
258
259 spinlock_t olympic_lock ;
260
261 volatile int srb_queued; /* True if an SRB is still posted */
262 wait_queue_head_t srb_wait;
263
264 volatile int asb_queued; /* True if an ASB is posted */
265
266 volatile int trb_queued; /* True if a TRB is posted */
267 wait_queue_head_t trb_wait ;
268
269 /* These must be on a 4 byte boundary. */
270 struct olympic_rx_desc olympic_rx_ring[OLYMPIC_RX_RING_SIZE];
271 struct olympic_tx_desc olympic_tx_ring[OLYMPIC_TX_RING_SIZE];
272 struct olympic_rx_status olympic_rx_status_ring[OLYMPIC_RX_RING_SIZE];
273 struct olympic_tx_status olympic_tx_status_ring[OLYMPIC_TX_RING_SIZE];
274
275 struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE];
276 int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries;
277
278 u16 olympic_lan_status ;
279 u8 olympic_ring_speed ;
280 u16 pkt_buf_sz ;
281 u8 olympic_receive_options, olympic_copy_all_options,olympic_message_level, olympic_network_monitor;
282 u16 olympic_addr_table_addr, olympic_parms_addr ;
283 u8 olympic_laa[6] ;
284 u32 rx_ring_dma_addr;
285 u32 rx_status_ring_dma_addr;
286 u32 tx_ring_dma_addr;
287 u32 tx_status_ring_dma_addr;
288};
289
290struct olympic_adapter_addr_table {
291
292 u8 node_addr[6] ;
293 u8 reserved[4] ;
294 u8 func_addr[4] ;
295} ;
296
297struct olympic_parameters_table {
298
299 u8 phys_addr[4] ;
300 u8 up_node_addr[6] ;
301 u8 up_phys_addr[4] ;
302 u8 poll_addr[6] ;
303 u16 reserved ;
304 u16 acc_priority ;
305 u16 auth_source_class ;
306 u16 att_code ;
307 u8 source_addr[6] ;
308 u16 beacon_type ;
309 u16 major_vector ;
310 u16 lan_status ;
311 u16 soft_error_time ;
312 u16 reserved1 ;
313 u16 local_ring ;
314 u16 mon_error ;
315 u16 beacon_transmit ;
316 u16 beacon_receive ;
317 u16 frame_correl ;
318 u8 beacon_naun[6] ;
319 u32 reserved2 ;
320 u8 beacon_phys[4] ;
321};
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
deleted file mode 100644
index 62d90e40f9ec..000000000000
--- a/drivers/net/tokenring/proteon.c
+++ /dev/null
@@ -1,422 +0,0 @@
1/*
2 * proteon.c: A network driver for Proteon ISA token ring cards.
3 *
4 * Based on tmspci written 1999 by Adam Fritzler
5 *
6 * Written 2003 by Jochen Friedrich
7 *
8 * This software may be used and distributed according to the terms
9 * of the GNU General Public License, incorporated herein by reference.
10 *
11 * This driver module supports the following cards:
12 * - Proteon 1392, 1392+
13 *
14 * Maintainer(s):
15 * AF Adam Fritzler
16 * JF Jochen Friedrich jochen@scram.de
17 *
18 * Modification History:
19 * 02-Jan-03 JF Created
20 *
21 */
22static const char version[] = "proteon.c: v1.00 02/01/2003 by Jochen Friedrich\n";
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/delay.h>
27#include <linux/errno.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/trdevice.h>
32#include <linux/platform_device.h>
33
34#include <asm/io.h>
35#include <asm/irq.h>
36#include <asm/pci.h>
37#include <asm/dma.h>
38
39#include "tms380tr.h"
40
41#define PROTEON_IO_EXTENT 32
42
43/* A zero-terminated list of I/O addresses to be probed. */
44static unsigned int portlist[] __initdata = {
45 0x0A20, 0x0E20, 0x1A20, 0x1E20, 0x2A20, 0x2E20, 0x3A20, 0x3E20,// Prot.
46 0x4A20, 0x4E20, 0x5A20, 0x5E20, 0x6A20, 0x6E20, 0x7A20, 0x7E20,// Prot.
47 0x8A20, 0x8E20, 0x9A20, 0x9E20, 0xAA20, 0xAE20, 0xBA20, 0xBE20,// Prot.
48 0xCA20, 0xCE20, 0xDA20, 0xDE20, 0xEA20, 0xEE20, 0xFA20, 0xFE20,// Prot.
49 0
50};
51
52/* A zero-terminated list of IRQs to be probed. */
53static unsigned short irqlist[] = {
54 7, 6, 5, 4, 3, 12, 11, 10, 9,
55 0
56};
57
58/* A zero-terminated list of DMAs to be probed. */
59static int dmalist[] __initdata = {
60 5, 6, 7,
61 0
62};
63
64static char cardname[] = "Proteon 1392\0";
65static u64 dma_mask = ISA_MAX_ADDRESS;
66static int proteon_open(struct net_device *dev);
67static void proteon_read_eeprom(struct net_device *dev);
68static unsigned short proteon_setnselout_pins(struct net_device *dev);
69
70static unsigned short proteon_sifreadb(struct net_device *dev, unsigned short reg)
71{
72 return inb(dev->base_addr + reg);
73}
74
75static unsigned short proteon_sifreadw(struct net_device *dev, unsigned short reg)
76{
77 return inw(dev->base_addr + reg);
78}
79
80static void proteon_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
81{
82 outb(val, dev->base_addr + reg);
83}
84
85static void proteon_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
86{
87 outw(val, dev->base_addr + reg);
88}
89
90static int __init proteon_probe1(struct net_device *dev, int ioaddr)
91{
92 unsigned char chk1, chk2;
93 int i;
94
95 if (!request_region(ioaddr, PROTEON_IO_EXTENT, cardname))
96 return -ENODEV;
97
98
99 chk1 = inb(ioaddr + 0x1f); /* Get Proteon ID reg 1 */
100 if (chk1 != 0x1f)
101 goto nodev;
102
103 chk1 = inb(ioaddr + 0x1e) & 0x07; /* Get Proteon ID reg 0 */
104 for (i=0; i<16; i++) {
105 chk2 = inb(ioaddr + 0x1e) & 0x07;
106 if (((chk1 + 1) & 0x07) != chk2)
107 goto nodev;
108 chk1 = chk2;
109 }
110
111 dev->base_addr = ioaddr;
112 return 0;
113nodev:
114 release_region(ioaddr, PROTEON_IO_EXTENT);
115 return -ENODEV;
116}
117
118static struct net_device_ops proteon_netdev_ops __read_mostly;
119
120static int __init setup_card(struct net_device *dev, struct device *pdev)
121{
122 struct net_local *tp;
123 static int versionprinted;
124 const unsigned *port;
125 int j,err = 0;
126
127 if (!dev)
128 return -ENOMEM;
129
130 if (dev->base_addr) /* probe specific location */
131 err = proteon_probe1(dev, dev->base_addr);
132 else {
133 for (port = portlist; *port; port++) {
134 err = proteon_probe1(dev, *port);
135 if (!err)
136 break;
137 }
138 }
139 if (err)
140 goto out5;
141
142 /* At this point we have found a valid card. */
143
144 if (versionprinted++ == 0)
145 printk(KERN_DEBUG "%s", version);
146
147 err = -EIO;
148 pdev->dma_mask = &dma_mask;
149 if (tmsdev_init(dev, pdev))
150 goto out4;
151
152 dev->base_addr &= ~3;
153
154 proteon_read_eeprom(dev);
155
156 printk(KERN_DEBUG "proteon.c: Ring Station Address: %pM\n",
157 dev->dev_addr);
158
159 tp = netdev_priv(dev);
160 tp->setnselout = proteon_setnselout_pins;
161
162 tp->sifreadb = proteon_sifreadb;
163 tp->sifreadw = proteon_sifreadw;
164 tp->sifwriteb = proteon_sifwriteb;
165 tp->sifwritew = proteon_sifwritew;
166
167 memcpy(tp->ProductID, cardname, PROD_ID_SIZE + 1);
168
169 tp->tmspriv = NULL;
170
171 dev->netdev_ops = &proteon_netdev_ops;
172
173 if (dev->irq == 0)
174 {
175 for(j = 0; irqlist[j] != 0; j++)
176 {
177 dev->irq = irqlist[j];
178 if (!request_irq(dev->irq, tms380tr_interrupt, 0,
179 cardname, dev))
180 break;
181 }
182
183 if(irqlist[j] == 0)
184 {
185 printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n");
186 goto out3;
187 }
188 }
189 else
190 {
191 for(j = 0; irqlist[j] != 0; j++)
192 if (irqlist[j] == dev->irq)
193 break;
194 if (irqlist[j] == 0)
195 {
196 printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n",
197 dev->irq);
198 goto out3;
199 }
200 if (request_irq(dev->irq, tms380tr_interrupt, 0,
201 cardname, dev))
202 {
203 printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n",
204 dev->irq);
205 goto out3;
206 }
207 }
208
209 if (dev->dma == 0)
210 {
211 for(j = 0; dmalist[j] != 0; j++)
212 {
213 dev->dma = dmalist[j];
214 if (!request_dma(dev->dma, cardname))
215 break;
216 }
217
218 if(dmalist[j] == 0)
219 {
220 printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n");
221 goto out2;
222 }
223 }
224 else
225 {
226 for(j = 0; dmalist[j] != 0; j++)
227 if (dmalist[j] == dev->dma)
228 break;
229 if (dmalist[j] == 0)
230 {
231 printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n",
232 dev->dma);
233 goto out2;
234 }
235 if (request_dma(dev->dma, cardname))
236 {
237 printk(KERN_INFO "proteon.c: Selected DMA %d not available\n",
238 dev->dma);
239 goto out2;
240 }
241 }
242
243 err = register_netdev(dev);
244 if (err)
245 goto out;
246
247 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
248 dev->name, dev->base_addr, dev->irq, dev->dma);
249
250 return 0;
251out:
252 free_dma(dev->dma);
253out2:
254 free_irq(dev->irq, dev);
255out3:
256 tmsdev_term(dev);
257out4:
258 release_region(dev->base_addr, PROTEON_IO_EXTENT);
259out5:
260 return err;
261}
262
263/*
264 * Reads MAC address from adapter RAM, which should've read it from
265 * the onboard ROM.
266 *
267 * Calling this on a board that does not support it can be a very
268 * dangerous thing. The Madge board, for instance, will lock your
269 * machine hard when this is called. Luckily, its supported in a
270 * separate driver. --ASF
271 */
272static void proteon_read_eeprom(struct net_device *dev)
273{
274 int i;
275
276 /* Address: 0000:0000 */
277 proteon_sifwritew(dev, 0, SIFADX);
278 proteon_sifwritew(dev, 0, SIFADR);
279
280 /* Read six byte MAC address data */
281 dev->addr_len = 6;
282 for(i = 0; i < 6; i++)
283 dev->dev_addr[i] = proteon_sifreadw(dev, SIFINC) >> 8;
284}
285
286static unsigned short proteon_setnselout_pins(struct net_device *dev)
287{
288 return 0;
289}
290
291static int proteon_open(struct net_device *dev)
292{
293 struct net_local *tp = netdev_priv(dev);
294 unsigned short val = 0;
295 int i;
296
297 /* Proteon reset sequence */
298 outb(0, dev->base_addr + 0x11);
299 mdelay(20);
300 outb(0x04, dev->base_addr + 0x11);
301 mdelay(20);
302 outb(0, dev->base_addr + 0x11);
303 mdelay(100);
304
305 /* set control/status reg */
306 val = inb(dev->base_addr + 0x11);
307 val |= 0x78;
308 val &= 0xf9;
309 if(tp->DataRate == SPEED_4)
310 val |= 0x20;
311 else
312 val &= ~0x20;
313
314 outb(val, dev->base_addr + 0x11);
315 outb(0xff, dev->base_addr + 0x12);
316 for(i = 0; irqlist[i] != 0; i++)
317 {
318 if(irqlist[i] == dev->irq)
319 break;
320 }
321 val = i;
322 i = (7 - dev->dma) << 4;
323 val |= i;
324 outb(val, dev->base_addr + 0x13);
325
326 return tms380tr_open(dev);
327}
328
329#define ISATR_MAX_ADAPTERS 3
330
331static int io[ISATR_MAX_ADAPTERS];
332static int irq[ISATR_MAX_ADAPTERS];
333static int dma[ISATR_MAX_ADAPTERS];
334
335MODULE_LICENSE("GPL");
336
337module_param_array(io, int, NULL, 0);
338module_param_array(irq, int, NULL, 0);
339module_param_array(dma, int, NULL, 0);
340
341static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS];
342
343static struct platform_driver proteon_driver = {
344 .driver = {
345 .name = "proteon",
346 },
347};
348
349static int __init proteon_init(void)
350{
351 struct net_device *dev;
352 struct platform_device *pdev;
353 int i, num = 0, err = 0;
354
355 proteon_netdev_ops = tms380tr_netdev_ops;
356 proteon_netdev_ops.ndo_open = proteon_open;
357 proteon_netdev_ops.ndo_stop = tms380tr_close;
358
359 err = platform_driver_register(&proteon_driver);
360 if (err)
361 return err;
362
363 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
364 dev = alloc_trdev(sizeof(struct net_local));
365 if (!dev)
366 continue;
367
368 dev->base_addr = io[i];
369 dev->irq = irq[i];
370 dev->dma = dma[i];
371 pdev = platform_device_register_simple("proteon",
372 i, NULL, 0);
373 if (IS_ERR(pdev)) {
374 free_netdev(dev);
375 continue;
376 }
377 err = setup_card(dev, &pdev->dev);
378 if (!err) {
379 proteon_dev[i] = pdev;
380 platform_set_drvdata(pdev, dev);
381 ++num;
382 } else {
383 platform_device_unregister(pdev);
384 free_netdev(dev);
385 }
386 }
387
388 printk(KERN_NOTICE "proteon.c: %d cards found.\n", num);
389 /* Probe for cards. */
390 if (num == 0) {
391 printk(KERN_NOTICE "proteon.c: No cards found.\n");
392 platform_driver_unregister(&proteon_driver);
393 return -ENODEV;
394 }
395 return 0;
396}
397
398static void __exit proteon_cleanup(void)
399{
400 struct net_device *dev;
401 int i;
402
403 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
404 struct platform_device *pdev = proteon_dev[i];
405
406 if (!pdev)
407 continue;
408 dev = platform_get_drvdata(pdev);
409 unregister_netdev(dev);
410 release_region(dev->base_addr, PROTEON_IO_EXTENT);
411 free_irq(dev->irq, dev);
412 free_dma(dev->dma);
413 tmsdev_term(dev);
414 free_netdev(dev);
415 platform_set_drvdata(pdev, NULL);
416 platform_device_unregister(pdev);
417 }
418 platform_driver_unregister(&proteon_driver);
419}
420
421module_init(proteon_init);
422module_exit(proteon_cleanup);
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
deleted file mode 100644
index ee11e93dc30e..000000000000
--- a/drivers/net/tokenring/skisa.c
+++ /dev/null
@@ -1,432 +0,0 @@
1/*
2 * skisa.c: A network driver for SK-NET TMS380-based ISA token ring cards.
3 *
4 * Based on tmspci written 1999 by Adam Fritzler
5 *
6 * Written 2000 by Jochen Friedrich
7 * Dedicated to my girlfriend Steffi Bopp
8 *
9 * This software may be used and distributed according to the terms
10 * of the GNU General Public License, incorporated herein by reference.
11 *
12 * This driver module supports the following cards:
13 * - SysKonnect TR4/16(+) ISA (SK-4190)
14 *
15 * Maintainer(s):
16 * AF Adam Fritzler
17 * JF Jochen Friedrich jochen@scram.de
18 *
19 * Modification History:
20 * 14-Jan-01 JF Created
21 * 28-Oct-02 JF Fixed probe of card for static compilation.
22 * Fixed module init to not make hotplug go wild.
23 * 09-Nov-02 JF Fixed early bail out on out of memory
24 * situations if multiple cards are found.
25 * Cleaned up some unnecessary console SPAM.
26 * 09-Dec-02 JF Fixed module reference counting.
27 * 02-Jan-03 JF Renamed to skisa.c
28 *
29 */
30static const char version[] = "skisa.c: v1.03 09/12/2002 by Jochen Friedrich\n";
31
32#include <linux/module.h>
33#include <linux/kernel.h>
34#include <linux/errno.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/netdevice.h>
38#include <linux/trdevice.h>
39#include <linux/platform_device.h>
40
41#include <asm/io.h>
42#include <asm/irq.h>
43#include <asm/pci.h>
44#include <asm/dma.h>
45
46#include "tms380tr.h"
47
48#define SK_ISA_IO_EXTENT 32
49
50/* A zero-terminated list of I/O addresses to be probed. */
51static unsigned int portlist[] __initdata = {
52 0x0A20, 0x1A20, 0x0B20, 0x1B20, 0x0980, 0x1980, 0x0900, 0x1900,// SK
53 0
54};
55
56/* A zero-terminated list of IRQs to be probed.
57 * Used again after initial probe for sktr_chipset_init, called from sktr_open.
58 */
59static const unsigned short irqlist[] = {
60 3, 5, 9, 10, 11, 12, 15,
61 0
62};
63
64/* A zero-terminated list of DMAs to be probed. */
65static int dmalist[] __initdata = {
66 5, 6, 7,
67 0
68};
69
70static char isa_cardname[] = "SK NET TR 4/16 ISA\0";
71static u64 dma_mask = ISA_MAX_ADDRESS;
72static int sk_isa_open(struct net_device *dev);
73static void sk_isa_read_eeprom(struct net_device *dev);
74static unsigned short sk_isa_setnselout_pins(struct net_device *dev);
75
76static unsigned short sk_isa_sifreadb(struct net_device *dev, unsigned short reg)
77{
78 return inb(dev->base_addr + reg);
79}
80
81static unsigned short sk_isa_sifreadw(struct net_device *dev, unsigned short reg)
82{
83 return inw(dev->base_addr + reg);
84}
85
86static void sk_isa_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
87{
88 outb(val, dev->base_addr + reg);
89}
90
91static void sk_isa_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
92{
93 outw(val, dev->base_addr + reg);
94}
95
96
97static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
98{
99 unsigned char old, chk1, chk2;
100
101 if (!request_region(ioaddr, SK_ISA_IO_EXTENT, isa_cardname))
102 return -ENODEV;
103
104 old = inb(ioaddr + SIFADR); /* Get the old SIFADR value */
105
106 chk1 = 0; /* Begin with check value 0 */
107 do {
108 /* Write new SIFADR value */
109 outb(chk1, ioaddr + SIFADR);
110
111 /* Read, invert and write */
112 chk2 = inb(ioaddr + SIFADD);
113 chk2 ^= 0x0FE;
114 outb(chk2, ioaddr + SIFADR);
115
116 /* Read, invert and compare */
117 chk2 = inb(ioaddr + SIFADD);
118 chk2 ^= 0x0FE;
119
120 if(chk1 != chk2) {
121 release_region(ioaddr, SK_ISA_IO_EXTENT);
122 return -ENODEV;
123 }
124
125 chk1 -= 2;
126 } while(chk1 != 0); /* Repeat 128 times (all byte values) */
127
128 /* Restore the SIFADR value */
129 outb(old, ioaddr + SIFADR);
130
131 dev->base_addr = ioaddr;
132 return 0;
133}
134
135static struct net_device_ops sk_isa_netdev_ops __read_mostly;
136
137static int __init setup_card(struct net_device *dev, struct device *pdev)
138{
139 struct net_local *tp;
140 static int versionprinted;
141 const unsigned *port;
142 int j, err = 0;
143
144 if (!dev)
145 return -ENOMEM;
146
147 if (dev->base_addr) /* probe specific location */
148 err = sk_isa_probe1(dev, dev->base_addr);
149 else {
150 for (port = portlist; *port; port++) {
151 err = sk_isa_probe1(dev, *port);
152 if (!err)
153 break;
154 }
155 }
156 if (err)
157 goto out5;
158
159 /* At this point we have found a valid card. */
160
161 if (versionprinted++ == 0)
162 printk(KERN_DEBUG "%s", version);
163
164 err = -EIO;
165 pdev->dma_mask = &dma_mask;
166 if (tmsdev_init(dev, pdev))
167 goto out4;
168
169 dev->base_addr &= ~3;
170
171 sk_isa_read_eeprom(dev);
172
173 printk(KERN_DEBUG "skisa.c: Ring Station Address: %pM\n",
174 dev->dev_addr);
175
176 tp = netdev_priv(dev);
177 tp->setnselout = sk_isa_setnselout_pins;
178
179 tp->sifreadb = sk_isa_sifreadb;
180 tp->sifreadw = sk_isa_sifreadw;
181 tp->sifwriteb = sk_isa_sifwriteb;
182 tp->sifwritew = sk_isa_sifwritew;
183
184 memcpy(tp->ProductID, isa_cardname, PROD_ID_SIZE + 1);
185
186 tp->tmspriv = NULL;
187
188 dev->netdev_ops = &sk_isa_netdev_ops;
189
190 if (dev->irq == 0)
191 {
192 for(j = 0; irqlist[j] != 0; j++)
193 {
194 dev->irq = irqlist[j];
195 if (!request_irq(dev->irq, tms380tr_interrupt, 0,
196 isa_cardname, dev))
197 break;
198 }
199
200 if(irqlist[j] == 0)
201 {
202 printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n");
203 goto out3;
204 }
205 }
206 else
207 {
208 for(j = 0; irqlist[j] != 0; j++)
209 if (irqlist[j] == dev->irq)
210 break;
211 if (irqlist[j] == 0)
212 {
213 printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n",
214 dev->irq);
215 goto out3;
216 }
217 if (request_irq(dev->irq, tms380tr_interrupt, 0,
218 isa_cardname, dev))
219 {
220 printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n",
221 dev->irq);
222 goto out3;
223 }
224 }
225
226 if (dev->dma == 0)
227 {
228 for(j = 0; dmalist[j] != 0; j++)
229 {
230 dev->dma = dmalist[j];
231 if (!request_dma(dev->dma, isa_cardname))
232 break;
233 }
234
235 if(dmalist[j] == 0)
236 {
237 printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n");
238 goto out2;
239 }
240 }
241 else
242 {
243 for(j = 0; dmalist[j] != 0; j++)
244 if (dmalist[j] == dev->dma)
245 break;
246 if (dmalist[j] == 0)
247 {
248 printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n",
249 dev->dma);
250 goto out2;
251 }
252 if (request_dma(dev->dma, isa_cardname))
253 {
254 printk(KERN_INFO "skisa.c: Selected DMA %d not available\n",
255 dev->dma);
256 goto out2;
257 }
258 }
259
260 err = register_netdev(dev);
261 if (err)
262 goto out;
263
264 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
265 dev->name, dev->base_addr, dev->irq, dev->dma);
266
267 return 0;
268out:
269 free_dma(dev->dma);
270out2:
271 free_irq(dev->irq, dev);
272out3:
273 tmsdev_term(dev);
274out4:
275 release_region(dev->base_addr, SK_ISA_IO_EXTENT);
276out5:
277 return err;
278}
279
280/*
281 * Reads MAC address from adapter RAM, which should've read it from
282 * the onboard ROM.
283 *
284 * Calling this on a board that does not support it can be a very
285 * dangerous thing. The Madge board, for instance, will lock your
286 * machine hard when this is called. Luckily, its supported in a
287 * separate driver. --ASF
288 */
289static void sk_isa_read_eeprom(struct net_device *dev)
290{
291 int i;
292
293 /* Address: 0000:0000 */
294 sk_isa_sifwritew(dev, 0, SIFADX);
295 sk_isa_sifwritew(dev, 0, SIFADR);
296
297 /* Read six byte MAC address data */
298 dev->addr_len = 6;
299 for(i = 0; i < 6; i++)
300 dev->dev_addr[i] = sk_isa_sifreadw(dev, SIFINC) >> 8;
301}
302
303static unsigned short sk_isa_setnselout_pins(struct net_device *dev)
304{
305 return 0;
306}
307
308static int sk_isa_open(struct net_device *dev)
309{
310 struct net_local *tp = netdev_priv(dev);
311 unsigned short val = 0;
312 unsigned short oldval;
313 int i;
314
315 val = 0;
316 for(i = 0; irqlist[i] != 0; i++)
317 {
318 if(irqlist[i] == dev->irq)
319 break;
320 }
321
322 val |= CYCLE_TIME << 2;
323 val |= i << 4;
324 i = dev->dma - 5;
325 val |= i;
326 if(tp->DataRate == SPEED_4)
327 val |= LINE_SPEED_BIT;
328 else
329 val &= ~LINE_SPEED_BIT;
330 oldval = sk_isa_sifreadb(dev, POSREG);
331 /* Leave cycle bits alone */
332 oldval |= 0xf3;
333 val &= oldval;
334 sk_isa_sifwriteb(dev, val, POSREG);
335
336 return tms380tr_open(dev);
337}
338
339#define ISATR_MAX_ADAPTERS 3
340
341static int io[ISATR_MAX_ADAPTERS];
342static int irq[ISATR_MAX_ADAPTERS];
343static int dma[ISATR_MAX_ADAPTERS];
344
345MODULE_LICENSE("GPL");
346
347module_param_array(io, int, NULL, 0);
348module_param_array(irq, int, NULL, 0);
349module_param_array(dma, int, NULL, 0);
350
351static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
352
353static struct platform_driver sk_isa_driver = {
354 .driver = {
355 .name = "skisa",
356 },
357};
358
359static int __init sk_isa_init(void)
360{
361 struct net_device *dev;
362 struct platform_device *pdev;
363 int i, num = 0, err = 0;
364
365 sk_isa_netdev_ops = tms380tr_netdev_ops;
366 sk_isa_netdev_ops.ndo_open = sk_isa_open;
367 sk_isa_netdev_ops.ndo_stop = tms380tr_close;
368
369 err = platform_driver_register(&sk_isa_driver);
370 if (err)
371 return err;
372
373 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
374 dev = alloc_trdev(sizeof(struct net_local));
375 if (!dev)
376 continue;
377
378 dev->base_addr = io[i];
379 dev->irq = irq[i];
380 dev->dma = dma[i];
381 pdev = platform_device_register_simple("skisa",
382 i, NULL, 0);
383 if (IS_ERR(pdev)) {
384 free_netdev(dev);
385 continue;
386 }
387 err = setup_card(dev, &pdev->dev);
388 if (!err) {
389 sk_isa_dev[i] = pdev;
390 platform_set_drvdata(sk_isa_dev[i], dev);
391 ++num;
392 } else {
393 platform_device_unregister(pdev);
394 free_netdev(dev);
395 }
396 }
397
398 printk(KERN_NOTICE "skisa.c: %d cards found.\n", num);
399 /* Probe for cards. */
400 if (num == 0) {
401 printk(KERN_NOTICE "skisa.c: No cards found.\n");
402 platform_driver_unregister(&sk_isa_driver);
403 return -ENODEV;
404 }
405 return 0;
406}
407
408static void __exit sk_isa_cleanup(void)
409{
410 struct net_device *dev;
411 int i;
412
413 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
414 struct platform_device *pdev = sk_isa_dev[i];
415
416 if (!pdev)
417 continue;
418 dev = platform_get_drvdata(pdev);
419 unregister_netdev(dev);
420 release_region(dev->base_addr, SK_ISA_IO_EXTENT);
421 free_irq(dev->irq, dev);
422 free_dma(dev->dma);
423 tmsdev_term(dev);
424 free_netdev(dev);
425 platform_set_drvdata(pdev, NULL);
426 platform_device_unregister(pdev);
427 }
428 platform_driver_unregister(&sk_isa_driver);
429}
430
431module_init(sk_isa_init);
432module_exit(sk_isa_cleanup);
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
deleted file mode 100644
index cb35fb79e016..000000000000
--- a/drivers/net/tokenring/smctr.c
+++ /dev/null
@@ -1,5717 +0,0 @@
1/*
2 * smctr.c: A network driver for the SMC Token Ring Adapters.
3 *
4 * Written by Jay Schulist <jschlst@samba.org>
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 * This device driver works with the following SMC adapters:
10 * - SMC TokenCard Elite (8115T, chips 825/584)
11 * - SMC TokenCard Elite/A MCA (8115T/A, chips 825/594)
12 *
13 * Source(s):
14 * - SMC TokenCard SDK.
15 *
16 * Maintainer(s):
17 * JS Jay Schulist <jschlst@samba.org>
18 *
19 * Changes:
20 * 07102000 JS Fixed a timing problem in smctr_wait_cmd();
 21 * Also added a bit more descriptive error msgs.
22 * 07122000 JS Fixed problem with detecting a card with
23 * module io/irq/mem specified.
24 *
25 * To do:
26 * 1. Multicast support.
27 *
28 * Initial 2.5 cleanup Alan Cox <alan@lxorguk.ukuu.org.uk> 2002/10/28
29 */
30
31#include <linux/module.h>
32#include <linux/kernel.h>
33#include <linux/types.h>
34#include <linux/fcntl.h>
35#include <linux/interrupt.h>
36#include <linux/ptrace.h>
37#include <linux/ioport.h>
38#include <linux/in.h>
39#include <linux/string.h>
40#include <linux/time.h>
41#include <linux/errno.h>
42#include <linux/init.h>
43#include <linux/mca-legacy.h>
44#include <linux/delay.h>
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h>
47#include <linux/skbuff.h>
48#include <linux/trdevice.h>
49#include <linux/bitops.h>
50#include <linux/firmware.h>
51
52#include <asm/io.h>
53#include <asm/dma.h>
54#include <asm/irq.h>
55
56#if BITS_PER_LONG == 64
57#error FIXME: driver does not support 64-bit platforms
58#endif
59
60#include "smctr.h" /* Our Stuff */
61
62static const char version[] __initdata =
63 KERN_INFO "smctr.c: v1.4 7/12/00 by jschlst@samba.org\n";
64static const char cardname[] = "smctr";
65
66
67#define SMCTR_IO_EXTENT 20
68
69#ifdef CONFIG_MCA_LEGACY
70static unsigned int smctr_posid = 0x6ec6;
71#endif
72
73static int ringspeed;
74
75/* SMC Name of the Adapter. */
76static char smctr_name[] = "SMC TokenCard";
77static char *smctr_model = "Unknown";
78
79/* Use 0 for production, 1 for verification, 2 for debug, and
80 * 3 for very verbose debug.
81 */
82#ifndef SMCTR_DEBUG
83#define SMCTR_DEBUG 1
84#endif
85static unsigned int smctr_debug = SMCTR_DEBUG;
86
 87/* smctr.c prototypes and functions are arranged alphabetically
 88 * for clarity, maintainability and pure old-fashioned fun.
 89 */
90/* A */
91static int smctr_alloc_shared_memory(struct net_device *dev);
92
93/* B */
94static int smctr_bypass_state(struct net_device *dev);
95
96/* C */
97static int smctr_checksum_firmware(struct net_device *dev);
98static int __init smctr_chk_isa(struct net_device *dev);
99static int smctr_chg_rx_mask(struct net_device *dev);
100static int smctr_clear_int(struct net_device *dev);
101static int smctr_clear_trc_reset(int ioaddr);
102static int smctr_close(struct net_device *dev);
103
104/* D */
105static int smctr_decode_firmware(struct net_device *dev,
106 const struct firmware *fw);
107static int smctr_disable_16bit(struct net_device *dev);
108static int smctr_disable_adapter_ctrl_store(struct net_device *dev);
109static int smctr_disable_bic_int(struct net_device *dev);
110
111/* E */
112static int smctr_enable_16bit(struct net_device *dev);
113static int smctr_enable_adapter_ctrl_store(struct net_device *dev);
114static int smctr_enable_adapter_ram(struct net_device *dev);
115static int smctr_enable_bic_int(struct net_device *dev);
116
117/* G */
118static int __init smctr_get_boardid(struct net_device *dev, int mca);
119static int smctr_get_group_address(struct net_device *dev);
120static int smctr_get_functional_address(struct net_device *dev);
121static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev);
122static int smctr_get_physical_drop_number(struct net_device *dev);
123static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue);
124static int smctr_get_station_id(struct net_device *dev);
125static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
126 __u16 bytes_count);
127static int smctr_get_upstream_neighbor_addr(struct net_device *dev);
128
129/* H */
130static int smctr_hardware_send_packet(struct net_device *dev,
131 struct net_local *tp);
132/* I */
133static int smctr_init_acbs(struct net_device *dev);
134static int smctr_init_adapter(struct net_device *dev);
135static int smctr_init_card_real(struct net_device *dev);
136static int smctr_init_rx_bdbs(struct net_device *dev);
137static int smctr_init_rx_fcbs(struct net_device *dev);
138static int smctr_init_shared_memory(struct net_device *dev);
139static int smctr_init_tx_bdbs(struct net_device *dev);
140static int smctr_init_tx_fcbs(struct net_device *dev);
141static int smctr_internal_self_test(struct net_device *dev);
142static irqreturn_t smctr_interrupt(int irq, void *dev_id);
143static int smctr_issue_enable_int_cmd(struct net_device *dev,
144 __u16 interrupt_enable_mask);
145static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code,
146 __u16 ibits);
147static int smctr_issue_init_timers_cmd(struct net_device *dev);
148static int smctr_issue_init_txrx_cmd(struct net_device *dev);
149static int smctr_issue_insert_cmd(struct net_device *dev);
150static int smctr_issue_read_ring_status_cmd(struct net_device *dev);
151static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt);
152static int smctr_issue_remove_cmd(struct net_device *dev);
153static int smctr_issue_resume_acb_cmd(struct net_device *dev);
154static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue);
155static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue);
156static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue);
157static int smctr_issue_test_internal_rom_cmd(struct net_device *dev);
158static int smctr_issue_test_hic_cmd(struct net_device *dev);
159static int smctr_issue_test_mac_reg_cmd(struct net_device *dev);
160static int smctr_issue_trc_loopback_cmd(struct net_device *dev);
161static int smctr_issue_tri_loopback_cmd(struct net_device *dev);
162static int smctr_issue_write_byte_cmd(struct net_device *dev,
163 short aword_cnt, void *byte);
164static int smctr_issue_write_word_cmd(struct net_device *dev,
165 short aword_cnt, void *word);
166
167/* J */
168static int smctr_join_complete_state(struct net_device *dev);
169
170/* L */
171static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev);
172static int smctr_load_firmware(struct net_device *dev);
173static int smctr_load_node_addr(struct net_device *dev);
174static int smctr_lobe_media_test(struct net_device *dev);
175static int smctr_lobe_media_test_cmd(struct net_device *dev);
176static int smctr_lobe_media_test_state(struct net_device *dev);
177
178/* M */
179static int smctr_make_8025_hdr(struct net_device *dev,
180 MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc);
181static int smctr_make_access_pri(struct net_device *dev,
182 MAC_SUB_VECTOR *tsv);
183static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv);
184static int smctr_make_auth_funct_class(struct net_device *dev,
185 MAC_SUB_VECTOR *tsv);
186static int smctr_make_corr(struct net_device *dev,
187 MAC_SUB_VECTOR *tsv, __u16 correlator);
188static int smctr_make_funct_addr(struct net_device *dev,
189 MAC_SUB_VECTOR *tsv);
190static int smctr_make_group_addr(struct net_device *dev,
191 MAC_SUB_VECTOR *tsv);
192static int smctr_make_phy_drop_num(struct net_device *dev,
193 MAC_SUB_VECTOR *tsv);
194static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
195static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
196static int smctr_make_ring_station_status(struct net_device *dev,
197 MAC_SUB_VECTOR *tsv);
198static int smctr_make_ring_station_version(struct net_device *dev,
199 MAC_SUB_VECTOR *tsv);
200static int smctr_make_tx_status_code(struct net_device *dev,
201 MAC_SUB_VECTOR *tsv, __u16 tx_fstatus);
202static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
203 MAC_SUB_VECTOR *tsv);
204static int smctr_make_wrap_data(struct net_device *dev,
205 MAC_SUB_VECTOR *tsv);
206
207/* O */
208static int smctr_open(struct net_device *dev);
209static int smctr_open_tr(struct net_device *dev);
210
211/* P */
212struct net_device *smctr_probe(int unit);
213static int __init smctr_probe1(struct net_device *dev, int ioaddr);
214static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
215 struct net_device *dev, __u16 rx_status);
216
217/* R */
218static int smctr_ram_memory_test(struct net_device *dev);
219static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
220 __u16 *correlator);
221static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
222 __u16 *correlator);
223static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf);
224static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
225 MAC_HEADER *rmf, __u16 *correlator);
226static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
227 __u16 *correlator);
228static int smctr_reset_adapter(struct net_device *dev);
229static int smctr_restart_tx_chain(struct net_device *dev, short queue);
230static int smctr_ring_status_chg(struct net_device *dev);
231static int smctr_rx_frame(struct net_device *dev);
232
233/* S */
234static int smctr_send_dat(struct net_device *dev);
235static netdev_tx_t smctr_send_packet(struct sk_buff *skb,
236 struct net_device *dev);
237static int smctr_send_lobe_media_test(struct net_device *dev);
238static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
239 __u16 correlator);
240static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
241 __u16 correlator);
242static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
243 __u16 correlator);
244static int smctr_send_rpt_tx_forward(struct net_device *dev,
245 MAC_HEADER *rmf, __u16 tx_fstatus);
246static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
247 __u16 rcode, __u16 correlator);
248static int smctr_send_rq_init(struct net_device *dev);
249static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
250 __u16 *tx_fstatus);
251static int smctr_set_auth_access_pri(struct net_device *dev,
252 MAC_SUB_VECTOR *rsv);
253static int smctr_set_auth_funct_class(struct net_device *dev,
254 MAC_SUB_VECTOR *rsv);
255static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
256 __u16 *correlator);
257static int smctr_set_error_timer_value(struct net_device *dev,
258 MAC_SUB_VECTOR *rsv);
259static int smctr_set_frame_forward(struct net_device *dev,
260 MAC_SUB_VECTOR *rsv, __u8 dc_sc);
261static int smctr_set_local_ring_num(struct net_device *dev,
262 MAC_SUB_VECTOR *rsv);
263static unsigned short smctr_set_ctrl_attention(struct net_device *dev);
264static void smctr_set_multicast_list(struct net_device *dev);
265static int smctr_set_page(struct net_device *dev, __u8 *buf);
266static int smctr_set_phy_drop(struct net_device *dev,
267 MAC_SUB_VECTOR *rsv);
268static int smctr_set_ring_speed(struct net_device *dev);
269static int smctr_set_rx_look_ahead(struct net_device *dev);
270static int smctr_set_trc_reset(int ioaddr);
271static int smctr_setup_single_cmd(struct net_device *dev,
272 __u16 command, __u16 subcommand);
273static int smctr_setup_single_cmd_w_data(struct net_device *dev,
274 __u16 command, __u16 subcommand);
275static char *smctr_malloc(struct net_device *dev, __u16 size);
276static int smctr_status_chg(struct net_device *dev);
277
278/* T */
279static void smctr_timeout(struct net_device *dev);
280static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
281 __u16 queue);
282static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue);
283static unsigned short smctr_tx_move_frame(struct net_device *dev,
284 struct sk_buff *skb, __u8 *pbuff, unsigned int bytes);
285
286/* U */
287static int smctr_update_err_stats(struct net_device *dev);
288static int smctr_update_rx_chain(struct net_device *dev, __u16 queue);
289static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
290 __u16 queue);
291
292/* W */
293static int smctr_wait_cmd(struct net_device *dev);
294static int smctr_wait_while_cbusy(struct net_device *dev);
295
/*
 * Byte distance from X up to the next 256-byte / 16-byte (paragraph)
 * boundary (0 when X is already aligned).  Arguments are fully
 * parenthesized so expression arguments expand correctly; the original
 * macros evaluated a bare X and mis-grouped e.g. TO_PARAGRAPH_BOUNDRY(a + b).
 * NOTE(review): the 0xff00/0xfff0 masks assume X < 64K — offsets within
 * the adapter's shared-RAM window; confirm before wider use.
 */
#define TO_256_BYTE_BOUNDRY(X)  ((((X) + 0xff) & 0xff00) - (X))
#define TO_PARAGRAPH_BOUNDRY(X) ((((X) + 0x0f) & 0xfff0) - (X))
/* Advance the shared-memory allocator to the next paragraph boundary. */
#define PARAGRAPH_BOUNDRY(X)    smctr_malloc(dev, TO_PARAGRAPH_BOUNDRY(X))
299
300/* Allocate Adapter Shared Memory.
301 * IMPORTANT NOTE: Any changes to this function MUST be mirrored in the
302 * function "get_num_rx_bdbs" below!!!
303 *
304 * Order of memory allocation:
305 *
306 * 0. Initial System Configuration Block Pointer
307 * 1. System Configuration Block
308 * 2. System Control Block
309 * 3. Action Command Block
310 * 4. Interrupt Status Block
311 *
312 * 5. MAC TX FCB'S
313 * 6. NON-MAC TX FCB'S
314 * 7. MAC TX BDB'S
315 * 8. NON-MAC TX BDB'S
316 * 9. MAC RX FCB'S
317 * 10. NON-MAC RX FCB'S
318 * 11. MAC RX BDB'S
319 * 12. NON-MAC RX BDB'S
320 * 13. MAC TX Data Buffer( 1, 256 byte buffer)
321 * 14. MAC RX Data Buffer( 1, 256 byte buffer)
322 *
323 * 15. NON-MAC TX Data Buffer
324 * 16. NON-MAC RX Data Buffer
325 */
/*
 * Carve up the adapter's shared-RAM window with smctr_malloc(), which
 * only advances tp->sh_mem_used — so this cannot fail and always
 * returns 0.  The allocation order here MUST stay in sync with
 * smctr_get_num_rx_bdbs() (see the block comment above).
 */
static int smctr_alloc_shared_memory(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_alloc_shared_memory\n", dev->name);

	/* Allocate initial System Control Block pointer.
	 * This pointer is located in the last page, last offset - 4
	 * (64 KB window minus ISCP_BLOCK_SIZE).
	 */
	tp->iscpb_ptr = (ISCPBlock *)(tp->ram_access + ((__u32)64 * 0x400)
		- (long)ISCP_BLOCK_SIZE);

	/* Allocate System Control Blocks. */
	tp->scgb_ptr = (SCGBlock *)smctr_malloc(dev, sizeof(SCGBlock));
	PARAGRAPH_BOUNDRY(tp->sh_mem_used);

	tp->sclb_ptr = (SCLBlock *)smctr_malloc(dev, sizeof(SCLBlock));
	PARAGRAPH_BOUNDRY(tp->sh_mem_used);

	tp->acb_head = (ACBlock *)smctr_malloc(dev,
		sizeof(ACBlock)*tp->num_acbs);
	PARAGRAPH_BOUNDRY(tp->sh_mem_used);

	tp->isb_ptr = (ISBlock *)smctr_malloc(dev, sizeof(ISBlock));
	PARAGRAPH_BOUNDRY(tp->sh_mem_used);

	tp->misc_command_data = (__u16 *)smctr_malloc(dev, MISC_DATA_SIZE);
	PARAGRAPH_BOUNDRY(tp->sh_mem_used);

	/* Allocate transmit FCBs. */
	tp->tx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
		sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]);

	tp->tx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
		sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]);

	tp->tx_fcb_head[BUG_QUEUE] = (FCBlock *)smctr_malloc(dev,
		sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]);

	/* Allocate transmit BDBs. */
	tp->tx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
		sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]);

	tp->tx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
		sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]);

	tp->tx_bdb_head[BUG_QUEUE] = (BDBlock *)smctr_malloc(dev,
		sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]);

	/* Allocate receive FCBs. */
	tp->rx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
		sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]);

	tp->rx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
		sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]);

	/* Allocate receive BDBs; a zero-size smctr_malloc() records the
	 * end-of-ring address without consuming memory.
	 */
	tp->rx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
		sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]);

	tp->rx_bdb_end[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);

	tp->rx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
		sizeof(BDBlock) * tp->num_rx_bdbs[NON_MAC_QUEUE]);

	tp->rx_bdb_end[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);

	/* Allocate MAC transmit buffers.
	 * MAC Tx Buffers don't have to be on an ODD Boundary.
	 */
	tp->tx_buff_head[MAC_QUEUE]
		= (__u16 *)smctr_malloc(dev, tp->tx_buff_size[MAC_QUEUE]);
	tp->tx_buff_curr[MAC_QUEUE] = tp->tx_buff_head[MAC_QUEUE];
	tp->tx_buff_end [MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);

	/* Allocate BUG transmit buffers. */
	tp->tx_buff_head[BUG_QUEUE]
		= (__u16 *)smctr_malloc(dev, tp->tx_buff_size[BUG_QUEUE]);
	tp->tx_buff_curr[BUG_QUEUE] = tp->tx_buff_head[BUG_QUEUE];
	tp->tx_buff_end[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, 0);

	/* Allocate MAC receive data buffers.
	 * MAC Rx buffer doesn't have to be on a 256 byte boundary.
	 */
	tp->rx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
		RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]);
	tp->rx_buff_end[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);

	/* Allocate Non-MAC transmit buffers.
	 * ?? For maximum Netware performance, put Tx Buffers on
	 * ODD Boundary and then restore malloc to Even Boundaries.
	 * (The two 1-byte allocations below implement exactly that.)
	 */
	smctr_malloc(dev, 1L);
	tp->tx_buff_head[NON_MAC_QUEUE]
		= (__u16 *)smctr_malloc(dev, tp->tx_buff_size[NON_MAC_QUEUE]);
	tp->tx_buff_curr[NON_MAC_QUEUE] = tp->tx_buff_head[NON_MAC_QUEUE];
	tp->tx_buff_end [NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
	smctr_malloc(dev, 1L);

	/* Allocate Non-MAC receive data buffers.
	 * To guarantee a minimum of 256 contiguous memory to
	 * UM_Receive_Packet's lookahead pointer, before a page
	 * change or ring end is encountered, place each rx buffer on
	 * a 256 byte boundary.
	 */
	smctr_malloc(dev, TO_256_BYTE_BOUNDRY(tp->sh_mem_used));
	tp->rx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
		RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]);
	tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);

	return 0;
}
439
440/* Enter Bypass state. */
441static int smctr_bypass_state(struct net_device *dev)
442{
443 int err;
444
445 if(smctr_debug > 10)
446 printk(KERN_DEBUG "%s: smctr_bypass_state\n", dev->name);
447
448 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE);
449
450 return err;
451}
452
453static int smctr_checksum_firmware(struct net_device *dev)
454{
455 struct net_local *tp = netdev_priv(dev);
456 __u16 i, checksum = 0;
457
458 if(smctr_debug > 10)
459 printk(KERN_DEBUG "%s: smctr_checksum_firmware\n", dev->name);
460
461 smctr_enable_adapter_ctrl_store(dev);
462
463 for(i = 0; i < CS_RAM_SIZE; i += 2)
464 checksum += *((__u16 *)(tp->ram_access + i));
465
466 tp->microcode_version = *(__u16 *)(tp->ram_access
467 + CS_RAM_VERSION_OFFSET);
468 tp->microcode_version >>= 8;
469
470 smctr_disable_adapter_ctrl_store(dev);
471
472 if(checksum)
473 return checksum;
474
475 return 0;
476}
477
478static int __init smctr_chk_mca(struct net_device *dev)
479{
480#ifdef CONFIG_MCA_LEGACY
481 struct net_local *tp = netdev_priv(dev);
482 int current_slot;
483 __u8 r1, r2, r3, r4, r5;
484
485 current_slot = mca_find_unused_adapter(smctr_posid, 0);
486 if(current_slot == MCA_NOTFOUND)
487 return -ENODEV;
488
489 mca_set_adapter_name(current_slot, smctr_name);
490 mca_mark_as_used(current_slot);
491 tp->slot_num = current_slot;
492
493 r1 = mca_read_stored_pos(tp->slot_num, 2);
494 r2 = mca_read_stored_pos(tp->slot_num, 3);
495
496 if(tp->slot_num)
497 outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num - 1) | CNFG_SLOT_ENABLE_BIT));
498 else
499 outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num) | CNFG_SLOT_ENABLE_BIT));
500
501 r1 = inb(CNFG_POS_REG1);
502 r2 = inb(CNFG_POS_REG0);
503
504 tp->bic_type = BIC_594_CHIP;
505
506 /* IO */
507 r2 = mca_read_stored_pos(tp->slot_num, 2);
508 r2 &= 0xF0;
509 dev->base_addr = ((__u16)r2 << 8) + (__u16)0x800;
510 request_region(dev->base_addr, SMCTR_IO_EXTENT, smctr_name);
511
512 /* IRQ */
513 r5 = mca_read_stored_pos(tp->slot_num, 5);
514 r5 &= 0xC;
515 switch(r5)
516 {
517 case 0:
518 dev->irq = 3;
519 break;
520
521 case 0x4:
522 dev->irq = 4;
523 break;
524
525 case 0x8:
526 dev->irq = 10;
527 break;
528
529 default:
530 dev->irq = 15;
531 break;
532 }
533 if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev)) {
534 release_region(dev->base_addr, SMCTR_IO_EXTENT);
535 return -ENODEV;
536 }
537
538 /* Get RAM base */
539 r3 = mca_read_stored_pos(tp->slot_num, 3);
540 tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0C0000;
541 if (r3 & 0x8)
542 tp->ram_base += 0x010000;
543 if (r3 & 0x80)
544 tp->ram_base += 0xF00000;
545
546 /* Get Ram Size */
547 r3 &= 0x30;
548 r3 >>= 4;
549
550 tp->ram_usable = (__u16)CNFG_SIZE_8KB << r3;
551 tp->ram_size = (__u16)CNFG_SIZE_64KB;
552 tp->board_id |= TOKEN_MEDIA;
553
554 r4 = mca_read_stored_pos(tp->slot_num, 4);
555 tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0x0C0000;
556 if (r4 & 0x8)
557 tp->rom_base += 0x010000;
558
559 /* Get ROM size. */
560 r4 >>= 4;
561 switch (r4) {
562 case 0:
563 tp->rom_size = CNFG_SIZE_8KB;
564 break;
565 case 1:
566 tp->rom_size = CNFG_SIZE_16KB;
567 break;
568 case 2:
569 tp->rom_size = CNFG_SIZE_32KB;
570 break;
571 default:
572 tp->rom_size = ROM_DISABLE;
573 }
574
575 /* Get Media Type. */
576 r5 = mca_read_stored_pos(tp->slot_num, 5);
577 r5 &= CNFG_MEDIA_TYPE_MASK;
578 switch(r5)
579 {
580 case (0):
581 tp->media_type = MEDIA_STP_4;
582 break;
583
584 case (1):
585 tp->media_type = MEDIA_STP_16;
586 break;
587
588 case (3):
589 tp->media_type = MEDIA_UTP_16;
590 break;
591
592 default:
593 tp->media_type = MEDIA_UTP_4;
594 break;
595 }
596 tp->media_menu = 14;
597
598 r2 = mca_read_stored_pos(tp->slot_num, 2);
599 if(!(r2 & 0x02))
600 tp->mode_bits |= EARLY_TOKEN_REL;
601
602 /* Disable slot */
603 outb(CNFG_POS_CONTROL_REG, 0);
604
605 tp->board_id = smctr_get_boardid(dev, 1);
606 switch(tp->board_id & 0xffff)
607 {
608 case WD8115TA:
609 smctr_model = "8115T/A";
610 break;
611
612 case WD8115T:
613 if(tp->extra_info & CHIP_REV_MASK)
614 smctr_model = "8115T rev XE";
615 else
616 smctr_model = "8115T rev XD";
617 break;
618
619 default:
620 smctr_model = "Unknown";
621 break;
622 }
623
624 return 0;
625#else
626 return -1;
627#endif /* CONFIG_MCA_LEGACY */
628}
629
630static int smctr_chg_rx_mask(struct net_device *dev)
631{
632 struct net_local *tp = netdev_priv(dev);
633 int err = 0;
634
635 if(smctr_debug > 10)
636 printk(KERN_DEBUG "%s: smctr_chg_rx_mask\n", dev->name);
637
638 smctr_enable_16bit(dev);
639 smctr_set_page(dev, (__u8 *)tp->ram_access);
640
641 if(tp->mode_bits & LOOPING_MODE_MASK)
642 tp->config_word0 |= RX_OWN_BIT;
643 else
644 tp->config_word0 &= ~RX_OWN_BIT;
645
646 if(tp->receive_mask & PROMISCUOUS_MODE)
647 tp->config_word0 |= PROMISCUOUS_BIT;
648 else
649 tp->config_word0 &= ~PROMISCUOUS_BIT;
650
651 if(tp->receive_mask & ACCEPT_ERR_PACKETS)
652 tp->config_word0 |= SAVBAD_BIT;
653 else
654 tp->config_word0 &= ~SAVBAD_BIT;
655
656 if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
657 tp->config_word0 |= RXATMAC;
658 else
659 tp->config_word0 &= ~RXATMAC;
660
661 if(tp->receive_mask & ACCEPT_MULTI_PROM)
662 tp->config_word1 |= MULTICAST_ADDRESS_BIT;
663 else
664 tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;
665
666 if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
667 tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
668 else
669 {
670 if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
671 tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
672 else
673 tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
674 }
675
676 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0,
677 &tp->config_word0)))
678 {
679 return err;
680 }
681
682 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1,
683 &tp->config_word1)))
684 {
685 return err;
686 }
687
688 smctr_disable_16bit(dev);
689
690 return 0;
691}
692
693static int smctr_clear_int(struct net_device *dev)
694{
695 struct net_local *tp = netdev_priv(dev);
696
697 outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR);
698
699 return 0;
700}
701
702static int smctr_clear_trc_reset(int ioaddr)
703{
704 __u8 r;
705
706 r = inb(ioaddr + MSR);
707 outb(~MSR_RST & r, ioaddr + MSR);
708
709 return 0;
710}
711
712/*
713 * The inverse routine to smctr_open().
714 */
715static int smctr_close(struct net_device *dev)
716{
717 struct net_local *tp = netdev_priv(dev);
718 struct sk_buff *skb;
719 int err;
720
721 netif_stop_queue(dev);
722
723 tp->cleanup = 1;
724
725 /* Check to see if adapter is already in a closed state. */
726 if(tp->status != OPEN)
727 return 0;
728
729 smctr_enable_16bit(dev);
730 smctr_set_page(dev, (__u8 *)tp->ram_access);
731
732 if((err = smctr_issue_remove_cmd(dev)))
733 {
734 smctr_disable_16bit(dev);
735 return err;
736 }
737
738 for(;;)
739 {
740 skb = skb_dequeue(&tp->SendSkbQueue);
741 if(skb == NULL)
742 break;
743 tp->QueueSkb++;
744 dev_kfree_skb(skb);
745 }
746
747
748 return 0;
749}
750
/*
 * Expand the prefix-coded (Huffman-style) microcode image in @fw into
 * the adapter's control-store RAM, packing four 4-bit symbols into
 * each 16-bit word.  Always returns 0.
 * NOTE(review): there is no bounds check against the control-store
 * size — presumably the firmware image is trusted/validated by the
 * caller; confirm before reuse.
 */
static int smctr_decode_firmware(struct net_device *dev,
		const struct firmware *fw)
{
	struct net_local *tp = netdev_priv(dev);
	short bit = 0x80, shift = 12;
	DECODE_TREE_NODE *tree;
	short branch, tsize;
	__u16 buff = 0;
	long weight;	/* total number of input bits to consume */
	__u8 *ucode;	/* compressed bitstream cursor */
	__u16 *mem;	/* output cursor into control-store RAM */

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_decode_firmware\n", dev->name);

	/* Image layout: bit count, tree size, decode tree, then bitstream. */
	weight = *(long *)(fw->data + WEIGHT_OFFSET);
	tsize = *(__u8 *)(fw->data + TREE_SIZE_OFFSET);
	tree = (DECODE_TREE_NODE *)(fw->data + TREE_OFFSET);
	ucode = (__u8 *)(fw->data + TREE_OFFSET
			+ (tsize * sizeof(DECODE_TREE_NODE)));
	mem = (__u16 *)(tp->ram_access);

	while(weight)
	{
		/* Walk the tree one input bit at a time until a leaf. */
		branch = ROOT;
		while((tree + branch)->tag != LEAF && weight)
		{
			branch = *ucode & bit ? (tree + branch)->llink
				: (tree + branch)->rlink;

			bit >>= 1;
			weight--;

			if(bit == 0)
			{
				/* Exhausted this byte; move to the next. */
				bit = 0x80;
				ucode++;
			}
		}

		/* Each leaf yields a nibble; pack four nibbles per word,
		 * high nibble first (shift starts at 12).
		 */
		buff |= (tree + branch)->info << shift;
		shift -= 4;

		if(shift < 0)
		{
			/* Word full: store byte-swapped (SWAP_BYTES —
			 * presumably the adapter's byte order; see smctr.h).
			 */
			*(mem++) = SWAP_BYTES(buff);
			buff = 0;
			shift = 12;
		}
	}

	/* The following assumes the Control Store Memory has
	 * been initialized to zero. If the last partial word
	 * is zero, it will not be written.
	 */
	if(buff)
		*(mem++) = SWAP_BYTES(buff);

	return 0;
}
811
/* Counterpart of smctr_enable_16bit(); intentionally a no-op here
 * (nothing is written back to LAAR).  Kept for call-site symmetry.
 */
static int smctr_disable_16bit(struct net_device *dev)
{
	return 0;
}
816
817/*
818 * On Exit, Adapter is:
819 * 1. TRC is in a reset state and un-initialized.
820 * 2. Adapter memory is enabled.
821 * 3. Control Store memory is out of context (-WCSS is 1).
822 */
823static int smctr_disable_adapter_ctrl_store(struct net_device *dev)
824{
825 struct net_local *tp = netdev_priv(dev);
826 int ioaddr = dev->base_addr;
827
828 if(smctr_debug > 10)
829 printk(KERN_DEBUG "%s: smctr_disable_adapter_ctrl_store\n", dev->name);
830
831 tp->trc_mask |= CSR_WCSS;
832 outb(tp->trc_mask, ioaddr + CSR);
833
834 return 0;
835}
836
837static int smctr_disable_bic_int(struct net_device *dev)
838{
839 struct net_local *tp = netdev_priv(dev);
840 int ioaddr = dev->base_addr;
841
842 tp->trc_mask = CSR_MSK_ALL | CSR_MSKCBUSY
843 | CSR_MSKTINT | CSR_WCSS;
844 outb(tp->trc_mask, ioaddr + CSR);
845
846 return 0;
847}
848
849static int smctr_enable_16bit(struct net_device *dev)
850{
851 struct net_local *tp = netdev_priv(dev);
852 __u8 r;
853
854 if(tp->adapter_bus == BUS_ISA16_TYPE)
855 {
856 r = inb(dev->base_addr + LAAR);
857 outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR);
858 }
859
860 return 0;
861}
862
863/*
864 * To enable the adapter control store memory:
865 * 1. Adapter must be in a RESET state.
866 * 2. Adapter memory must be enabled.
867 * 3. Control Store Memory is in context (-WCSS is 0).
868 */
869static int smctr_enable_adapter_ctrl_store(struct net_device *dev)
870{
871 struct net_local *tp = netdev_priv(dev);
872 int ioaddr = dev->base_addr;
873
874 if(smctr_debug > 10)
875 printk(KERN_DEBUG "%s: smctr_enable_adapter_ctrl_store\n", dev->name);
876
877 smctr_set_trc_reset(ioaddr);
878 smctr_enable_adapter_ram(dev);
879
880 tp->trc_mask &= ~CSR_WCSS;
881 outb(tp->trc_mask, ioaddr + CSR);
882
883 return 0;
884}
885
886static int smctr_enable_adapter_ram(struct net_device *dev)
887{
888 int ioaddr = dev->base_addr;
889 __u8 r;
890
891 if(smctr_debug > 10)
892 printk(KERN_DEBUG "%s: smctr_enable_adapter_ram\n", dev->name);
893
894 r = inb(ioaddr + MSR);
895 outb(MSR_MEMB | r, ioaddr + MSR);
896
897 return 0;
898}
899
900static int smctr_enable_bic_int(struct net_device *dev)
901{
902 struct net_local *tp = netdev_priv(dev);
903 int ioaddr = dev->base_addr;
904 __u8 r;
905
906 switch(tp->bic_type)
907 {
908 case (BIC_584_CHIP):
909 tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
910 outb(tp->trc_mask, ioaddr + CSR);
911 r = inb(ioaddr + IRR);
912 outb(r | IRR_IEN, ioaddr + IRR);
913 break;
914
915 case (BIC_594_CHIP):
916 tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
917 outb(tp->trc_mask, ioaddr + CSR);
918 r = inb(ioaddr + IMCCR);
919 outb(r | IMCCR_EIL, ioaddr + IMCCR);
920 break;
921 }
922
923 return 0;
924}
925
/*
 * Probe an ISA-bus 8115T at dev->base_addr: verify the node-address
 * checksum and board ID, then read IRQ, RAM/ROM base and size, wait
 * states and media type out of the 583/584 BIC config registers.
 * Returns 0 on success, -EBUSY when the I/O region is taken, -ENODEV
 * on any other failure.
 */
static int __init smctr_chk_isa(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	__u8 r1, r2, b, chksum = 0;
	__u16 r;
	int i;
	int err = -ENODEV;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_chk_isa %#4x\n", dev->name, ioaddr);

	/* The adapter's I/O window must be 32-byte aligned. */
	if((ioaddr & 0x1F) != 0)
		goto out;

	/* Grab the region so that no one else tries to probe our ioports. */
	if (!request_region(ioaddr, SMCTR_IO_EXTENT, smctr_name)) {
		err = -EBUSY;
		goto out;
	}

	/* Checksum SMC node address */
	for(i = 0; i < 8; i++)
	{
		b = inb(ioaddr + LAR0 + i);
		chksum += b;
	}

	if (chksum != NODE_ADDR_CKSUM)
		goto out2;

	b = inb(ioaddr + BDID);
	if(b != BRD_ID_8115T)
	{
		printk(KERN_ERR "%s: The adapter found is not supported\n", dev->name);
		goto out2;
	}

	/* Check for 8115T Board ID */
	r2 = 0;
	for(r = 0; r < 8; r++)
	{
		r1 = inb(ioaddr + 0x8 + r);
		r2 += r1;
	}

	/* value of RegF adds up the sum to 0xFF */
	if((r2 != 0xFF) && (r2 != 0xEE))
		goto out2;

	/* Get adapter ID */
	tp->board_id = smctr_get_boardid(dev, 0);
	switch(tp->board_id & 0xffff)
	{
		case WD8115TA:
			smctr_model = "8115T/A";
			break;

		case WD8115T:
			if(tp->extra_info & CHIP_REV_MASK)
				smctr_model = "8115T rev XE";
			else
				smctr_model = "8115T rev XD";
			break;

		default:
			smctr_model = "Unknown";
			break;
	}

	/* Store BIC type. */
	tp->bic_type = BIC_584_CHIP;
	tp->nic_type = NIC_825_CHIP;

	/* Copy Ram Size */
	tp->ram_usable = CNFG_SIZE_16KB;
	tp->ram_size = CNFG_SIZE_64KB;

	/* Get 58x Ram Base: low bits from the MSR register at the base
	 * port, high bits from LAAR.
	 */
	r1 = inb(ioaddr);
	r1 &= 0x3F;

	r2 = inb(ioaddr + CNFG_LAAR_584);
	r2 &= CNFG_LAAR_MASK;
	r2 <<= 3;
	r2 |= ((r1 & 0x38) >> 3);

	tp->ram_base = ((__u32)r2 << 16) + (((__u32)(r1 & 0x7)) << 13);

	/* Get 584 Irq: two select bits from IRR plus the IR2 bit from ICR
	 * decode into one of eight host IRQ lines below.
	 */
	r1 = 0;
	r1 = inb(ioaddr + CNFG_ICR_583);
	r1 &= CNFG_ICR_IR2_584;

	r2 = inb(ioaddr + CNFG_IRR_583);
	r2 &= CNFG_IRR_IRQS;	/* 0x60 */
	r2 >>= 5;

	switch(r2)
	{
		case 0:
			if(r1 == 0)
				dev->irq = 2;
			else
				dev->irq = 10;
			break;

		case 1:
			if(r1 == 0)
				dev->irq = 3;
			else
				dev->irq = 11;
			break;

		case 2:
			if(r1 == 0)
			{
				if(tp->extra_info & ALTERNATE_IRQ_BIT)
					dev->irq = 5;
				else
					dev->irq = 4;
			}
			else
				dev->irq = 15;
			break;

		case 3:
			if(r1 == 0)
				dev->irq = 7;
			else
				dev->irq = 4;
			break;

		default:
			printk(KERN_ERR "%s: No IRQ found aborting\n", dev->name);
			goto out2;
	}

	if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev))
		goto out2;

	/* Get 58x Rom Base */
	r1 = inb(ioaddr + CNFG_BIO_583);
	r1 &= 0x3E;
	r1 |= 0x40;

	tp->rom_base = (__u32)r1 << 13;

	/* Get 58x Rom Size */
	r1 = inb(ioaddr + CNFG_BIO_583);
	r1 &= 0xC0;
	if(r1 == 0)
		tp->rom_size = ROM_DISABLE;
	else
	{
		r1 >>= 6;
		tp->rom_size = (__u16)CNFG_SIZE_8KB << r1;
	}

	/* Get 58x Boot Status */
	r1 = inb(ioaddr + CNFG_GP2);

	tp->mode_bits &= (~BOOT_STATUS_MASK);

	if(r1 & CNFG_GP2_BOOT_NIBBLE)
		tp->mode_bits |= BOOT_TYPE_1;

	/* Get 58x Zero Wait State */
	tp->mode_bits &= (~ZERO_WAIT_STATE_MASK);

	r1 = inb(ioaddr + CNFG_IRR_583);

	if(r1 & CNFG_IRR_ZWS)
		tp->mode_bits |= ZERO_WAIT_STATE_8_BIT;

	if(tp->board_id & BOARD_16BIT)
	{
		r1 = inb(ioaddr + CNFG_LAAR_584);

		if(r1 & CNFG_LAAR_ZWS)
			tp->mode_bits |= ZERO_WAIT_STATE_16_BIT;
	}

	/* Get 584 Media Menu */
	tp->media_menu = 14;
	r1 = inb(ioaddr + CNFG_IRR_583);

	tp->mode_bits &= 0xf8ff;	/* (~CNFG_INTERFACE_TYPE_MASK) */
	if((tp->board_id & TOKEN_MEDIA) == TOKEN_MEDIA)
	{
		/* Get Advanced Features: bits 2:1 of IRR select the
		 * media type (3=UTP16, 2=STP16, 1=UTP4, 0=STP4).
		 */
		if(((r1 & 0x6) >> 1) == 0x3)
			tp->media_type |= MEDIA_UTP_16;
		else
		{
			if(((r1 & 0x6) >> 1) == 0x2)
				tp->media_type |= MEDIA_STP_16;
			else
			{
				if(((r1 & 0x6) >> 1) == 0x1)
					tp->media_type |= MEDIA_UTP_4;

				else
					tp->media_type |= MEDIA_STP_4;
			}
		}

		r1 = inb(ioaddr + CNFG_GP2);
		if(!(r1 & 0x2) )	/* GP2_ETRD */
			tp->mode_bits |= EARLY_TOKEN_REL;

		/* see if the chip is corrupted
		if(smctr_read_584_chksum(ioaddr))
		{
			printk(KERN_ERR "%s: EEPROM Checksum Failure\n", dev->name);
			free_irq(dev->irq, dev);
			goto out2;
		}
		*/
	}

	return 0;

out2:
	release_region(ioaddr, SMCTR_IO_EXTENT);
out:
	return err;
}
1154
/* Probe the adapter at dev->base_addr and assemble the board-id word.
 *
 * @mca: non-zero when the board sits in a MicroChannel slot, zero for ISA.
 *
 * Side effects: seeds tp->extra_info with the interface-chip / RAM /
 * IRQ capability bits, records the bus type in tp->adapter_bus, and
 * latches the EEPROM chip-revision bits into tp->extra_info.
 *
 * Returns the assembled board-id mask, or -1 when the BID id byte
 * reports a major version above 1.0.
 */
static int __init smctr_get_boardid(struct net_device *dev, int mca)
{
        struct net_local *tp = netdev_priv(dev);
        int ioaddr = dev->base_addr;
        __u8 r, r1, IdByte;
        __u16 BoardIdMask;

        tp->board_id = BoardIdMask = 0;

        /* Assume the full feature set for the bus type: MCA boards carry
         * the 594 interface chip, ISA boards the 584. */
        if(mca)
        {
                BoardIdMask |= (MICROCHANNEL+INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
                tp->extra_info |= (INTERFACE_594_CHIP+RAM_SIZE_64K+NIC_825_BIT+ALTERNATE_IRQ_BIT+SLOT_16BIT);
        }
        else
        {
                BoardIdMask|=(INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
                tp->extra_info |= (INTERFACE_584_CHIP + RAM_SIZE_64K
                        + NIC_825_BIT + ALTERNATE_IRQ_BIT);
        }

        /* On ISA, sense whether the slot is 16-bit wide via BID_REG_1. */
        if(!mca)
        {
                r = inb(ioaddr + BID_REG_1);
                r &= 0x0c;
                outb(r, ioaddr + BID_REG_1);
                r = inb(ioaddr + BID_REG_1);

                if(r & BID_SIXTEEN_BIT_BIT)
                {
                        tp->extra_info |= SLOT_16BIT;
                        tp->adapter_bus = BUS_ISA16_TYPE;
                }
                else
                        tp->adapter_bus = BUS_ISA8_TYPE;
        }
        else
                tp->adapter_bus = BUS_MCA_TYPE;

        /* Get Board Id Byte */
        IdByte = inb(ioaddr + BID_BOARD_ID_BYTE);

        /* Only major version 1.0 boards are supported; any high bit in
         * the id byte means a newer, unknown revision. */
        if(IdByte & 0xF8)
                return -1;

        /* Select the 584's engineering EEPROM page and issue a register
         * recall (BID_RLA), then spin until the recall completes. */
        r1 = inb(ioaddr + BID_REG_1);
        r1 &= BID_ICR_MASK;
        r1 |= BID_OTHER_BIT;

        outb(r1, ioaddr + BID_REG_1);
        r1 = inb(ioaddr + BID_REG_3);

        r1 &= BID_EAR_MASK;
        r1 |= BID_ENGR_PAGE;

        outb(r1, ioaddr + BID_REG_3);
        r1 = inb(ioaddr + BID_REG_1);
        r1 &= BID_ICR_MASK;
        r1 |= (BID_RLA | BID_OTHER_BIT);

        outb(r1, ioaddr + BID_REG_1);

        /* Busy-wait for the recall to finish (no timeout here — assumes
         * the hardware always completes; TODO confirm). */
        r1 = inb(ioaddr + BID_REG_1);
        while(r1 & BID_RECALL_DONE_MASK)
                r1 = inb(ioaddr + BID_REG_1);

        r = inb(ioaddr + BID_LAR_0 + BID_REG_6);

        /* clear chip rev bits */
        tp->extra_info &= ~CHIP_REV_MASK;
        tp->extra_info |= ((r & BID_EEPROM_CHIP_REV_MASK) << 6);

        /* Restore the normal EEPROM page (EA6) and recall once more so
         * the chip is left in its default state. */
        r1 = inb(ioaddr + BID_REG_1);
        r1 &= BID_ICR_MASK;
        r1 |= BID_OTHER_BIT;

        outb(r1, ioaddr + BID_REG_1);
        r1 = inb(ioaddr + BID_REG_3);

        r1 &= BID_EAR_MASK;
        r1 |= BID_EA6;

        outb(r1, ioaddr + BID_REG_3);
        r1 = inb(ioaddr + BID_REG_1);

        r1 &= BID_ICR_MASK;
        r1 |= BID_RLA;

        outb(r1, ioaddr + BID_REG_1);
        r1 = inb(ioaddr + BID_REG_1);

        while(r1 & BID_RECALL_DONE_MASK)
                r1 = inb(ioaddr + BID_REG_1);

        return BoardIdMask;
}
1254
1255static int smctr_get_group_address(struct net_device *dev)
1256{
1257 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);
1258
1259 return smctr_wait_cmd(dev);
1260}
1261
1262static int smctr_get_functional_address(struct net_device *dev)
1263{
1264 smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);
1265
1266 return smctr_wait_cmd(dev);
1267}
1268
/* Calculate number of Non-MAC receive BDB's and data buffers.
 * This function must simulate allocating shared memory exactly
 * as the allocate_shared_memory function above.
 */
1273static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
1274{
1275 struct net_local *tp = netdev_priv(dev);
1276 unsigned int mem_used = 0;
1277
1278 /* Allocate System Control Blocks. */
1279 mem_used += sizeof(SCGBlock);
1280
1281 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1282 mem_used += sizeof(SCLBlock);
1283
1284 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1285 mem_used += sizeof(ACBlock) * tp->num_acbs;
1286
1287 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1288 mem_used += sizeof(ISBlock);
1289
1290 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1291 mem_used += MISC_DATA_SIZE;
1292
1293 /* Allocate transmit FCB's. */
1294 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1295
1296 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE];
1297 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE];
1298 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE];
1299
1300 /* Allocate transmit BDBs. */
1301 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE];
1302 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE];
1303 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE];
1304
1305 /* Allocate receive FCBs. */
1306 mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE];
1307 mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE];
1308
1309 /* Allocate receive BDBs. */
1310 mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE];
1311
1312 /* Allocate MAC transmit buffers.
1313 * MAC transmit buffers don't have to be on an ODD Boundary.
1314 */
1315 mem_used += tp->tx_buff_size[MAC_QUEUE];
1316
1317 /* Allocate BUG transmit buffers. */
1318 mem_used += tp->tx_buff_size[BUG_QUEUE];
1319
1320 /* Allocate MAC receive data buffers.
1321 * MAC receive buffers don't have to be on a 256 byte boundary.
1322 */
1323 mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE];
1324
1325 /* Allocate Non-MAC transmit buffers.
1326 * For maximum Netware performance, put Tx Buffers on
1327 * ODD Boundary,and then restore malloc to Even Boundrys.
1328 */
1329 mem_used += 1L;
1330 mem_used += tp->tx_buff_size[NON_MAC_QUEUE];
1331 mem_used += 1L;
1332
1333 /* CALCULATE NUMBER OF NON-MAC RX BDB'S
1334 * AND NON-MAC RX DATA BUFFERS
1335 *
1336 * Make sure the mem_used offset at this point is the
1337 * same as in allocate_shared memory or the following
1338 * boundary adjustment will be incorrect (i.e. not allocating
1339 * the non-mac receive buffers above cannot change the 256
1340 * byte offset).
1341 *
1342 * Since this cannot be guaranteed, adding the full 256 bytes
1343 * to the amount of shared memory used at this point will guaranteed
1344 * that the rx data buffers do not overflow shared memory.
1345 */
1346 mem_used += 0x100;
1347
1348 return (0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock));
1349}
1350
1351static int smctr_get_physical_drop_number(struct net_device *dev)
1352{
1353 smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);
1354
1355 return smctr_wait_cmd(dev);
1356}
1357
/* Return the host-visible data buffer of the current receive BDB for
 * @queue.
 *
 * Resolves the TRC-relative BDB offset stored in the current RX FCB
 * into a host address (ram_access + offset) and caches the result back
 * into the FCB's bdb_ptr before handing out the data buffer pointer.
 *
 * NOTE(review): the arithmetic truncates pointers through __u32, which
 * assumes the shared RAM window lives in a 32-bit flat mapping —
 * confirm on 64-bit builds.
 */
static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
{
        struct net_local *tp = netdev_priv(dev);
        BDBlock *bdb;

        bdb = (BDBlock *)((__u32)tp->ram_access
                + (__u32)(tp->rx_fcb_curr[queue]->trc_bdb_ptr));

        tp->rx_fcb_curr[queue]->bdb_ptr = bdb;

        return (__u8 *)bdb->data_block_ptr;
}
1370
1371static int smctr_get_station_id(struct net_device *dev)
1372{
1373 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);
1374
1375 return smctr_wait_cmd(dev);
1376}
1377
1378/*
1379 * Get the current statistics. This may be called with the card open
1380 * or closed.
1381 */
1382static struct net_device_stats *smctr_get_stats(struct net_device *dev)
1383{
1384 struct net_local *tp = netdev_priv(dev);
1385
1386 return (struct net_device_stats *)&tp->MacStat;
1387}
1388
/* Allocate a transmit FCB plus @bytes_count bytes of transmit buffer
 * from @queue's ring.
 *
 * Advances the queue's buffer cursor (wrapping to the ring head when
 * the request would run past the end), charges the allocation against
 * the queue's accounting, and wires the frame length and data pointers
 * into the FCB and its BDB.
 *
 * Returns the FCB on success, or (FCBlock *)(-1L) when no FCB or not
 * enough buffer memory is available.  Callers must compare against that
 * sentinel, not NULL.
 */
static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
        __u16 bytes_count)
{
        struct net_local *tp = netdev_priv(dev);
        FCBlock *pFCB;
        BDBlock *pbdb;
        unsigned short alloc_size;
        unsigned short *temp;

        if(smctr_debug > 20)
                printk(KERN_DEBUG "smctr_get_tx_fcb\n");

        /* check if there is enough FCB blocks */
        if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue])
                return (FCBlock *)(-1L);

        /* round off the input pkt size to the nearest even number */
        alloc_size = (bytes_count + 1) & 0xfffe;

        /* check if enough mem */
        if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue])
                return (FCBlock *)(-1L);

        /* check if past the end ;
         * if exactly enough mem to end of ring, alloc from front.
         * this avoids update of curr when curr = end
         */
        if(((unsigned long)(tp->tx_buff_curr[queue]) + alloc_size)
                >= (unsigned long)(tp->tx_buff_end[queue]))
        {
                /* check if enough memory from ring head; the unused
                 * tail fragment is charged to the allocation so the
                 * accounting stays consistent on free. */
                alloc_size = alloc_size +
                        (__u16)((__u32)tp->tx_buff_end[queue]
                        - (__u32)tp->tx_buff_curr[queue]);

                if((tp->tx_buff_used[queue] + alloc_size)
                        > tp->tx_buff_size[queue])
                {
                        return (FCBlock *)(-1L);
                }

                /* ring wrap */
                tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];
        }

        /* Commit: charge the buffer bytes and the FCB, then record the
         * frame geometry in the FCB. */
        tp->tx_buff_used[queue] += alloc_size;
        tp->num_tx_fcbs_used[queue]++;
        tp->tx_fcb_curr[queue]->frame_length = bytes_count;
        tp->tx_fcb_curr[queue]->memory_alloc = alloc_size;
        temp = tp->tx_buff_curr[queue];
        tp->tx_buff_curr[queue]
                = (__u16 *)((__u32)temp + (__u32)((bytes_count + 1) & 0xfffe));

        /* Point the FCB's BDB at the reserved data area (both host- and
         * TRC-relative forms). */
        pbdb = tp->tx_fcb_curr[queue]->bdb_ptr;
        pbdb->buffer_length = bytes_count;
        pbdb->data_block_ptr = temp;
        pbdb->trc_data_block_ptr = TRC_POINTER(temp);

        pFCB = tp->tx_fcb_curr[queue];
        tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr;

        return pFCB;
}
1452
1453static int smctr_get_upstream_neighbor_addr(struct net_device *dev)
1454{
1455 smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS);
1456
1457 return smctr_wait_cmd(dev);
1458}
1459
1460static int smctr_hardware_send_packet(struct net_device *dev,
1461 struct net_local *tp)
1462{
1463 struct tr_statistics *tstat = &tp->MacStat;
1464 struct sk_buff *skb;
1465 FCBlock *fcb;
1466
1467 if(smctr_debug > 10)
1468 printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name);
1469
1470 if(tp->status != OPEN)
1471 return -1;
1472
1473 if(tp->monitor_state_ready != 1)
1474 return -1;
1475
1476 for(;;)
1477 {
1478 /* Send first buffer from queue */
1479 skb = skb_dequeue(&tp->SendSkbQueue);
1480 if(skb == NULL)
1481 return -1;
1482
1483 tp->QueueSkb++;
1484
1485 if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size)
1486 return -1;
1487
1488 smctr_enable_16bit(dev);
1489 smctr_set_page(dev, (__u8 *)tp->ram_access);
1490
1491 if((fcb = smctr_get_tx_fcb(dev, NON_MAC_QUEUE, skb->len))
1492 == (FCBlock *)(-1L))
1493 {
1494 smctr_disable_16bit(dev);
1495 return -1;
1496 }
1497
1498 smctr_tx_move_frame(dev, skb,
1499 (__u8 *)fcb->bdb_ptr->data_block_ptr, skb->len);
1500
1501 smctr_set_page(dev, (__u8 *)fcb);
1502
1503 smctr_trc_send_packet(dev, fcb, NON_MAC_QUEUE);
1504 dev_kfree_skb(skb);
1505
1506 tstat->tx_packets++;
1507
1508 smctr_disable_16bit(dev);
1509 }
1510
1511 return 0;
1512}
1513
1514static int smctr_init_acbs(struct net_device *dev)
1515{
1516 struct net_local *tp = netdev_priv(dev);
1517 unsigned int i;
1518 ACBlock *acb;
1519
1520 if(smctr_debug > 10)
1521 printk(KERN_DEBUG "%s: smctr_init_acbs\n", dev->name);
1522
1523 acb = tp->acb_head;
1524 acb->cmd_done_status = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
1525 acb->cmd_info = ACB_CHAIN_END;
1526 acb->cmd = 0;
1527 acb->subcmd = 0;
1528 acb->data_offset_lo = 0;
1529 acb->data_offset_hi = 0;
1530 acb->next_ptr
1531 = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
1532 acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
1533
1534 for(i = 1; i < tp->num_acbs; i++)
1535 {
1536 acb = acb->next_ptr;
1537 acb->cmd_done_status
1538 = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
1539 acb->cmd_info = ACB_CHAIN_END;
1540 acb->cmd = 0;
1541 acb->subcmd = 0;
1542 acb->data_offset_lo = 0;
1543 acb->data_offset_hi = 0;
1544 acb->next_ptr
1545 = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
1546 acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
1547 }
1548
1549 acb->next_ptr = tp->acb_head;
1550 acb->trc_next_ptr = TRC_POINTER(tp->acb_head);
1551 tp->acb_next = tp->acb_head->next_ptr;
1552 tp->acb_curr = tp->acb_head->next_ptr;
1553 tp->num_acbs_used = 0;
1554
1555 return 0;
1556}
1557
/* One-time adapter bring-up.
 *
 * Resets the TRC, verifies the previously loaded firmware and the
 * shared RAM, runs the internal self test, re-initializes the card,
 * and leaves it with BIC interrupts enabled (16-bit mode disabled).
 *
 * Returns 0 on success or a negative errno-style code on failure.
 */
static int smctr_init_adapter(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        int err;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_init_adapter\n", dev->name);

        tp->status = CLOSED;
        /* ram_usable is in KB; mask covers one page of the window. */
        tp->page_offset_mask = (tp->ram_usable * 1024) - 1;
        skb_queue_head_init(&tp->SendSkbQueue);
        tp->QueueSkb = MAX_TX_QUEUE;

        /* Force the group/functional address marker bits on.
         * NOTE(review): testing 0x0080 but setting 0x00C0 looks
         * asymmetric — confirm against the TRC address format. */
        if(!(tp->group_address_0 & 0x0080))
                tp->group_address_0 |= 0x00C0;

        if(!(tp->functional_address_0 & 0x00C0))
                tp->functional_address_0 |= 0x00C0;

        /* Clear a reserved functional-address bit. */
        tp->functional_address[0] &= 0xFF7F;

        if(tp->authorized_function_classes == 0)
                tp->authorized_function_classes = 0x7FFF;

        if(tp->authorized_access_priority == 0)
                tp->authorized_access_priority = 0x06;

        smctr_disable_bic_int(dev);
        smctr_set_trc_reset(dev->base_addr);

        smctr_enable_16bit(dev);
        smctr_set_page(dev, (__u8 *)tp->ram_access);

        /* Firmware must already be in shared RAM (loaded earlier). */
        if(smctr_checksum_firmware(dev))
        {
                printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name);
                return -ENOENT;
        }

        if((err = smctr_ram_memory_test(dev)))
        {
                printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name);
                return -EIO;
        }

        smctr_set_rx_look_ahead(dev);
        smctr_load_node_addr(dev);

        /* Initialize adapter for Internal Self Test. */
        smctr_reset_adapter(dev);
        if((err = smctr_init_card_real(dev)))
        {
                printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
                        dev->name, err);
                return -EINVAL;
        }

        /* This routine clobbers the TRC's internal registers. */
        if((err = smctr_internal_self_test(dev)))
        {
                printk(KERN_ERR "%s: Card failed internal self test (%d)\n",
                        dev->name, err);
                return -EINVAL;
        }

        /* Re-Initialize adapter's internal registers */
        smctr_reset_adapter(dev);
        if((err = smctr_init_card_real(dev)))
        {
                printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
                        dev->name, err);
                return -EINVAL;
        }

        smctr_enable_bic_int(dev);

        if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
                return err;

        smctr_disable_16bit(dev);

        return 0;
}
1641
1642static int smctr_init_card_real(struct net_device *dev)
1643{
1644 struct net_local *tp = netdev_priv(dev);
1645 int err = 0;
1646
1647 if(smctr_debug > 10)
1648 printk(KERN_DEBUG "%s: smctr_init_card_real\n", dev->name);
1649
1650 tp->sh_mem_used = 0;
1651 tp->num_acbs = NUM_OF_ACBS;
1652
1653 /* Range Check Max Packet Size */
1654 if(tp->max_packet_size < 256)
1655 tp->max_packet_size = 256;
1656 else
1657 {
1658 if(tp->max_packet_size > NON_MAC_TX_BUFFER_MEMORY)
1659 tp->max_packet_size = NON_MAC_TX_BUFFER_MEMORY;
1660 }
1661
1662 tp->num_of_tx_buffs = (NON_MAC_TX_BUFFER_MEMORY
1663 / tp->max_packet_size) - 1;
1664
1665 if(tp->num_of_tx_buffs > NUM_NON_MAC_TX_FCBS)
1666 tp->num_of_tx_buffs = NUM_NON_MAC_TX_FCBS;
1667 else
1668 {
1669 if(tp->num_of_tx_buffs == 0)
1670 tp->num_of_tx_buffs = 1;
1671 }
1672
1673 /* Tx queue constants */
1674 tp->num_tx_fcbs [BUG_QUEUE] = NUM_BUG_TX_FCBS;
1675 tp->num_tx_bdbs [BUG_QUEUE] = NUM_BUG_TX_BDBS;
1676 tp->tx_buff_size [BUG_QUEUE] = BUG_TX_BUFFER_MEMORY;
1677 tp->tx_buff_used [BUG_QUEUE] = 0;
1678 tp->tx_queue_status [BUG_QUEUE] = NOT_TRANSMITING;
1679
1680 tp->num_tx_fcbs [MAC_QUEUE] = NUM_MAC_TX_FCBS;
1681 tp->num_tx_bdbs [MAC_QUEUE] = NUM_MAC_TX_BDBS;
1682 tp->tx_buff_size [MAC_QUEUE] = MAC_TX_BUFFER_MEMORY;
1683 tp->tx_buff_used [MAC_QUEUE] = 0;
1684 tp->tx_queue_status [MAC_QUEUE] = NOT_TRANSMITING;
1685
1686 tp->num_tx_fcbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_FCBS;
1687 tp->num_tx_bdbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_BDBS;
1688 tp->tx_buff_size [NON_MAC_QUEUE] = NON_MAC_TX_BUFFER_MEMORY;
1689 tp->tx_buff_used [NON_MAC_QUEUE] = 0;
1690 tp->tx_queue_status [NON_MAC_QUEUE] = NOT_TRANSMITING;
1691
1692 /* Receive Queue Constants */
1693 tp->num_rx_fcbs[MAC_QUEUE] = NUM_MAC_RX_FCBS;
1694 tp->num_rx_bdbs[MAC_QUEUE] = NUM_MAC_RX_BDBS;
1695
1696 if(tp->extra_info & CHIP_REV_MASK)
1697 tp->num_rx_fcbs[NON_MAC_QUEUE] = 78; /* 825 Rev. XE */
1698 else
1699 tp->num_rx_fcbs[NON_MAC_QUEUE] = 7; /* 825 Rev. XD */
1700
1701 tp->num_rx_bdbs[NON_MAC_QUEUE] = smctr_get_num_rx_bdbs(dev);
1702
1703 smctr_alloc_shared_memory(dev);
1704 smctr_init_shared_memory(dev);
1705
1706 if((err = smctr_issue_init_timers_cmd(dev)))
1707 return err;
1708
1709 if((err = smctr_issue_init_txrx_cmd(dev)))
1710 {
1711 printk(KERN_ERR "%s: Hardware failure\n", dev->name);
1712 return err;
1713 }
1714
1715 return 0;
1716}
1717
1718static int smctr_init_rx_bdbs(struct net_device *dev)
1719{
1720 struct net_local *tp = netdev_priv(dev);
1721 unsigned int i, j;
1722 BDBlock *bdb;
1723 __u16 *buf;
1724
1725 if(smctr_debug > 10)
1726 printk(KERN_DEBUG "%s: smctr_init_rx_bdbs\n", dev->name);
1727
1728 for(i = 0; i < NUM_RX_QS_USED; i++)
1729 {
1730 bdb = tp->rx_bdb_head[i];
1731 buf = tp->rx_buff_head[i];
1732 bdb->info = (BDB_CHAIN_END | BDB_NO_WARNING);
1733 bdb->buffer_length = RX_DATA_BUFFER_SIZE;
1734 bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
1735 bdb->data_block_ptr = buf;
1736 bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1737
1738 if(i == NON_MAC_QUEUE)
1739 bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
1740 else
1741 bdb->trc_data_block_ptr = TRC_POINTER(buf);
1742
1743 for(j = 1; j < tp->num_rx_bdbs[i]; j++)
1744 {
1745 bdb->next_ptr->back_ptr = bdb;
1746 bdb = bdb->next_ptr;
1747 buf = (__u16 *)((char *)buf + RX_DATA_BUFFER_SIZE);
1748 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
1749 bdb->buffer_length = RX_DATA_BUFFER_SIZE;
1750 bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
1751 bdb->data_block_ptr = buf;
1752 bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1753
1754 if(i == NON_MAC_QUEUE)
1755 bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
1756 else
1757 bdb->trc_data_block_ptr = TRC_POINTER(buf);
1758 }
1759
1760 bdb->next_ptr = tp->rx_bdb_head[i];
1761 bdb->trc_next_ptr = TRC_POINTER(tp->rx_bdb_head[i]);
1762
1763 tp->rx_bdb_head[i]->back_ptr = bdb;
1764 tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr;
1765 }
1766
1767 return 0;
1768}
1769
1770static int smctr_init_rx_fcbs(struct net_device *dev)
1771{
1772 struct net_local *tp = netdev_priv(dev);
1773 unsigned int i, j;
1774 FCBlock *fcb;
1775
1776 for(i = 0; i < NUM_RX_QS_USED; i++)
1777 {
1778 fcb = tp->rx_fcb_head[i];
1779 fcb->frame_status = 0;
1780 fcb->frame_length = 0;
1781 fcb->info = FCB_CHAIN_END;
1782 fcb->next_ptr = (FCBlock *)(((char*)fcb) + sizeof(FCBlock));
1783 if(i == NON_MAC_QUEUE)
1784 fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
1785 else
1786 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1787
1788 for(j = 1; j < tp->num_rx_fcbs[i]; j++)
1789 {
1790 fcb->next_ptr->back_ptr = fcb;
1791 fcb = fcb->next_ptr;
1792 fcb->frame_status = 0;
1793 fcb->frame_length = 0;
1794 fcb->info = FCB_WARNING;
1795 fcb->next_ptr
1796 = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1797
1798 if(i == NON_MAC_QUEUE)
1799 fcb->trc_next_ptr
1800 = RX_FCB_TRC_POINTER(fcb->next_ptr);
1801 else
1802 fcb->trc_next_ptr
1803 = TRC_POINTER(fcb->next_ptr);
1804 }
1805
1806 fcb->next_ptr = tp->rx_fcb_head[i];
1807
1808 if(i == NON_MAC_QUEUE)
1809 fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
1810 else
1811 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1812
1813 tp->rx_fcb_head[i]->back_ptr = fcb;
1814 tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr;
1815 }
1816
1817 return 0;
1818}
1819
/* Lay the TRC control structures (ISCP, SCGB, SCLB, ISB, ACBs, FCBs and
 * BDBs) into the already-allocated shared RAM window.
 *
 * The ISCP lives on its own page, so the window is switched there for
 * the single pointer write and then back to the normal access page.
 */
static int smctr_init_shared_memory(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        unsigned int i;
        __u32 *iscpb;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_init_shared_memory\n", dev->name);

        smctr_set_page(dev, (__u8 *)(unsigned int)tp->iscpb_ptr);

        /* Initialize Initial System Configuration Point. (ISCP) */
        iscpb = (__u32 *)PAGE_POINTER(&tp->iscpb_ptr->trc_scgb_ptr);
        *iscpb = (__u32)(SWAP_WORDS(TRC_POINTER(tp->scgb_ptr)));

        smctr_set_page(dev, (__u8 *)tp->ram_access);

        /* Initialize System Configuration Pointers. (SCP) */
        tp->scgb_ptr->config = (SCGB_ADDRESS_POINTER_FORMAT
                | SCGB_MULTI_WORD_CONTROL | SCGB_DATA_FORMAT
                | SCGB_BURST_LENGTH);

        tp->scgb_ptr->trc_sclb_ptr = TRC_POINTER(tp->sclb_ptr);
        tp->scgb_ptr->trc_acb_ptr = TRC_POINTER(tp->acb_head);
        tp->scgb_ptr->trc_isb_ptr = TRC_POINTER(tp->isb_ptr);
        tp->scgb_ptr->isbsiz = (sizeof(ISBlock)) - 2;

        /* Initialize System Control Block. (SCB) */
        tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_NOP;
        tp->sclb_ptr->iack_code = 0;
        tp->sclb_ptr->resume_control = 0;
        tp->sclb_ptr->int_mask_control = 0;
        tp->sclb_ptr->int_mask_state = 0;

        /* Initialize Interrupt Status Block. (ISB)
         * IType 0xf0 marks an entry as free/consumed — the interrupt
         * handler only processes entries whose high nibble is clear. */
        for(i = 0; i < NUM_OF_INTERRUPTS; i++)
        {
                tp->isb_ptr->IStatus[i].IType = 0xf0;
                tp->isb_ptr->IStatus[i].ISubtype = 0;
        }

        tp->current_isb_index = 0;

        /* Initialize Action Command Block. (ACB) */
        smctr_init_acbs(dev);

        /* Initialize transmit FCB's and BDB's. */
        smctr_link_tx_fcbs_to_bdbs(dev);
        smctr_init_tx_bdbs(dev);
        smctr_init_tx_fcbs(dev);

        /* Initialize receive FCB's and BDB's. */
        smctr_init_rx_bdbs(dev);
        smctr_init_rx_fcbs(dev);

        return 0;
}
1877
1878static int smctr_init_tx_bdbs(struct net_device *dev)
1879{
1880 struct net_local *tp = netdev_priv(dev);
1881 unsigned int i, j;
1882 BDBlock *bdb;
1883
1884 for(i = 0; i < NUM_TX_QS_USED; i++)
1885 {
1886 bdb = tp->tx_bdb_head[i];
1887 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
1888 bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
1889 bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1890
1891 for(j = 1; j < tp->num_tx_bdbs[i]; j++)
1892 {
1893 bdb->next_ptr->back_ptr = bdb;
1894 bdb = bdb->next_ptr;
1895 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
1896 bdb->next_ptr
1897 = (BDBlock *)(((char *)bdb) + sizeof( BDBlock)); bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1898 }
1899
1900 bdb->next_ptr = tp->tx_bdb_head[i];
1901 bdb->trc_next_ptr = TRC_POINTER(tp->tx_bdb_head[i]);
1902 tp->tx_bdb_head[i]->back_ptr = bdb;
1903 }
1904
1905 return 0;
1906}
1907
1908static int smctr_init_tx_fcbs(struct net_device *dev)
1909{
1910 struct net_local *tp = netdev_priv(dev);
1911 unsigned int i, j;
1912 FCBlock *fcb;
1913
1914 for(i = 0; i < NUM_TX_QS_USED; i++)
1915 {
1916 fcb = tp->tx_fcb_head[i];
1917 fcb->frame_status = 0;
1918 fcb->frame_length = 0;
1919 fcb->info = FCB_CHAIN_END;
1920 fcb->next_ptr = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1921 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1922
1923 for(j = 1; j < tp->num_tx_fcbs[i]; j++)
1924 {
1925 fcb->next_ptr->back_ptr = fcb;
1926 fcb = fcb->next_ptr;
1927 fcb->frame_status = 0;
1928 fcb->frame_length = 0;
1929 fcb->info = FCB_CHAIN_END;
1930 fcb->next_ptr
1931 = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1932 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1933 }
1934
1935 fcb->next_ptr = tp->tx_fcb_head[i];
1936 fcb->trc_next_ptr = TRC_POINTER(tp->tx_fcb_head[i]);
1937
1938 tp->tx_fcb_head[i]->back_ptr = fcb;
1939 tp->tx_fcb_end[i] = tp->tx_fcb_head[i]->next_ptr;
1940 tp->tx_fcb_curr[i] = tp->tx_fcb_head[i]->next_ptr;
1941 tp->num_tx_fcbs_used[i] = 0;
1942 }
1943
1944 return 0;
1945}
1946
1947static int smctr_internal_self_test(struct net_device *dev)
1948{
1949 struct net_local *tp = netdev_priv(dev);
1950 int err;
1951
1952 if((err = smctr_issue_test_internal_rom_cmd(dev)))
1953 return err;
1954
1955 if((err = smctr_wait_cmd(dev)))
1956 return err;
1957
1958 if(tp->acb_head->cmd_done_status & 0xff)
1959 return -1;
1960
1961 if((err = smctr_issue_test_hic_cmd(dev)))
1962 return err;
1963
1964 if((err = smctr_wait_cmd(dev)))
1965 return err;
1966
1967 if(tp->acb_head->cmd_done_status & 0xff)
1968 return -1;
1969
1970 if((err = smctr_issue_test_mac_reg_cmd(dev)))
1971 return err;
1972
1973 if((err = smctr_wait_cmd(dev)))
1974 return err;
1975
1976 if(tp->acb_head->cmd_done_status & 0xff)
1977 return -1;
1978
1979 return 0;
1980}
1981
1982/*
1983 * The typical workload of the driver: Handle the network interface interrupts.
1984 */
1985static irqreturn_t smctr_interrupt(int irq, void *dev_id)
1986{
1987 struct net_device *dev = dev_id;
1988 struct net_local *tp;
1989 int ioaddr;
1990 __u16 interrupt_unmask_bits = 0, interrupt_ack_code = 0xff00;
1991 __u16 err1, err = NOT_MY_INTERRUPT;
1992 __u8 isb_type, isb_subtype;
1993 __u16 isb_index;
1994
1995 ioaddr = dev->base_addr;
1996 tp = netdev_priv(dev);
1997
1998 if(tp->status == NOT_INITIALIZED)
1999 return IRQ_NONE;
2000
2001 spin_lock(&tp->lock);
2002
2003 smctr_disable_bic_int(dev);
2004 smctr_enable_16bit(dev);
2005
2006 smctr_clear_int(dev);
2007
2008 /* First read the LSB */
2009 while((tp->isb_ptr->IStatus[tp->current_isb_index].IType & 0xf0) == 0)
2010 {
2011 isb_index = tp->current_isb_index;
2012 isb_type = tp->isb_ptr->IStatus[isb_index].IType;
2013 isb_subtype = tp->isb_ptr->IStatus[isb_index].ISubtype;
2014
2015 (tp->current_isb_index)++;
2016 if(tp->current_isb_index == NUM_OF_INTERRUPTS)
2017 tp->current_isb_index = 0;
2018
2019 if(isb_type >= 0x10)
2020 {
2021 smctr_disable_16bit(dev);
2022 spin_unlock(&tp->lock);
2023 return IRQ_HANDLED;
2024 }
2025
2026 err = HARDWARE_FAILED;
2027 interrupt_ack_code = isb_index;
2028 tp->isb_ptr->IStatus[isb_index].IType |= 0xf0;
2029
2030 interrupt_unmask_bits |= (1 << (__u16)isb_type);
2031
2032 switch(isb_type)
2033 {
2034 case ISB_IMC_MAC_TYPE_3:
2035 smctr_disable_16bit(dev);
2036
2037 switch(isb_subtype)
2038 {
2039 case 0:
2040 tp->monitor_state = MS_MONITOR_FSM_INACTIVE;
2041 break;
2042
2043 case 1:
2044 tp->monitor_state = MS_REPEAT_BEACON_STATE;
2045 break;
2046
2047 case 2:
2048 tp->monitor_state = MS_REPEAT_CLAIM_TOKEN_STATE;
2049 break;
2050
2051 case 3:
2052 tp->monitor_state = MS_TRANSMIT_CLAIM_TOKEN_STATE; break;
2053
2054 case 4:
2055 tp->monitor_state = MS_STANDBY_MONITOR_STATE;
2056 break;
2057
2058 case 5:
2059 tp->monitor_state = MS_TRANSMIT_BEACON_STATE;
2060 break;
2061
2062 case 6:
2063 tp->monitor_state = MS_ACTIVE_MONITOR_STATE;
2064 break;
2065
2066 case 7:
2067 tp->monitor_state = MS_TRANSMIT_RING_PURGE_STATE;
2068 break;
2069
2070 case 8: /* diagnostic state */
2071 break;
2072
2073 case 9:
2074 tp->monitor_state = MS_BEACON_TEST_STATE;
2075 if(smctr_lobe_media_test(dev))
2076 {
2077 tp->ring_status_flags = RING_STATUS_CHANGED;
2078 tp->ring_status = AUTO_REMOVAL_ERROR;
2079 smctr_ring_status_chg(dev);
2080 smctr_bypass_state(dev);
2081 }
2082 else
2083 smctr_issue_insert_cmd(dev);
2084 break;
2085
2086 /* case 0x0a-0xff, illegal states */
2087 default:
2088 break;
2089 }
2090
2091 tp->ring_status_flags = MONITOR_STATE_CHANGED;
2092 err = smctr_ring_status_chg(dev);
2093
2094 smctr_enable_16bit(dev);
2095 break;
2096
2097 /* Type 0x02 - MAC Error Counters Interrupt
2098 * One or more MAC Error Counter is half full
2099 * MAC Error Counters
2100 * Lost_FR_Error_Counter
2101 * RCV_Congestion_Counter
2102 * FR_copied_Error_Counter
2103 * FREQ_Error_Counter
2104 * Token_Error_Counter
2105 * Line_Error_Counter
2106 * Internal_Error_Count
2107 */
2108 case ISB_IMC_MAC_ERROR_COUNTERS:
2109 /* Read 802.5 Error Counters */
2110 err = smctr_issue_read_ring_status_cmd(dev);
2111 break;
2112
2113 /* Type 0x04 - MAC Type 2 Interrupt
2114 * HOST needs to enqueue MAC Frame for transmission
2115 * SubType Bit 15 - RQ_INIT_PDU( Request Initialization) * Changed from RQ_INIT_PDU to
2116 * TRC_Status_Changed_Indicate
2117 */
2118 case ISB_IMC_MAC_TYPE_2:
2119 err = smctr_issue_read_ring_status_cmd(dev);
2120 break;
2121
2122
2123 /* Type 0x05 - TX Frame Interrupt (FI). */
2124 case ISB_IMC_TX_FRAME:
2125 /* BUG QUEUE for TRC stuck receive BUG */
2126 if(isb_subtype & TX_PENDING_PRIORITY_2)
2127 {
2128 if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS)
2129 break;
2130 }
2131
2132 /* NON-MAC frames only */
2133 if(isb_subtype & TX_PENDING_PRIORITY_1)
2134 {
2135 if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS)
2136 break;
2137 }
2138
2139 /* MAC frames only */
2140 if(isb_subtype & TX_PENDING_PRIORITY_0)
2141 err = smctr_tx_complete(dev, MAC_QUEUE); break;
2142
2143 /* Type 0x06 - TX END OF QUEUE (FE) */
2144 case ISB_IMC_END_OF_TX_QUEUE:
2145 /* BUG queue */
2146 if(isb_subtype & TX_PENDING_PRIORITY_2)
2147 {
2148 /* ok to clear Receive FIFO overrun
2149 * imask send_BUG now completes.
2150 */
2151 interrupt_unmask_bits |= 0x800;
2152
2153 tp->tx_queue_status[BUG_QUEUE] = NOT_TRANSMITING;
2154 if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS)
2155 break;
2156 if((err = smctr_restart_tx_chain(dev, BUG_QUEUE)) != SUCCESS)
2157 break;
2158 }
2159
2160 /* NON-MAC queue only */
2161 if(isb_subtype & TX_PENDING_PRIORITY_1)
2162 {
2163 tp->tx_queue_status[NON_MAC_QUEUE] = NOT_TRANSMITING;
2164 if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS)
2165 break;
2166 if((err = smctr_restart_tx_chain(dev, NON_MAC_QUEUE)) != SUCCESS)
2167 break;
2168 }
2169
2170 /* MAC queue only */
2171 if(isb_subtype & TX_PENDING_PRIORITY_0)
2172 {
2173 tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
2174 if((err = smctr_tx_complete(dev, MAC_QUEUE)) != SUCCESS)
2175 break;
2176
2177 err = smctr_restart_tx_chain(dev, MAC_QUEUE);
2178 }
2179 break;
2180
2181 /* Type 0x07 - NON-MAC RX Resource Interrupt
2182 * Subtype bit 12 - (BW) BDB warning
2183 * Subtype bit 13 - (FW) FCB warning
2184 * Subtype bit 14 - (BE) BDB End of chain
2185 * Subtype bit 15 - (FE) FCB End of chain
2186 */
2187 case ISB_IMC_NON_MAC_RX_RESOURCE:
2188 tp->rx_fifo_overrun_count = 0;
2189 tp->receive_queue_number = NON_MAC_QUEUE;
2190 err1 = smctr_rx_frame(dev);
2191
2192 if(isb_subtype & NON_MAC_RX_RESOURCE_FE)
2193 {
2194 if((err = smctr_issue_resume_rx_fcb_cmd( dev, NON_MAC_QUEUE)) != SUCCESS) break;
2195
2196 if(tp->ptr_rx_fcb_overruns)
2197 (*tp->ptr_rx_fcb_overruns)++;
2198 }
2199
2200 if(isb_subtype & NON_MAC_RX_RESOURCE_BE)
2201 {
2202 if((err = smctr_issue_resume_rx_bdb_cmd( dev, NON_MAC_QUEUE)) != SUCCESS) break;
2203
2204 if(tp->ptr_rx_bdb_overruns)
2205 (*tp->ptr_rx_bdb_overruns)++;
2206 }
2207 err = err1;
2208 break;
2209
2210 /* Type 0x08 - MAC RX Resource Interrupt
2211 * Subtype bit 12 - (BW) BDB warning
2212 * Subtype bit 13 - (FW) FCB warning
2213 * Subtype bit 14 - (BE) BDB End of chain
2214 * Subtype bit 15 - (FE) FCB End of chain
2215 */
2216 case ISB_IMC_MAC_RX_RESOURCE:
2217 tp->receive_queue_number = MAC_QUEUE;
2218 err1 = smctr_rx_frame(dev);
2219
2220 if(isb_subtype & MAC_RX_RESOURCE_FE)
2221 {
2222 if((err = smctr_issue_resume_rx_fcb_cmd( dev, MAC_QUEUE)) != SUCCESS)
2223 break;
2224
2225 if(tp->ptr_rx_fcb_overruns)
2226 (*tp->ptr_rx_fcb_overruns)++;
2227 }
2228
2229 if(isb_subtype & MAC_RX_RESOURCE_BE)
2230 {
2231 if((err = smctr_issue_resume_rx_bdb_cmd( dev, MAC_QUEUE)) != SUCCESS)
2232 break;
2233
2234 if(tp->ptr_rx_bdb_overruns)
2235 (*tp->ptr_rx_bdb_overruns)++;
2236 }
2237 err = err1;
2238 break;
2239
2240 /* Type 0x09 - NON_MAC RX Frame Interrupt */
2241 case ISB_IMC_NON_MAC_RX_FRAME:
2242 tp->rx_fifo_overrun_count = 0;
2243 tp->receive_queue_number = NON_MAC_QUEUE;
2244 err = smctr_rx_frame(dev);
2245 break;
2246
2247 /* Type 0x0A - MAC RX Frame Interrupt */
2248 case ISB_IMC_MAC_RX_FRAME:
2249 tp->receive_queue_number = MAC_QUEUE;
2250 err = smctr_rx_frame(dev);
2251 break;
2252
2253 /* Type 0x0B - TRC status
2254 * TRC has encountered an error condition
2255 * subtype bit 14 - transmit FIFO underrun
2256 * subtype bit 15 - receive FIFO overrun
2257 */
2258 case ISB_IMC_TRC_FIFO_STATUS:
2259 if(isb_subtype & TRC_FIFO_STATUS_TX_UNDERRUN)
2260 {
2261 if(tp->ptr_tx_fifo_underruns)
2262 (*tp->ptr_tx_fifo_underruns)++;
2263 }
2264
2265 if(isb_subtype & TRC_FIFO_STATUS_RX_OVERRUN)
2266 {
2267 /* update overrun stuck receive counter
2268 * if >= 3, has to clear it by sending
2269 * back to back frames. We pick
2270 * DAT(duplicate address MAC frame)
2271 */
2272 tp->rx_fifo_overrun_count++;
2273
2274 if(tp->rx_fifo_overrun_count >= 3)
2275 {
2276 tp->rx_fifo_overrun_count = 0;
2277
2278 /* delay clearing fifo overrun
2279 * imask till send_BUG tx
2280 * complete posted
2281 */
2282 interrupt_unmask_bits &= (~0x800);
2283 printk(KERN_CRIT "Jay please send bug\n");// smctr_send_bug(dev);
2284 }
2285
2286 if(tp->ptr_rx_fifo_overruns)
2287 (*tp->ptr_rx_fifo_overruns)++;
2288 }
2289
2290 err = SUCCESS;
2291 break;
2292
2293 /* Type 0x0C - Action Command Status Interrupt
2294 * Subtype bit 14 - CB end of command chain (CE)
2295 * Subtype bit 15 - CB command interrupt (CI)
2296 */
2297 case ISB_IMC_COMMAND_STATUS:
2298 err = SUCCESS;
2299 if(tp->acb_head->cmd == ACB_CMD_HIC_NOP)
2300 {
2301 printk(KERN_ERR "i1\n");
2302 smctr_disable_16bit(dev);
2303
2304 /* XXXXXXXXXXXXXXXXX */
2305 /* err = UM_Interrupt(dev); */
2306
2307 smctr_enable_16bit(dev);
2308 }
2309 else
2310 {
2311 if((tp->acb_head->cmd
2312 == ACB_CMD_READ_TRC_STATUS) &&
2313 (tp->acb_head->subcmd
2314 == RW_TRC_STATUS_BLOCK))
2315 {
2316 if(tp->ptr_bcn_type)
2317 {
2318 *(tp->ptr_bcn_type)
2319 = (__u32)((SBlock *)tp->misc_command_data)->BCN_Type;
2320 }
2321
2322 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & ERROR_COUNTERS_CHANGED)
2323 {
2324 smctr_update_err_stats(dev);
2325 }
2326
2327 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & TI_NDIS_RING_STATUS_CHANGED)
2328 {
2329 tp->ring_status
2330 = ((SBlock*)tp->misc_command_data)->TI_NDIS_Ring_Status;
2331 smctr_disable_16bit(dev);
2332 err = smctr_ring_status_chg(dev);
2333 smctr_enable_16bit(dev);
2334 if((tp->ring_status & REMOVE_RECEIVED) &&
2335 (tp->config_word0 & NO_AUTOREMOVE))
2336 {
2337 smctr_issue_remove_cmd(dev);
2338 }
2339
2340 if(err != SUCCESS)
2341 {
2342 tp->acb_pending = 0;
2343 break;
2344 }
2345 }
2346
2347 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & UNA_CHANGED)
2348 {
2349 if(tp->ptr_una)
2350 {
2351 tp->ptr_una[0] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[0]);
2352 tp->ptr_una[1] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[1]);
2353 tp->ptr_una[2] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[2]);
2354 }
2355
2356 }
2357
2358 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & READY_TO_SEND_RQ_INIT) {
2359 err = smctr_send_rq_init(dev);
2360 }
2361 }
2362 }
2363
2364 tp->acb_pending = 0;
2365 break;
2366
2367 /* Type 0x0D - MAC Type 1 interrupt
2368 * Subtype -- 00 FR_BCN received at S12
2369 * 01 FR_BCN received at S21
2370 * 02 FR_DAT(DA=MA, A<>0) received at S21
2371 * 03 TSM_EXP at S21
2372 * 04 FR_REMOVE received at S42
2373 * 05 TBR_EXP, BR_FLAG_SET at S42
2374 * 06 TBT_EXP at S53
2375 */
2376 case ISB_IMC_MAC_TYPE_1:
2377 if(isb_subtype > 8)
2378 {
2379 err = HARDWARE_FAILED;
2380 break;
2381 }
2382
2383 err = SUCCESS;
2384 switch(isb_subtype)
2385 {
2386 case 0:
2387 tp->join_state = JS_BYPASS_STATE;
2388 if(tp->status != CLOSED)
2389 {
2390 tp->status = CLOSED;
2391 err = smctr_status_chg(dev);
2392 }
2393 break;
2394
2395 case 1:
2396 tp->join_state = JS_LOBE_TEST_STATE;
2397 break;
2398
2399 case 2:
2400 tp->join_state = JS_DETECT_MONITOR_PRESENT_STATE;
2401 break;
2402
2403 case 3:
2404 tp->join_state = JS_AWAIT_NEW_MONITOR_STATE;
2405 break;
2406
2407 case 4:
2408 tp->join_state = JS_DUPLICATE_ADDRESS_TEST_STATE;
2409 break;
2410
2411 case 5:
2412 tp->join_state = JS_NEIGHBOR_NOTIFICATION_STATE;
2413 break;
2414
2415 case 6:
2416 tp->join_state = JS_REQUEST_INITIALIZATION_STATE;
2417 break;
2418
2419 case 7:
2420 tp->join_state = JS_JOIN_COMPLETE_STATE;
2421 tp->status = OPEN;
2422 err = smctr_status_chg(dev);
2423 break;
2424
2425 case 8:
2426 tp->join_state = JS_BYPASS_WAIT_STATE;
2427 break;
2428 }
2429 break ;
2430
2431 /* Type 0x0E - TRC Initialization Sequence Interrupt
2432 * Subtype -- 00-FF Initializatin sequence complete
2433 */
2434 case ISB_IMC_TRC_INTRNL_TST_STATUS:
2435 tp->status = INITIALIZED;
2436 smctr_disable_16bit(dev);
2437 err = smctr_status_chg(dev);
2438 smctr_enable_16bit(dev);
2439 break;
2440
2441 /* other interrupt types, illegal */
2442 default:
2443 break;
2444 }
2445
2446 if(err != SUCCESS)
2447 break;
2448 }
2449
2450 /* Checking the ack code instead of the unmask bits here is because :
2451 * while fixing the stuck receive, DAT frame are sent and mask off
2452 * FIFO overrun interrupt temporarily (interrupt_unmask_bits = 0)
2453 * but we still want to issue ack to ISB
2454 */
2455 if(!(interrupt_ack_code & 0xff00))
2456 smctr_issue_int_ack(dev, interrupt_ack_code, interrupt_unmask_bits);
2457
2458 smctr_disable_16bit(dev);
2459 smctr_enable_bic_int(dev);
2460 spin_unlock(&tp->lock);
2461
2462 return IRQ_HANDLED;
2463}
2464
2465static int smctr_issue_enable_int_cmd(struct net_device *dev,
2466 __u16 interrupt_enable_mask)
2467{
2468 struct net_local *tp = netdev_priv(dev);
2469 int err;
2470
2471 if((err = smctr_wait_while_cbusy(dev)))
2472 return err;
2473
2474 tp->sclb_ptr->int_mask_control = interrupt_enable_mask;
2475 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
2476
2477 smctr_set_ctrl_attention(dev);
2478
2479 return 0;
2480}
2481
2482static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits)
2483{
2484 struct net_local *tp = netdev_priv(dev);
2485
2486 if(smctr_wait_while_cbusy(dev))
2487 return -1;
2488
2489 tp->sclb_ptr->int_mask_control = ibits;
2490 tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */ tp->sclb_ptr->resume_control = 0;
2491 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_IACK_CODE_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
2492
2493 smctr_set_ctrl_attention(dev);
2494
2495 return 0;
2496}
2497
/* Build the TRC "init timers" parameter block in misc_command_data and
 * issue ACB_CMD_INIT_TRC_TIMERS.  The block is assembled strictly in
 * order: config word 0, config word 1, the protocol timer limits
 * (4Mbit or 16Mbit set), the node / group / functional / bit-wise group
 * addresses, ring numbers, physical drop number and product instance ID.
 * Returns 0 on success or the error from the wait/setup helpers.
 */
static int smctr_issue_init_timers_cmd(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        unsigned int i;
        int err;
        /* Parameter words are written sequentially into the shared
         * command data area via this cursor.
         */
        __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data;

        if((err = smctr_wait_while_cbusy(dev)))
                return err;

        if((err = smctr_wait_cmd(dev)))
                return err;

        /* Start from the baseline configuration. */
        tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
        tp->config_word1 = 0;

        /* 16Mbit media types need the frequency select bit. */
        if((tp->media_type == MEDIA_STP_16) ||
           (tp->media_type == MEDIA_UTP_16) ||
           (tp->media_type == MEDIA_STP_16_UTP_16))
        {
                tp->config_word0 |= FREQ_16MB_BIT;
        }

        if(tp->mode_bits & EARLY_TOKEN_REL)
                tp->config_word0 |= ETREN;

        /* Loopback modes receive our own transmissions. */
        if(tp->mode_bits & LOOPING_MODE_MASK)
                tp->config_word0 |= RX_OWN_BIT;
        else
                tp->config_word0 &= ~RX_OWN_BIT;

        if(tp->receive_mask & PROMISCUOUS_MODE)
                tp->config_word0 |= PROMISCUOUS_BIT;
        else
                tp->config_word0 &= ~PROMISCUOUS_BIT;

        /* SAVBAD: keep frames that arrived with errors. */
        if(tp->receive_mask & ACCEPT_ERR_PACKETS)
                tp->config_word0 |= SAVBAD_BIT;
        else
                tp->config_word0 &= ~SAVBAD_BIT;

        if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
                tp->config_word0 |= RXATMAC;
        else
                tp->config_word0 &= ~RXATMAC;

        if(tp->receive_mask & ACCEPT_MULTI_PROM)
                tp->config_word1 |= MULTICAST_ADDRESS_BIT;
        else
                tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;

        /* Source routing: "spanning" implies both bits, plain source
         * routing only the explorer bit.
         */
        if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
                tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
        else
        {
                if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
                        tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
                else
                        tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
        }

        if((tp->media_type == MEDIA_STP_16) ||
           (tp->media_type == MEDIA_UTP_16) ||
           (tp->media_type == MEDIA_STP_16_UTP_16))
        {
                tp->config_word1 |= INTERFRAME_SPACING_16;
        }
        else
                tp->config_word1 |= INTERFRAME_SPACING_4;

        *pTimer_Struc++ = tp->config_word0;
        *pTimer_Struc++ = tp->config_word1;

        /* Protocol timer limits: one fixed table per ring speed. */
        if((tp->media_type == MEDIA_STP_4) ||
           (tp->media_type == MEDIA_UTP_4) ||
           (tp->media_type == MEDIA_STP_4_UTP_4))
        {
                *pTimer_Struc++ = 0x00FA;       /* prescale */
                *pTimer_Struc++ = 0x2710;       /* TPT_limit */
                *pTimer_Struc++ = 0x2710;       /* TQP_limit */
                *pTimer_Struc++ = 0x0A28;       /* TNT_limit */
                *pTimer_Struc++ = 0x3E80;       /* TBT_limit */
                *pTimer_Struc++ = 0x3A98;       /* TSM_limit */
                *pTimer_Struc++ = 0x1B58;       /* TAM_limit */
                *pTimer_Struc++ = 0x00C8;       /* TBR_limit */
                *pTimer_Struc++ = 0x07D0;       /* TER_limit */
                *pTimer_Struc++ = 0x000A;       /* TGT_limit */
                *pTimer_Struc++ = 0x1162;       /* THT_limit */
                *pTimer_Struc++ = 0x07D0;       /* TRR_limit */
                *pTimer_Struc++ = 0x1388;       /* TVX_limit */
                *pTimer_Struc++ = 0x0000;       /* reserved */
        }
        else
        {
                *pTimer_Struc++ = 0x03E8;       /* prescale */
                *pTimer_Struc++ = 0x9C40;       /* TPT_limit */
                *pTimer_Struc++ = 0x9C40;       /* TQP_limit */
                *pTimer_Struc++ = 0x0A28;       /* TNT_limit */
                *pTimer_Struc++ = 0x3E80;       /* TBT_limit */
                *pTimer_Struc++ = 0x3A98;       /* TSM_limit */
                *pTimer_Struc++ = 0x1B58;       /* TAM_limit */
                *pTimer_Struc++ = 0x00C8;       /* TBR_limit */
                *pTimer_Struc++ = 0x07D0;       /* TER_limit */
                *pTimer_Struc++ = 0x000A;       /* TGT_limit */
                *pTimer_Struc++ = 0x4588;       /* THT_limit */
                *pTimer_Struc++ = 0x1F40;       /* TRR_limit */
                *pTimer_Struc++ = 0x4E20;       /* TVX_limit */
                *pTimer_Struc++ = 0x0000;       /* reserved */
        }

        /* Set node address (packed two bytes per 16-bit word). */
        *pTimer_Struc++ = dev->dev_addr[0] << 8
                | (dev->dev_addr[1] & 0xFF);
        *pTimer_Struc++ = dev->dev_addr[2] << 8
                | (dev->dev_addr[3] & 0xFF);
        *pTimer_Struc++ = dev->dev_addr[4] << 8
                | (dev->dev_addr[5] & 0xFF);

        /* Set group address (byte-swapped into TRC order). */
        *pTimer_Struc++ = tp->group_address_0 << 8
                | tp->group_address_0 >> 8;
        *pTimer_Struc++ = tp->group_address[0] << 8
                | tp->group_address[0] >> 8;
        *pTimer_Struc++ = tp->group_address[1] << 8
                | tp->group_address[1] >> 8;

        /* Set functional address (byte-swapped). */
        *pTimer_Struc++ = tp->functional_address_0 << 8
                | tp->functional_address_0 >> 8;
        *pTimer_Struc++ = tp->functional_address[0] << 8
                | tp->functional_address[0] >> 8;
        *pTimer_Struc++ = tp->functional_address[1] << 8
                | tp->functional_address[1] >> 8;

        /* Set Bit-Wise group address (byte-swapped). */
        *pTimer_Struc++ = tp->bitwise_group_address[0] << 8
                | tp->bitwise_group_address[0] >> 8;
        *pTimer_Struc++ = tp->bitwise_group_address[1] << 8
                | tp->bitwise_group_address[1] >> 8;

        /* Set ring number address. */
        *pTimer_Struc++ = tp->source_ring_number;
        *pTimer_Struc++ = tp->target_ring_number;

        /* Physical drop number (unused, reported as zero). */
        *pTimer_Struc++ = (unsigned short)0;
        *pTimer_Struc++ = (unsigned short)0;

        /* Product instance ID (unused, zero-filled, 9 words). */
        for(i = 0; i < 9; i++)
                *pTimer_Struc++ = (unsigned short)0;

        err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0);

        return err;
}
2654
/* Hand the TRC its initial transmit/receive queue pointer table via the
 * ACB_CMD_INIT_TX_RX command.  The table layout is fixed: one pointer per
 * TX queue (used queues point at their first FCB, unused slots are zero),
 * followed by an FCB/BDB pointer pair per RX queue (again zero for unused
 * slots).  Returns 0 on success or the error from the wait helpers.
 */
static int smctr_issue_init_txrx_cmd(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        unsigned int i;
        int err;
        /* The pointer table is assembled in the shared command data area. */
        void **txrx_ptrs = (void *)tp->misc_command_data;

        if((err = smctr_wait_while_cbusy(dev)))
                return err;

        if((err = smctr_wait_cmd(dev)))
        {
                printk(KERN_ERR "%s: Hardware failure\n", dev->name);
                return err;
        }

        /* Initialize Transmit Queue Pointers that are used, to point to
         * a single FCB.  TRC_POINTER converts to adapter address space.
         */
        for(i = 0; i < NUM_TX_QS_USED; i++)
                *txrx_ptrs++ = (void *)TRC_POINTER(tp->tx_fcb_head[i]);

        /* Initialize Transmit Queue Pointers that are NOT used to ZERO. */
        for(; i < MAX_TX_QS; i++)
                *txrx_ptrs++ = (void *)0;

        /* Initialize Receive Queue Pointers (MAC and Non-MAC) that are
         * used, to point to a single FCB and a BDB chain of buffers.
         */
        for(i = 0; i < NUM_RX_QS_USED; i++)
        {
                *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_fcb_head[i]);
                *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_bdb_head[i]);
        }

        /* Initialize Receive Queue Pointers that are NOT used to ZERO. */
        for(; i < MAX_RX_QS; i++)
        {
                *txrx_ptrs++ = (void *)0;
                *txrx_ptrs++ = (void *)0;
        }

        err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0);

        return err;
}
2701
2702static int smctr_issue_insert_cmd(struct net_device *dev)
2703{
2704 int err;
2705
2706 err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP);
2707
2708 return err;
2709}
2710
2711static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
2712{
2713 int err;
2714
2715 if((err = smctr_wait_while_cbusy(dev)))
2716 return err;
2717
2718 if((err = smctr_wait_cmd(dev)))
2719 return err;
2720
2721 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS,
2722 RW_TRC_STATUS_BLOCK);
2723
2724 return err;
2725}
2726
2727static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
2728{
2729 int err;
2730
2731 if((err = smctr_wait_while_cbusy(dev)))
2732 return err;
2733
2734 if((err = smctr_wait_cmd(dev)))
2735 return err;
2736
2737 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE,
2738 aword_cnt);
2739
2740 return err;
2741}
2742
2743static int smctr_issue_remove_cmd(struct net_device *dev)
2744{
2745 struct net_local *tp = netdev_priv(dev);
2746 int err;
2747
2748 if((err = smctr_wait_while_cbusy(dev)))
2749 return err;
2750
2751 tp->sclb_ptr->resume_control = 0;
2752 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE;
2753
2754 smctr_set_ctrl_attention(dev);
2755
2756 return 0;
2757}
2758
2759static int smctr_issue_resume_acb_cmd(struct net_device *dev)
2760{
2761 struct net_local *tp = netdev_priv(dev);
2762 int err;
2763
2764 if((err = smctr_wait_while_cbusy(dev)))
2765 return err;
2766
2767 tp->sclb_ptr->resume_control = SCLB_RC_ACB;
2768 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
2769
2770 tp->acb_pending = 1;
2771
2772 smctr_set_ctrl_attention(dev);
2773
2774 return 0;
2775}
2776
2777static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
2778{
2779 struct net_local *tp = netdev_priv(dev);
2780 int err;
2781
2782 if((err = smctr_wait_while_cbusy(dev)))
2783 return err;
2784
2785 if(queue == MAC_QUEUE)
2786 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB;
2787 else
2788 tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_BDB;
2789
2790 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
2791
2792 smctr_set_ctrl_attention(dev);
2793
2794 return 0;
2795}
2796
2797static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
2798{
2799 struct net_local *tp = netdev_priv(dev);
2800
2801 if(smctr_debug > 10)
2802 printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name);
2803
2804 if(smctr_wait_while_cbusy(dev))
2805 return -1;
2806
2807 if(queue == MAC_QUEUE)
2808 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB;
2809 else
2810 tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_FCB;
2811
2812 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
2813
2814 smctr_set_ctrl_attention(dev);
2815
2816 return 0;
2817}
2818
2819static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
2820{
2821 struct net_local *tp = netdev_priv(dev);
2822
2823 if(smctr_debug > 10)
2824 printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name);
2825
2826 if(smctr_wait_while_cbusy(dev))
2827 return -1;
2828
2829 tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue);
2830 tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID;
2831
2832 smctr_set_ctrl_attention(dev);
2833
2834 return 0;
2835}
2836
2837static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
2838{
2839 int err;
2840
2841 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2842 TRC_INTERNAL_ROM_TEST);
2843
2844 return err;
2845}
2846
2847static int smctr_issue_test_hic_cmd(struct net_device *dev)
2848{
2849 int err;
2850
2851 err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST,
2852 TRC_HOST_INTERFACE_REG_TEST);
2853
2854 return err;
2855}
2856
2857static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
2858{
2859 int err;
2860
2861 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2862 TRC_MAC_REGISTERS_TEST);
2863
2864 return err;
2865}
2866
2867static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
2868{
2869 int err;
2870
2871 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2872 TRC_INTERNAL_LOOPBACK);
2873
2874 return err;
2875}
2876
2877static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
2878{
2879 int err;
2880
2881 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2882 TRC_TRI_LOOPBACK);
2883
2884 return err;
2885}
2886
2887static int smctr_issue_write_byte_cmd(struct net_device *dev,
2888 short aword_cnt, void *byte)
2889{
2890 struct net_local *tp = netdev_priv(dev);
2891 unsigned int iword, ibyte;
2892 int err;
2893
2894 if((err = smctr_wait_while_cbusy(dev)))
2895 return err;
2896
2897 if((err = smctr_wait_cmd(dev)))
2898 return err;
2899
2900 for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff);
2901 iword++, ibyte += 2)
2902 {
2903 tp->misc_command_data[iword] = (*((__u8 *)byte + ibyte) << 8)
2904 | (*((__u8 *)byte + ibyte + 1));
2905 }
2906
2907 return smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2908 aword_cnt);
2909}
2910
2911static int smctr_issue_write_word_cmd(struct net_device *dev,
2912 short aword_cnt, void *word)
2913{
2914 struct net_local *tp = netdev_priv(dev);
2915 unsigned int i, err;
2916
2917 if((err = smctr_wait_while_cbusy(dev)))
2918 return err;
2919
2920 if((err = smctr_wait_cmd(dev)))
2921 return err;
2922
2923 for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++)
2924 tp->misc_command_data[i] = *((__u16 *)word + i);
2925
2926 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2927 aword_cnt);
2928
2929 return err;
2930}
2931
2932static int smctr_join_complete_state(struct net_device *dev)
2933{
2934 int err;
2935
2936 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
2937 JS_JOIN_COMPLETE_STATE);
2938
2939 return err;
2940}
2941
2942static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
2943{
2944 struct net_local *tp = netdev_priv(dev);
2945 unsigned int i, j;
2946 FCBlock *fcb;
2947 BDBlock *bdb;
2948
2949 for(i = 0; i < NUM_TX_QS_USED; i++)
2950 {
2951 fcb = tp->tx_fcb_head[i];
2952 bdb = tp->tx_bdb_head[i];
2953
2954 for(j = 0; j < tp->num_tx_fcbs[i]; j++)
2955 {
2956 fcb->bdb_ptr = bdb;
2957 fcb->trc_bdb_ptr = TRC_POINTER(bdb);
2958 fcb = (FCBlock *)((char *)fcb + sizeof(FCBlock));
2959 bdb = (BDBlock *)((char *)bdb + sizeof(BDBlock));
2960 }
2961 }
2962
2963 return 0;
2964}
2965
2966static int smctr_load_firmware(struct net_device *dev)
2967{
2968 struct net_local *tp = netdev_priv(dev);
2969 const struct firmware *fw;
2970 __u16 i, checksum = 0;
2971 int err = 0;
2972
2973 if(smctr_debug > 10)
2974 printk(KERN_DEBUG "%s: smctr_load_firmware\n", dev->name);
2975
2976 if (request_firmware(&fw, "tr_smctr.bin", &dev->dev)) {
2977 printk(KERN_ERR "%s: firmware not found\n", dev->name);
2978 return UCODE_NOT_PRESENT;
2979 }
2980
2981 tp->num_of_tx_buffs = 4;
2982 tp->mode_bits |= UMAC;
2983 tp->receive_mask = 0;
2984 tp->max_packet_size = 4177;
2985
2986 /* Can only upload the firmware once per adapter reset. */
2987 if (tp->microcode_version != 0) {
2988 err = (UCODE_PRESENT);
2989 goto out;
2990 }
2991
2992 /* Verify the firmware exists and is there in the right amount. */
2993 if (!fw->data ||
2994 (*(fw->data + UCODE_VERSION_OFFSET) < UCODE_VERSION))
2995 {
2996 err = (UCODE_NOT_PRESENT);
2997 goto out;
2998 }
2999
3000 /* UCODE_SIZE is not included in Checksum. */
3001 for(i = 0; i < *((__u16 *)(fw->data + UCODE_SIZE_OFFSET)); i += 2)
3002 checksum += *((__u16 *)(fw->data + 2 + i));
3003 if (checksum) {
3004 err = (UCODE_NOT_PRESENT);
3005 goto out;
3006 }
3007
3008 /* At this point we have a valid firmware image, lets kick it on up. */
3009 smctr_enable_adapter_ram(dev);
3010 smctr_enable_16bit(dev);
3011 smctr_set_page(dev, (__u8 *)tp->ram_access);
3012
3013 if((smctr_checksum_firmware(dev)) ||
3014 (*(fw->data + UCODE_VERSION_OFFSET) > tp->microcode_version))
3015 {
3016 smctr_enable_adapter_ctrl_store(dev);
3017
3018 /* Zero out ram space for firmware. */
3019 for(i = 0; i < CS_RAM_SIZE; i += 2)
3020 *((__u16 *)(tp->ram_access + i)) = 0;
3021
3022 smctr_decode_firmware(dev, fw);
3023
3024 tp->microcode_version = *(fw->data + UCODE_VERSION_OFFSET); *((__u16 *)(tp->ram_access + CS_RAM_VERSION_OFFSET))
3025 = (tp->microcode_version << 8);
3026 *((__u16 *)(tp->ram_access + CS_RAM_CHECKSUM_OFFSET))
3027 = ~(tp->microcode_version << 8) + 1;
3028
3029 smctr_disable_adapter_ctrl_store(dev);
3030
3031 if(smctr_checksum_firmware(dev))
3032 err = HARDWARE_FAILED;
3033 }
3034 else
3035 err = UCODE_PRESENT;
3036
3037 smctr_disable_16bit(dev);
3038 out:
3039 release_firmware(fw);
3040 return err;
3041}
3042
3043static int smctr_load_node_addr(struct net_device *dev)
3044{
3045 int ioaddr = dev->base_addr;
3046 unsigned int i;
3047 __u8 r;
3048
3049 for(i = 0; i < 6; i++)
3050 {
3051 r = inb(ioaddr + LAR0 + i);
3052 dev->dev_addr[i] = (char)r;
3053 }
3054 dev->addr_len = 6;
3055
3056 return 0;
3057}
3058
3059/* Lobe Media Test.
3060 * During the transmission of the initial 1500 lobe media MAC frames,
3061 * the phase lock loop in the 805 chip may lock, and then un-lock, causing
3062 * the 825 to go into a PURGE state. When performing a PURGE, the MCT
3063 * microcode will not transmit any frames given to it by the host, and
3064 * will consequently cause a timeout.
3065 *
3066 * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit
3067 * queues other than the one used for the lobe_media_test should be
3068 * disabled.!?
3069 *
3070 * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask
3071 * has any multi-cast or promiscuous bits set, the receive_mask needs to
3072 * be changed to clear the multi-cast or promiscuous mode bits, the lobe_test
3073 * run, and then the receive mask set back to its original value if the test
3074 * is successful.
3075 */
/* Run the lobe media test: clear the receive mask, transmit 1500 test
 * frames (tolerating a single transient failure by re-arming the test
 * once), send a pair of DAT frames, and verify nothing was received
 * during the test.  On any failure the adapter is reset and
 * LOBE_MEDIA_TEST_FAILED is returned; on success the saved receive mask
 * is restored and 0 is returned.
 */
static int smctr_lobe_media_test(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        unsigned int i, perror = 0;
        unsigned short saved_rcv_mask;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_lobe_media_test\n", dev->name);

        /* Clear receive mask for lobe test. */
        saved_rcv_mask = tp->receive_mask;
        tp->receive_mask = 0;

        smctr_chg_rx_mask(dev);

        /* Setup the lobe media test. */
        smctr_lobe_media_test_cmd(dev);
        if(smctr_wait_cmd(dev))
                goto err;

        /* Tx lobe media test frames.  One transient send failure is
         * tolerated: re-arm the test command and keep going; a second
         * failure aborts the test.
         */
        for(i = 0; i < 1500; ++i)
        {
                if(smctr_send_lobe_media_test(dev))
                {
                        if(perror)
                                goto err;
                        else
                        {
                                perror = 1;
                                if(smctr_lobe_media_test_cmd(dev))
                                        goto err;
                        }
                }
        }

        /* DAT (duplicate address test) frame: retry once before failing. */
        if(smctr_send_dat(dev))
        {
                if(smctr_send_dat(dev))
                        goto err;
        }

        /* Check if any frames received during test. */
        if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status) ||
           (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status))
                goto err;

        /* Restore the receive mask saved at entry. */
        tp->receive_mask = saved_rcv_mask;

        smctr_chg_rx_mask(dev);

        return 0;
err:
        smctr_reset_adapter(dev);
        tp->status = CLOSED;
        return LOBE_MEDIA_TEST_FAILED;
}
3134
3135static int smctr_lobe_media_test_cmd(struct net_device *dev)
3136{
3137 struct net_local *tp = netdev_priv(dev);
3138 int err;
3139
3140 if(smctr_debug > 10)
3141 printk(KERN_DEBUG "%s: smctr_lobe_media_test_cmd\n", dev->name);
3142
3143 /* Change to lobe media test state. */
3144 if(tp->monitor_state != MS_BEACON_TEST_STATE)
3145 {
3146 smctr_lobe_media_test_state(dev);
3147 if(smctr_wait_cmd(dev))
3148 {
3149 printk(KERN_ERR "Lobe Failed test state\n");
3150 return LOBE_MEDIA_TEST_FAILED;
3151 }
3152 }
3153
3154 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
3155 TRC_LOBE_MEDIA_TEST);
3156
3157 return err;
3158}
3159
3160static int smctr_lobe_media_test_state(struct net_device *dev)
3161{
3162 int err;
3163
3164 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
3165 JS_LOBE_TEST_STATE);
3166
3167 return err;
3168}
3169
3170static int smctr_make_8025_hdr(struct net_device *dev,
3171 MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc)
3172{
3173 tmf->ac = MSB(ac_fc); /* msb is access control */
3174 tmf->fc = LSB(ac_fc); /* lsb is frame control */
3175
3176 tmf->sa[0] = dev->dev_addr[0];
3177 tmf->sa[1] = dev->dev_addr[1];
3178 tmf->sa[2] = dev->dev_addr[2];
3179 tmf->sa[3] = dev->dev_addr[3];
3180 tmf->sa[4] = dev->dev_addr[4];
3181 tmf->sa[5] = dev->dev_addr[5];
3182
3183 switch(tmf->vc)
3184 {
3185 /* Send RQ_INIT to RPS */
3186 case RQ_INIT:
3187 tmf->da[0] = 0xc0;
3188 tmf->da[1] = 0x00;
3189 tmf->da[2] = 0x00;
3190 tmf->da[3] = 0x00;
3191 tmf->da[4] = 0x00;
3192 tmf->da[5] = 0x02;
3193 break;
3194
3195 /* Send RPT_TX_FORWARD to CRS */
3196 case RPT_TX_FORWARD:
3197 tmf->da[0] = 0xc0;
3198 tmf->da[1] = 0x00;
3199 tmf->da[2] = 0x00;
3200 tmf->da[3] = 0x00;
3201 tmf->da[4] = 0x00;
3202 tmf->da[5] = 0x10;
3203 break;
3204
3205 /* Everything else goes to sender */
3206 default:
3207 tmf->da[0] = rmf->sa[0];
3208 tmf->da[1] = rmf->sa[1];
3209 tmf->da[2] = rmf->sa[2];
3210 tmf->da[3] = rmf->sa[3];
3211 tmf->da[4] = rmf->sa[4];
3212 tmf->da[5] = rmf->sa[5];
3213 break;
3214 }
3215
3216 return 0;
3217}
3218
3219static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3220{
3221 struct net_local *tp = netdev_priv(dev);
3222
3223 tsv->svi = AUTHORIZED_ACCESS_PRIORITY;
3224 tsv->svl = S_AUTHORIZED_ACCESS_PRIORITY;
3225
3226 tsv->svv[0] = MSB(tp->authorized_access_priority);
3227 tsv->svv[1] = LSB(tp->authorized_access_priority);
3228
3229 return 0;
3230}
3231
3232static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3233{
3234 tsv->svi = ADDRESS_MODIFER;
3235 tsv->svl = S_ADDRESS_MODIFER;
3236
3237 tsv->svv[0] = 0;
3238 tsv->svv[1] = 0;
3239
3240 return 0;
3241}
3242
3243static int smctr_make_auth_funct_class(struct net_device *dev,
3244 MAC_SUB_VECTOR *tsv)
3245{
3246 struct net_local *tp = netdev_priv(dev);
3247
3248 tsv->svi = AUTHORIZED_FUNCTION_CLASS;
3249 tsv->svl = S_AUTHORIZED_FUNCTION_CLASS;
3250
3251 tsv->svv[0] = MSB(tp->authorized_function_classes);
3252 tsv->svv[1] = LSB(tp->authorized_function_classes);
3253
3254 return 0;
3255}
3256
3257static int smctr_make_corr(struct net_device *dev,
3258 MAC_SUB_VECTOR *tsv, __u16 correlator)
3259{
3260 tsv->svi = CORRELATOR;
3261 tsv->svl = S_CORRELATOR;
3262
3263 tsv->svv[0] = MSB(correlator);
3264 tsv->svv[1] = LSB(correlator);
3265
3266 return 0;
3267}
3268
3269static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3270{
3271 struct net_local *tp = netdev_priv(dev);
3272
3273 smctr_get_functional_address(dev);
3274
3275 tsv->svi = FUNCTIONAL_ADDRESS;
3276 tsv->svl = S_FUNCTIONAL_ADDRESS;
3277
3278 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3279 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3280
3281 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3282 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3283
3284 return 0;
3285}
3286
3287static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3288{
3289 struct net_local *tp = netdev_priv(dev);
3290
3291 smctr_get_group_address(dev);
3292
3293 tsv->svi = GROUP_ADDRESS;
3294 tsv->svl = S_GROUP_ADDRESS;
3295
3296 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3297 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3298
3299 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3300 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3301
3302 /* Set Group Address Sub-vector to all zeros if only the
3303 * Group Address/Functional Address Indicator is set.
3304 */
3305 if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00 &&
3306 tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
3307 tsv->svv[0] = 0x00;
3308
3309 return 0;
3310}
3311
3312static int smctr_make_phy_drop_num(struct net_device *dev,
3313 MAC_SUB_VECTOR *tsv)
3314{
3315 struct net_local *tp = netdev_priv(dev);
3316
3317 smctr_get_physical_drop_number(dev);
3318
3319 tsv->svi = PHYSICAL_DROP;
3320 tsv->svl = S_PHYSICAL_DROP;
3321
3322 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3323 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3324
3325 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3326 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3327
3328 return 0;
3329}
3330
3331static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3332{
3333 int i;
3334
3335 tsv->svi = PRODUCT_INSTANCE_ID;
3336 tsv->svl = S_PRODUCT_INSTANCE_ID;
3337
3338 for(i = 0; i < 18; i++)
3339 tsv->svv[i] = 0xF0;
3340
3341 return 0;
3342}
3343
3344static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3345{
3346 struct net_local *tp = netdev_priv(dev);
3347
3348 smctr_get_station_id(dev);
3349
3350 tsv->svi = STATION_IDENTIFER;
3351 tsv->svl = S_STATION_IDENTIFER;
3352
3353 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3354 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3355
3356 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3357 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3358
3359 tsv->svv[4] = MSB(tp->misc_command_data[2]);
3360 tsv->svv[5] = LSB(tp->misc_command_data[2]);
3361
3362 return 0;
3363}
3364
3365static int smctr_make_ring_station_status(struct net_device *dev,
3366 MAC_SUB_VECTOR * tsv)
3367{
3368 tsv->svi = RING_STATION_STATUS;
3369 tsv->svl = S_RING_STATION_STATUS;
3370
3371 tsv->svv[0] = 0;
3372 tsv->svv[1] = 0;
3373 tsv->svv[2] = 0;
3374 tsv->svv[3] = 0;
3375 tsv->svv[4] = 0;
3376 tsv->svv[5] = 0;
3377
3378 return 0;
3379}
3380
3381static int smctr_make_ring_station_version(struct net_device *dev,
3382 MAC_SUB_VECTOR *tsv)
3383{
3384 struct net_local *tp = netdev_priv(dev);
3385
3386 tsv->svi = RING_STATION_VERSION_NUMBER;
3387 tsv->svl = S_RING_STATION_VERSION_NUMBER;
3388
3389 tsv->svv[0] = 0xe2; /* EBCDIC - S */
3390 tsv->svv[1] = 0xd4; /* EBCDIC - M */
3391 tsv->svv[2] = 0xc3; /* EBCDIC - C */
3392 tsv->svv[3] = 0x40; /* EBCDIC - */
3393 tsv->svv[4] = 0xe5; /* EBCDIC - V */
3394 tsv->svv[5] = 0xF0 + (tp->microcode_version >> 4);
3395 tsv->svv[6] = 0xF0 + (tp->microcode_version & 0x0f);
3396 tsv->svv[7] = 0x40; /* EBCDIC - */
3397 tsv->svv[8] = 0xe7; /* EBCDIC - X */
3398
3399 if(tp->extra_info & CHIP_REV_MASK)
3400 tsv->svv[9] = 0xc5; /* EBCDIC - E */
3401 else
3402 tsv->svv[9] = 0xc4; /* EBCDIC - D */
3403
3404 return 0;
3405}
3406
3407static int smctr_make_tx_status_code(struct net_device *dev,
3408 MAC_SUB_VECTOR *tsv, __u16 tx_fstatus)
3409{
3410 tsv->svi = TRANSMIT_STATUS_CODE;
3411 tsv->svl = S_TRANSMIT_STATUS_CODE;
3412
3413 tsv->svv[0] = ((tx_fstatus & 0x0100 >> 6) | IBM_PASS_SOURCE_ADDR);
3414
3415 /* Stripped frame status of Transmitted Frame */
3416 tsv->svv[1] = tx_fstatus & 0xff;
3417
3418 return 0;
3419}
3420
3421static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
3422 MAC_SUB_VECTOR *tsv)
3423{
3424 struct net_local *tp = netdev_priv(dev);
3425
3426 smctr_get_upstream_neighbor_addr(dev);
3427
3428 tsv->svi = UPSTREAM_NEIGHBOR_ADDRESS;
3429 tsv->svl = S_UPSTREAM_NEIGHBOR_ADDRESS;
3430
3431 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3432 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3433
3434 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3435 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3436
3437 tsv->svv[4] = MSB(tp->misc_command_data[2]);
3438 tsv->svv[5] = LSB(tp->misc_command_data[2]);
3439
3440 return 0;
3441}
3442
/* Fill in a WRAP_DATA subvector header for the lobe media test frame.
 *
 * Only the id/length header is set; the S_WRAP_DATA payload bytes are
 * deliberately left as whatever is in the transmit buffer (the test
 * only needs the frame to wrap, not specific data). Always returns 0.
 */
static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
{
        tsv->svi = WRAP_DATA;
        tsv->svl = S_WRAP_DATA;

        return 0;
}
3450
3451/*
3452 * Open/initialize the board. This is called sometime after
3453 * booting when the 'ifconfig' program is run.
3454 *
3455 * This routine should set everything up anew at each open, even
3456 * registers that "should" only need to be set once at boot, so that
3457 * there is non-reboot way to recover if something goes wrong.
3458 */
3459static int smctr_open(struct net_device *dev)
3460{
3461 int err;
3462
3463 if(smctr_debug > 10)
3464 printk(KERN_DEBUG "%s: smctr_open\n", dev->name);
3465
3466 err = smctr_init_adapter(dev);
3467 if(err < 0)
3468 return err;
3469
3470 return err;
3471}
3472
/* Interrupt driven open of Token card.
 *
 * Resumes the MAC and non-MAC receive queues, then either enters one
 * of the three loopback modes (selected via mode_bits &
 * LOOPING_MODE_MASK) or runs the lobe media test and issues the ring
 * insert command. tp->status tracks progress: set to CLOSED while a
 * mode command is outstanding, OPEN once it completes.
 *
 * Returns 0 if already open, -1 if the adapter was never initialized,
 * otherwise the last command's result (0 on success).
 */
static int smctr_open_tr(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        unsigned long flags;
        int err;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_open_tr\n", dev->name);

        /* Now we can actually open the adapter. */
        if(tp->status == OPEN)
                return 0;
        if(tp->status != INITIALIZED)
                return -1;

        /* FIXME: it would work a lot better if we masked the irq sources
           on the card here, then we could skip the locking and poll nicely */
        spin_lock_irqsave(&tp->lock, flags);

        smctr_set_page(dev, (__u8 *)tp->ram_access);

        /* Restart both receive queues: FCB chain and buffer (BDB)
         * chain for the MAC queue, then the same for the non-MAC
         * (data) queue. Any failure unwinds via 'out' with the lock
         * still released properly.
         */
        if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)MAC_QUEUE)))
                goto out;

        if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)MAC_QUEUE)))
                goto out;

        if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)NON_MAC_QUEUE)))
                goto out;

        if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)NON_MAC_QUEUE)))
                goto out;

        tp->status = CLOSED;

        /* Insert into the Ring or Enter Loopback Mode. */
        if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_1)
        {
                tp->status = CLOSED;

                if(!(err = smctr_issue_trc_loopback_cmd(dev)))
                {
                        if(!(err = smctr_wait_cmd(dev)))
                                tp->status = OPEN;
                }

                smctr_status_chg(dev);
        }
        else
        {
                if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_2)
                {
                        tp->status = CLOSED;
                        if(!(err = smctr_issue_tri_loopback_cmd(dev)))
                        {
                                if(!(err = smctr_wait_cmd(dev)))
                                        tp->status = OPEN;
                        }

                        smctr_status_chg(dev);
                }
                else
                {
                        if((tp->mode_bits & LOOPING_MODE_MASK)
                                == LOOPBACK_MODE_3)
                        {
                                tp->status = CLOSED;
                                if(!(err = smctr_lobe_media_test_cmd(dev)))
                                {
                                        if(!(err = smctr_wait_cmd(dev)))
                                                tp->status = OPEN;
                                }
                                smctr_status_chg(dev);
                        }
                        else
                        {
                                /* Normal path: lobe media test, then
                                 * ring insertion.
                                 */
                                if(!(err = smctr_lobe_media_test(dev)))
                                        err = smctr_issue_insert_cmd(dev);
                                else
                                {
                                        if(err == LOBE_MEDIA_TEST_FAILED)
                                                printk(KERN_WARNING "%s: Lobe Media Test Failure - Check cable?\n", dev->name);
                                }
                        }
                }
        }

out:
        spin_unlock_irqrestore(&tp->lock, flags);

        return err;
}
3566
3567/* Check for a network adapter of this type,
3568 * and return device structure if one exists.
3569 */
/* Check for a network adapter of this type,
 * and return device structure if one exists.
 *
 * Allocates a token-ring net_device, then probes either the address
 * configured via boot/module setup (dev->base_addr), or — when none
 * was given — a fixed table of candidate ISA I/O ports. On success
 * the device is registered; on any failure all claimed resources
 * (MCA slot, I/O region, IRQ, the net_device itself) are released.
 *
 * Returns the registered net_device, or ERR_PTR(-errno).
 */
struct net_device __init *smctr_probe(int unit)
{
        struct net_device *dev = alloc_trdev(sizeof(struct net_local));
        static const unsigned ports[] = {
                0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300,
                0x320, 0x340, 0x360, 0x380, 0
        };
        const unsigned *port;
        int err = 0;

        if (!dev)
                return ERR_PTR(-ENOMEM);

        if (unit >= 0) {
                sprintf(dev->name, "tr%d", unit);
                netdev_boot_setup_check(dev);
        }

        if (dev->base_addr > 0x1ff)     /* Check a single specified location. */
                err = smctr_probe1(dev, dev->base_addr);
        else if(dev->base_addr != 0)    /* Don't probe at all. */
                err =-ENXIO;
        else {
                /* Auto-probe the known ISA port table. */
                for (port = ports; *port; port++) {
                        err = smctr_probe1(dev, *port);
                        if (!err)
                                break;
                }
        }
        if (err)
                goto out;
        err = register_netdev(dev);
        if (err)
                goto out1;
        return dev;
out1:
#ifdef CONFIG_MCA_LEGACY
        /* Give the MCA slot back if smctr_chk_mca() claimed one. */
        { struct net_local *tp = netdev_priv(dev);
          if (tp->slot_num)
                mca_mark_as_unused(tp->slot_num);
        }
#endif
        release_region(dev->base_addr, SMCTR_IO_EXTENT);
        free_irq(dev->irq, dev);
out:
        free_netdev(dev);
        return ERR_PTR(err);
}
3618
/* net_device callbacks for this driver: open/close, packet transmit,
 * tx watchdog, statistics, and multicast/receive-mode updates.
 */
static const struct net_device_ops smctr_netdev_ops = {
        .ndo_open          = smctr_open,
        .ndo_stop          = smctr_close,
        .ndo_start_xmit    = smctr_send_packet,
        .ndo_tx_timeout    = smctr_timeout,
        .ndo_get_stats     = smctr_get_stats,
        .ndo_set_rx_mode   = smctr_set_multicast_list,
};
3627
/* Probe for an adapter at @ioaddr: first as ISA, then as MCA if the
 * ISA check fails. On success the adapter RAM window is mapped, the
 * firmware is loaded, the ring speed is taken from the 'ringspeed'
 * module parameter, and the net_device ops/watchdog are wired up.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init smctr_probe1(struct net_device *dev, int ioaddr)
{
        static unsigned version_printed;
        struct net_local *tp = netdev_priv(dev);
        int err;
        __u32 *ram;

        if(smctr_debug && version_printed++ == 0)
                printk(version);

        spin_lock_init(&tp->lock);
        dev->base_addr = ioaddr;

        /* Actually detect an adapter now. */
        err = smctr_chk_isa(dev);
        if(err < 0)
        {
                if ((err = smctr_chk_mca(dev)) < 0) {
                        err = -ENODEV;
                        goto out;
                }
        }

        /* NOTE(review): tp was already fetched above; this re-fetch is
         * redundant but harmless.
         */
        tp = netdev_priv(dev);
        dev->mem_start = tp->ram_base;
        dev->mem_end = dev->mem_start + 0x10000;
        /* Store the kernel virtual address of the shared RAM window in
         * ram_access as a raw __u32 — NOTE(review): this assumes
         * 32-bit pointers; verify before building for 64-bit.
         */
        ram = (__u32 *)phys_to_virt(dev->mem_start);
        tp->ram_access = *(__u32 *)&ram;
        tp->status = NOT_INITIALIZED;

        err = smctr_load_firmware(dev);
        if(err != UCODE_PRESENT && err != SUCCESS)
        {
                printk(KERN_ERR "%s: Firmware load failed (%d)\n", dev->name, err);
                err = -EIO;
                goto out;
        }

        /* Allow user to specify ring speed on module insert. */
        if(ringspeed == 4)
                tp->media_type = MEDIA_UTP_4;
        else
                tp->media_type = MEDIA_UTP_16;

        printk(KERN_INFO "%s: %s %s at Io %#4x, Irq %d, Rom %#4x, Ram %#4x.\n",
                dev->name, smctr_name, smctr_model,
                (unsigned int)dev->base_addr,
                dev->irq, tp->rom_base, tp->ram_base);

        dev->netdev_ops = &smctr_netdev_ops;
        dev->watchdog_timeo = HZ;
        return 0;

out:
        return err;
}
3684
/* Process one received MAC frame.
 *
 * @rmf:       the MAC frame (header + subvectors)
 * @size:      frame length in bytes
 * @dev:       owning device
 * @rx_status: FCB receive status bits for this frame
 *
 * With a destination-address match (FCB_RX_STATUS_DA_MATCHED) the
 * frame is dispatched on its major vector id: INIT, CHG_PARM,
 * RQ_ADDR/RQ_ATTCH/RQ_STATE and TX_FORWARD are handled by the ring
 * station and answered with a response/report frame; CRS/REM/RPS
 * report frames are ignored; anything else is treated as an
 * attention MAC frame (xframe = 0). Without a DA match (promiscuous
 * mode) the frame is only classified as extended (xframe = 1) or
 * attention (xframe = 0).
 *
 * Finally the raw frame is copied into an skb and passed up the
 * stack, but only if the receive mask accepts that class
 * (ACCEPT_ATT_MAC_FRAMES / ACCEPT_EXT_MAC_FRAMES).
 *
 * Returns 0 on success, a handler's error/response code, or -ENOMEM
 * if no skb could be allocated.
 */
static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
        struct net_device *dev, __u16 rx_status)
{
        struct net_local *tp = netdev_priv(dev);
        struct sk_buff *skb;
        __u16 rcode, correlator;
        int err = 0;
        __u8 xframe = 1;        /* assume extended MAC frame until shown otherwise */

        /* Vector length arrives in wire byte order; swap for local use. */
        rmf->vl = SWAP_BYTES(rmf->vl);
        if(rx_status & FCB_RX_STATUS_DA_MATCHED)
        {
                switch(rmf->vc)
                {
                        /* Received MAC Frames Processed by RS. */
                        case INIT:
                                if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED)
                                {
                                        return rcode;
                                }

                                if((err = smctr_send_rsp(dev, rmf, rcode,
                                        correlator)))
                                {
                                        return err;
                                }
                                break;

                        case CHG_PARM:
                                if((rcode = smctr_rcv_chg_param(dev, rmf,
                                        &correlator)) ==HARDWARE_FAILED)
                                {
                                        return rcode;
                                }

                                if((err = smctr_send_rsp(dev, rmf, rcode,
                                        correlator)))
                                {
                                        return err;
                                }
                                break;

                        case RQ_ADDR:
                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
                                        rmf, &correlator)) != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return rcode;
                                        else
                                                return smctr_send_rsp(dev, rmf,
                                                        rcode, correlator);
                                }

                                if((err = smctr_send_rpt_addr(dev, rmf,
                                        correlator)))
                                {
                                        return err;
                                }
                                break;

                        case RQ_ATTCH:
                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
                                        rmf, &correlator)) != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return rcode;
                                        else
                                                return smctr_send_rsp(dev, rmf,
                                                        rcode,
                                                        correlator);
                                }

                                if((err = smctr_send_rpt_attch(dev, rmf,
                                        correlator)))
                                {
                                        return err;
                                }
                                break;

                        case RQ_STATE:
                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
                                        rmf, &correlator)) != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return rcode;
                                        else
                                                return smctr_send_rsp(dev, rmf,
                                                        rcode,
                                                        correlator);
                                }

                                if((err = smctr_send_rpt_state(dev, rmf,
                                        correlator)))
                                {
                                        return err;
                                }
                                break;

                        case TX_FORWARD: {
                                __u16 uninitialized_var(tx_fstatus);

                                if((rcode = smctr_rcv_tx_forward(dev, rmf))
                                        != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return rcode;
                                        else
                                                return smctr_send_rsp(dev, rmf,
                                                        rcode,
                                                        correlator);
                                }

                                if((err = smctr_send_tx_forward(dev, rmf,
                                        &tx_fstatus)) == HARDWARE_FAILED)
                                {
                                        return err;
                                }

                                if(err == A_FRAME_WAS_FORWARDED)
                                {
                                        if((err = smctr_send_rpt_tx_forward(dev,
                                                rmf, tx_fstatus))
                                                == HARDWARE_FAILED)
                                        {
                                                return err;
                                        }
                                }
                                break;
                        }

                        /* Received MAC Frames Processed by CRS/REM/RPS. */
                        case RSP:
                        case RQ_INIT:
                        case RPT_NEW_MON:
                        case RPT_SUA_CHG:
                        case RPT_ACTIVE_ERR:
                        case RPT_NN_INCMP:
                        case RPT_ERROR:
                        case RPT_ATTCH:
                        case RPT_STATE:
                        case RPT_ADDR:
                                break;

                        /* Rcvd Att. MAC Frame (if RXATMAC set) or UNKNOWN */
                        default:
                                xframe = 0;
                                if(!(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES))
                                {
                                        rcode = smctr_rcv_unknown(dev, rmf,
                                                &correlator);
                                        if((err = smctr_send_rsp(dev, rmf,rcode,
                                                correlator)))
                                        {
                                                return err;
                                        }
                                }

                                break;
                }
        }
        else
        {
                /* 1. DA doesn't match (Promiscuous Mode).
                 * 2. Parse for Extended MAC Frame Type.
                 */
                switch(rmf->vc)
                {
                        case RSP:
                        case INIT:
                        case RQ_INIT:
                        case RQ_ADDR:
                        case RQ_ATTCH:
                        case RQ_STATE:
                        case CHG_PARM:
                        case RPT_ADDR:
                        case RPT_ERROR:
                        case RPT_ATTCH:
                        case RPT_STATE:
                        case RPT_NEW_MON:
                        case RPT_SUA_CHG:
                        case RPT_NN_INCMP:
                        case RPT_ACTIVE_ERR:
                                break;

                        default:
                                xframe = 0;
                                break;
                }
        }

        /* NOTE: UNKNOWN MAC frames will NOT be passed up unless
         * ACCEPT_ATT_MAC_FRAMES is set.
         */
        if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) &&
                (xframe == (__u8)0)) ||
                ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES) &&
                (xframe == (__u8)1)))
        {
                /* Restore wire byte order before handing the frame up. */
                rmf->vl = SWAP_BYTES(rmf->vl);

                if (!(skb = dev_alloc_skb(size)))
                        return -ENOMEM;
                skb->len = size;

                /* Slide data into a sleek skb. */
                skb_put(skb, skb->len);
                skb_copy_to_linear_data(skb, rmf, skb->len);

                /* Update Counters */
                tp->MacStat.rx_packets++;
                tp->MacStat.rx_bytes += skb->len;

                /* Kick the packet on up. */
                skb->protocol = tr_type_trans(skb, dev);
                netif_rx(skb);
                err = 0;
        }

        return err;
}
3905
3906/* Adapter RAM test. Incremental word ODD boundary data test. */
3907static int smctr_ram_memory_test(struct net_device *dev)
3908{
3909 struct net_local *tp = netdev_priv(dev);
3910 __u16 page, pages_of_ram, start_pattern = 0, word_pattern = 0,
3911 word_read = 0, err_word = 0, err_pattern = 0;
3912 unsigned int err_offset;
3913 __u32 j, pword;
3914 __u8 err = 0;
3915
3916 if(smctr_debug > 10)
3917 printk(KERN_DEBUG "%s: smctr_ram_memory_test\n", dev->name);
3918
3919 start_pattern = 0x0001;
3920 pages_of_ram = tp->ram_size / tp->ram_usable;
3921 pword = tp->ram_access;
3922
3923 /* Incremental word ODD boundary test. */
3924 for(page = 0; (page < pages_of_ram) && (~err);
3925 page++, start_pattern += 0x8000)
3926 {
3927 smctr_set_page(dev, (__u8 *)(tp->ram_access
3928 + (page * tp->ram_usable * 1024) + 1));
3929 word_pattern = start_pattern;
3930
3931 for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1; j += 2)
3932 *(__u16 *)(pword + j) = word_pattern++;
3933
3934 word_pattern = start_pattern;
3935
3936 for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1 && (~err);
3937 j += 2, word_pattern++)
3938 {
3939 word_read = *(__u16 *)(pword + j);
3940 if(word_read != word_pattern)
3941 {
3942 err = (__u8)1;
3943 err_offset = j;
3944 err_word = word_read;
3945 err_pattern = word_pattern;
3946 return RAM_TEST_FAILED;
3947 }
3948 }
3949 }
3950
3951 /* Zero out memory. */
3952 for(page = 0; page < pages_of_ram && (~err); page++)
3953 {
3954 smctr_set_page(dev, (__u8 *)(tp->ram_access
3955 + (page * tp->ram_usable * 1024)));
3956 word_pattern = 0;
3957
3958 for(j = 0; j < (__u32)tp->ram_usable * 1024; j +=2)
3959 *(__u16 *)(pword + j) = word_pattern;
3960
3961 for(j =0; j < (__u32)tp->ram_usable * 1024 && (~err); j += 2)
3962 {
3963 word_read = *(__u16 *)(pword + j);
3964 if(word_read != word_pattern)
3965 {
3966 err = (__u8)1;
3967 err_offset = j;
3968 err_word = word_read;
3969 err_pattern = word_pattern;
3970 return RAM_TEST_FAILED;
3971 }
3972 }
3973 }
3974
3975 smctr_set_page(dev, (__u8 *)tp->ram_access);
3976
3977 return 0;
3978}
3979
/* Parse a received CHG_PARM MAC frame from the Configuration Report
 * Server and apply each recognized subvector (correlator, local ring
 * number, physical drop, error timer, authorized function class /
 * access priority).
 *
 * Validates that the frame came from a CRS, that the subvector
 * lengths sum exactly to the major vector length, and that all
 * subvectors required by R_CHG_PARM were present.
 *
 * Returns POSITIVE_ACK on success or an E_* response code describing
 * the first problem found; *correlator is filled in when a
 * CORRELATOR subvector is seen.
 */
static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
        __u16 *correlator)
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;
        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;

        /* This Frame can only come from a CRS */
        if((rmf->dc_sc & SC_MASK) != SC_CRS)
                return E_INAPPROPRIATE_SOURCE_CLASS;

        /* Remove MVID Length from total length. */
        vlen = (signed short)rmf->vl - 4;

        /* Point to First SVID */
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Search for Appropriate SVID's. */
        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                switch(rsv->svi)
                {
                        case CORRELATOR:
                                svectors |= F_CORRELATOR;
                                rcode = smctr_set_corr(dev, rsv, correlator);
                                break;

                        case LOCAL_RING_NUMBER:
                                svectors |= F_LOCAL_RING_NUMBER;
                                rcode = smctr_set_local_ring_num(dev, rsv);
                                break;

                        case ASSIGN_PHYSICAL_DROP:
                                svectors |= F_ASSIGN_PHYSICAL_DROP;
                                rcode = smctr_set_phy_drop(dev, rsv);
                                break;

                        case ERROR_TIMER_VALUE:
                                svectors |= F_ERROR_TIMER_VALUE;
                                rcode = smctr_set_error_timer_value(dev, rsv);
                                break;

                        case AUTHORIZED_FUNCTION_CLASS:
                                svectors |= F_AUTHORIZED_FUNCTION_CLASS;
                                rcode = smctr_set_auth_funct_class(dev, rsv);
                                break;

                        case AUTHORIZED_ACCESS_PRIORITY:
                                svectors |= F_AUTHORIZED_ACCESS_PRIORITY;
                                rcode = smctr_set_auth_access_pri(dev, rsv);
                                break;

                        default:
                                rcode = E_SUB_VECTOR_UNKNOWN;
                                break;
                }

                /* Let Sender Know if SUM of SV length's is
                 * larger then length in MVID length field
                 */
                if((vlen -= rsv->svl) < 0)
                        rcode = E_VECTOR_LENGTH_ERROR;

                /* NOTE(review): a subvector with svl == 0 would loop
                 * forever here — presumably never produced by
                 * conformant hardware; verify if hardening.
                 */
                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if(rcode == POSITIVE_ACK)
        {
                /* Let Sender Know if MVID length field
                 * is larger then SUM of SV length's
                 */
                if(vlen != 0)
                        rcode = E_VECTOR_LENGTH_ERROR;
                else
                {
                        /* Let Sender Know if Expected SVID Missing */
                        if((svectors & R_CHG_PARM) ^ R_CHG_PARM)
                                rcode = E_MISSING_SUB_VECTOR;
                }
        }

        return rcode;
}
4064
/* Parse a received INIT MAC frame from the Ring Parameter Server and
 * apply each recognized subvector (correlator, local ring number,
 * physical drop, error timer value).
 *
 * Validates the source class (must be RPS), that subvector lengths
 * sum exactly to the major vector length, and that all subvectors
 * required by R_INIT were present.
 *
 * Returns POSITIVE_ACK on success or an E_* response code;
 * *correlator is filled in when a CORRELATOR subvector is seen.
 */
static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
        __u16 *correlator)
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;
        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;

        /* This Frame can only come from a RPS */
        if((rmf->dc_sc & SC_MASK) != SC_RPS)
                return E_INAPPROPRIATE_SOURCE_CLASS;

        /* Remove MVID Length from total length. */
        vlen = (signed short)rmf->vl - 4;

        /* Point to First SVID */
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Search for Appropriate SVID's */
        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                switch(rsv->svi)
                {
                        case CORRELATOR:
                                svectors |= F_CORRELATOR;
                                rcode = smctr_set_corr(dev, rsv, correlator);
                                break;

                        case LOCAL_RING_NUMBER:
                                svectors |= F_LOCAL_RING_NUMBER;
                                rcode = smctr_set_local_ring_num(dev, rsv);
                                break;

                        case ASSIGN_PHYSICAL_DROP:
                                svectors |= F_ASSIGN_PHYSICAL_DROP;
                                rcode = smctr_set_phy_drop(dev, rsv);
                                break;

                        case ERROR_TIMER_VALUE:
                                svectors |= F_ERROR_TIMER_VALUE;
                                rcode = smctr_set_error_timer_value(dev, rsv);
                                break;

                        default:
                                rcode = E_SUB_VECTOR_UNKNOWN;
                                break;
                }

                /* Let Sender Know if SUM of SV length's is
                 * larger then length in MVID length field
                 */
                if((vlen -= rsv->svl) < 0)
                        rcode = E_VECTOR_LENGTH_ERROR;

                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if(rcode == POSITIVE_ACK)
        {
                /* Let Sender Know if MVID length field
                 * is larger then SUM of SV length's
                 */
                if(vlen != 0)
                        rcode = E_VECTOR_LENGTH_ERROR;
                else
                {
                        /* Let Sender Know if Expected SV Missing */
                        if((svectors & R_INIT) ^ R_INIT)
                                rcode = E_MISSING_SUB_VECTOR;
                }
        }

        return rcode;
}
4139
/* Parse a received TX_FORWARD MAC frame from the Configuration Report
 * Server and stage its FRAME_FORWARD subvector for transmission.
 *
 * Validates the source class (must be CRS), that subvector lengths
 * sum exactly to the major vector length, and that the mandatory
 * FRAME_FORWARD subvector (R_TX_FORWARD) was present.
 *
 * Returns POSITIVE_ACK on success or an E_* response code.
 */
static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;
        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;

        /* This Frame can only come from a CRS */
        if((rmf->dc_sc & SC_MASK) != SC_CRS)
                return E_INAPPROPRIATE_SOURCE_CLASS;

        /* Remove MVID Length from total length */
        vlen = (signed short)rmf->vl - 4;

        /* Point to First SVID */
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Search for Appropriate SVID's */
        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                switch(rsv->svi)
                {
                        case FRAME_FORWARD:
                                svectors |= F_FRAME_FORWARD;
                                rcode = smctr_set_frame_forward(dev, rsv,
                                        rmf->dc_sc);
                                break;

                        default:
                                rcode = E_SUB_VECTOR_UNKNOWN;
                                break;
                }

                /* Let Sender Know if SUM of SV length's is
                 * larger then length in MVID length field
                 */
                if((vlen -= rsv->svl) < 0)
                        rcode = E_VECTOR_LENGTH_ERROR;

                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if(rcode == POSITIVE_ACK)
        {
                /* Let Sender Know if MVID length field
                 * is larger then SUM of SV length's
                 */
                if(vlen != 0)
                        rcode = E_VECTOR_LENGTH_ERROR;
                else
                {
                        /* Let Sender Know if Expected SV Missing */
                        if((svectors & R_TX_FORWARD) ^ R_TX_FORWARD)
                                rcode = E_MISSING_SUB_VECTOR;
                }
        }

        return rcode;
}
4199
/* Parse a received RQ_ADDR, RQ_STATE or RQ_ATTCH MAC frame (they share
 * one layout: only a CORRELATOR subvector is expected).
 *
 * Validates that subvector lengths sum exactly to the major vector
 * length and that the correlator (R_RQ_ATTCH_STATE_ADDR) was present.
 * Unlike the CRS/RPS handlers, any source class is accepted.
 *
 * Returns POSITIVE_ACK on success or an E_* response code;
 * *correlator is filled in from the CORRELATOR subvector.
 */
static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
        MAC_HEADER *rmf, __u16 *correlator)
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;
        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;

        /* Remove MVID Length from total length */
        vlen = (signed short)rmf->vl - 4;

        /* Point to First SVID */
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Search for Appropriate SVID's */
        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                switch(rsv->svi)
                {
                        case CORRELATOR:
                                svectors |= F_CORRELATOR;
                                rcode = smctr_set_corr(dev, rsv, correlator);
                                break;

                        default:
                                rcode = E_SUB_VECTOR_UNKNOWN;
                                break;
                }

                /* Let Sender Know if SUM of SV length's is
                 * larger then length in MVID length field
                 */
                if((vlen -= rsv->svl) < 0)
                        rcode = E_VECTOR_LENGTH_ERROR;

                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if(rcode == POSITIVE_ACK)
        {
                /* Let Sender Know if MVID length field
                 * is larger then SUM of SV length's
                 */
                if(vlen != 0)
                        rcode = E_VECTOR_LENGTH_ERROR;
                else
                {
                        /* Let Sender Know if Expected SVID Missing */
                        if((svectors & R_RQ_ATTCH_STATE_ADDR)
                                ^ R_RQ_ATTCH_STATE_ADDR)
                                rcode = E_MISSING_SUB_VECTOR;
                }
        }

        return rcode;
}
4256
4257static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
4258 __u16 *correlator)
4259{
4260 MAC_SUB_VECTOR *rsv;
4261 signed short vlen;
4262
4263 *correlator = 0;
4264
4265 /* Remove MVID Length from total length */
4266 vlen = (signed short)rmf->vl - 4;
4267
4268 /* Point to First SVID */
4269 rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
4270
4271 /* Search for CORRELATOR for RSP to UNKNOWN */
4272 while((vlen > 0) && (*correlator == 0))
4273 {
4274 switch(rsv->svi)
4275 {
4276 case CORRELATOR:
4277 smctr_set_corr(dev, rsv, correlator);
4278 break;
4279
4280 default:
4281 break;
4282 }
4283
4284 vlen -= rsv->svl;
4285 rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
4286 }
4287
4288 return E_UNRECOGNIZED_VECTOR_ID;
4289}
4290
4291/*
4292 * Reset the 825 NIC and exit w:
4293 * 1. The NIC reset cleared (non-reset state), halted and un-initialized.
4294 * 2. TINT masked.
4295 * 3. CBUSY masked.
4296 * 4. TINT clear.
4297 * 5. CBUSY clear.
4298 */
4299static int smctr_reset_adapter(struct net_device *dev)
4300{
4301 struct net_local *tp = netdev_priv(dev);
4302 int ioaddr = dev->base_addr;
4303
4304 /* Reseting the NIC will put it in a halted and un-initialized state. */ smctr_set_trc_reset(ioaddr);
4305 mdelay(200); /* ~2 ms */
4306
4307 smctr_clear_trc_reset(ioaddr);
4308 mdelay(200); /* ~2 ms */
4309
4310 /* Remove any latched interrupts that occurred prior to reseting the
4311 * adapter or possibily caused by line glitches due to the reset.
4312 */
4313 outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR);
4314
4315 return 0;
4316}
4317
4318static int smctr_restart_tx_chain(struct net_device *dev, short queue)
4319{
4320 struct net_local *tp = netdev_priv(dev);
4321 int err = 0;
4322
4323 if(smctr_debug > 10)
4324 printk(KERN_DEBUG "%s: smctr_restart_tx_chain\n", dev->name);
4325
4326 if(tp->num_tx_fcbs_used[queue] != 0 &&
4327 tp->tx_queue_status[queue] == NOT_TRANSMITING)
4328 {
4329 tp->tx_queue_status[queue] = TRANSMITING;
4330 err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
4331 }
4332
4333 return err;
4334}
4335
4336static int smctr_ring_status_chg(struct net_device *dev)
4337{
4338 struct net_local *tp = netdev_priv(dev);
4339
4340 if(smctr_debug > 10)
4341 printk(KERN_DEBUG "%s: smctr_ring_status_chg\n", dev->name);
4342
4343 /* Check for ring_status_flag: whenever MONITOR_STATE_BIT
4344 * Bit is set, check value of monitor_state, only then we
4345 * enable and start transmit/receive timeout (if and only
4346 * if it is MS_ACTIVE_MONITOR_STATE or MS_STANDBY_MONITOR_STATE)
4347 */
4348 if(tp->ring_status_flags == MONITOR_STATE_CHANGED)
4349 {
4350 if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE) ||
4351 (tp->monitor_state == MS_STANDBY_MONITOR_STATE))
4352 {
4353 tp->monitor_state_ready = 1;
4354 }
4355 else
4356 {
4357 /* if adapter is NOT in either active monitor
4358 * or standby monitor state => Disable
4359 * transmit/receive timeout.
4360 */
4361 tp->monitor_state_ready = 0;
4362
4363 /* Ring speed problem, switching to auto mode. */
4364 if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE &&
4365 !tp->cleanup)
4366 {
4367 printk(KERN_INFO "%s: Incorrect ring speed switching.\n",
4368 dev->name);
4369 smctr_set_ring_speed(dev);
4370 }
4371 }
4372 }
4373
4374 if(!(tp->ring_status_flags & RING_STATUS_CHANGED))
4375 return 0;
4376
4377 switch(tp->ring_status)
4378 {
4379 case RING_RECOVERY:
4380 printk(KERN_INFO "%s: Ring Recovery\n", dev->name);
4381 break;
4382
4383 case SINGLE_STATION:
4384 printk(KERN_INFO "%s: Single Statinon\n", dev->name);
4385 break;
4386
4387 case COUNTER_OVERFLOW:
4388 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
4389 break;
4390
4391 case REMOVE_RECEIVED:
4392 printk(KERN_INFO "%s: Remove Received\n", dev->name);
4393 break;
4394
4395 case AUTO_REMOVAL_ERROR:
4396 printk(KERN_INFO "%s: Auto Remove Error\n", dev->name);
4397 break;
4398
4399 case LOBE_WIRE_FAULT:
4400 printk(KERN_INFO "%s: Lobe Wire Fault\n", dev->name);
4401 break;
4402
4403 case TRANSMIT_BEACON:
4404 printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);
4405 break;
4406
4407 case SOFT_ERROR:
4408 printk(KERN_INFO "%s: Soft Error\n", dev->name);
4409 break;
4410
4411 case HARD_ERROR:
4412 printk(KERN_INFO "%s: Hard Error\n", dev->name);
4413 break;
4414
4415 case SIGNAL_LOSS:
4416 printk(KERN_INFO "%s: Signal Loss\n", dev->name);
4417 break;
4418
4419 default:
4420 printk(KERN_INFO "%s: Unknown ring status change\n",
4421 dev->name);
4422 break;
4423 }
4424
4425 return 0;
4426}
4427
/* Drain completed frames from the current receive queue.
 *
 * For each completed FCB: error-free frames (or any frame when
 * ACCEPT_ERR_PACKETS is set) are copied out of adapter RAM. Non-MAC
 * (data) frames go straight up the stack in a fresh skb; MAC frames
 * are handed to smctr_process_rx_packet(). The FCB/BDB chain is
 * advanced after every frame.
 *
 * Returns 0 when all completed frames were consumed, otherwise
 * HARDWARE_FAILED (note err is reset per iteration and checked at the
 * bottom, so a non-SUCCESS status stops the loop after one frame).
 */
static int smctr_rx_frame(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        __u16 queue, status, rx_size, err = 0;
        __u8 *pbuff;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_rx_frame\n", dev->name);

        queue = tp->receive_queue_number;

        while((status = tp->rx_fcb_curr[queue]->frame_status) != SUCCESS)
        {
                err = HARDWARE_FAILED;

                /* Low 7 status bits clear = good frame. */
                if(((status & 0x007f) == 0) ||
                        ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0))
                {
                        /* frame length less the CRC (4 bytes) + FS (1 byte) */
                        rx_size = tp->rx_fcb_curr[queue]->frame_length - 5;

                        pbuff = smctr_get_rx_pointer(dev, queue);

                        smctr_set_page(dev, pbuff);
                        smctr_disable_16bit(dev);

                        /* pbuff points to addr within one page */
                        pbuff = (__u8 *)PAGE_POINTER(pbuff);

                        if(queue == NON_MAC_QUEUE)
                        {
                                struct sk_buff *skb;

                                skb = dev_alloc_skb(rx_size);
                                if (skb) {
                                        skb_put(skb, rx_size);

                                        skb_copy_to_linear_data(skb, pbuff, rx_size);

                                        /* Update Counters */
                                        tp->MacStat.rx_packets++;
                                        tp->MacStat.rx_bytes += skb->len;

                                        /* Kick the packet on up. */
                                        skb->protocol = tr_type_trans(skb, dev);
                                        netif_rx(skb);
                                } else {
                                        /* NOTE(review): allocation failure
                                         * silently drops the frame and is
                                         * not counted as rx_dropped.
                                         */
                                }
                        }
                        else
                                smctr_process_rx_packet((MAC_HEADER *)pbuff,
                                        rx_size, dev, status);
                }

                smctr_enable_16bit(dev);
                smctr_set_page(dev, (__u8 *)tp->ram_access);
                smctr_update_rx_chain(dev, queue);

                if(err != SUCCESS)
                        break;
        }

        return err;
}
4492
/* Transmit a Duplicate Address Test (DAT) MAC frame addressed to
 * ourselves and wait (up to ~10 s, polling every 1 ms) for it to
 * complete cleanly.
 *
 * Returns 0 on success, OUT_OF_RESOURCES if no transmit FCB was
 * available, INITIALIZE_FAILED if the frame did not complete or came
 * back with error/AC bits set, or a smctr_trc_send_packet() error.
 */
static int smctr_send_dat(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        unsigned int i, err;
        MAC_HEADER *tmf;
        FCBlock *fcb;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_send_dat\n", dev->name);

        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE,
                sizeof(MAC_HEADER))) == (FCBlock *)(-1L))
        {
                return OUT_OF_RESOURCES;
        }

        /* Initialize DAT Data Fields. */
        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
        tmf->ac = MSB(AC_FC_DAT);
        tmf->fc = LSB(AC_FC_DAT);

        /* DAT frame is sent to our own address. */
        for(i = 0; i < 6; i++)
        {
                tmf->sa[i] = dev->dev_addr[i];
                tmf->da[i] = dev->dev_addr[i];

        }

        tmf->vc = DAT;
        tmf->dc_sc = DC_RS | SC_RS;
        tmf->vl = 4;
        tmf->vl = SWAP_BYTES(tmf->vl);  /* wire byte order */

        /* Start Transmit. */
        if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
                return err;

        /* Wait for Transmit to Complete */
        for(i = 0; i < 10000; i++)
        {
                if(fcb->frame_status & FCB_COMMAND_DONE)
                        break;
                mdelay(1);
        }

        /* Check if GOOD frame Tx'ed. */
        if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
                fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
        {
                return INITIALIZE_FAILED;
        }

        /* De-allocated Tx FCB and Frame Buffer
         * The FCB must be de-allocated manually if executing with
         * interrupts disabled, other wise the ISR (LM_Service_Events)
         * will de-allocate it when the interrupt occurs.
         */
        tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
        smctr_update_tx_chain(dev, fcb, MAC_QUEUE);

        return 0;
}
4555
/* netdev tx-timeout callback (ndo_tx_timeout). */
static void smctr_timeout(struct net_device *dev)
{
        /*
         * If we get here, some higher level has decided we are broken.
         * There should really be a "kick me" function call instead.
         *
         * Resetting the token ring adapter takes a long time so just
         * fake transmission time and go on trying. Our own timeout
         * routine is in sktr_timer_chk()
         */
        dev->trans_start = jiffies; /* prevent tx timeout */
        netif_wake_queue(dev);
}
4569
4570/*
4571 * Gets skb from system, queues it and checks if it can be sent
4572 */
/*
 * Gets skb from system, queues it and checks if it can be sent
 * (ndo_start_xmit callback).
 *
 * The queue is stopped unconditionally, the skb is appended to the
 * driver's software SendSkbQueue (bounded by tp->QueueSkb slots), a
 * hardware send is attempted, and the queue is rewoken while slots
 * remain. Returns NETDEV_TX_BUSY only when no slot was free.
 */
static netdev_tx_t smctr_send_packet(struct sk_buff *skb,
        struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_send_packet\n", dev->name);

        /*
         * Block a transmit overlap
         */

        netif_stop_queue(dev);

        if(tp->QueueSkb == 0)
                return NETDEV_TX_BUSY; /* Return with tbusy set: queue full */

        tp->QueueSkb--;
        skb_queue_tail(&tp->SendSkbQueue, skb);
        smctr_hardware_send_packet(dev, tp);
        if(tp->QueueSkb > 0)
                netif_wake_queue(dev);

        return NETDEV_TX_OK;
}
4598
/* Transmit a LOBE_MEDIA_TEST MAC frame (null destination, two
 * WRAP_DATA subvectors) and wait up to ~10 s for completion.
 *
 * Returns 0 on success, OUT_OF_RESOURCES if no transmit FCB was
 * available, LOBE_MEDIA_TEST_FAILED if the frame did not complete or
 * completed with error/AC bits set, or a smctr_trc_send_packet()
 * error.
 */
static int smctr_send_lobe_media_test(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        MAC_SUB_VECTOR *tsv;
        MAC_HEADER *tmf;
        FCBlock *fcb;
        __u32 i;
        int err;

        if(smctr_debug > 15)
                printk(KERN_DEBUG "%s: smctr_send_lobe_media_test\n", dev->name);

        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr)
                + S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L))
        {
                return OUT_OF_RESOURCES;
        }

        /* Initialize DAT Data Fields. */
        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
        tmf->ac = MSB(AC_FC_LOBE_MEDIA_TEST);
        tmf->fc = LSB(AC_FC_LOBE_MEDIA_TEST);

        /* Null destination; the frame should wrap on the lobe. */
        for(i = 0; i < 6; i++)
        {
                tmf->da[i] = 0;
                tmf->sa[i] = dev->dev_addr[i];
        }

        tmf->vc = LOBE_MEDIA_TEST;
        tmf->dc_sc = DC_RS | SC_RS;
        tmf->vl = 4;

        /* Append two WRAP_DATA subvectors, growing vl as we go. */
        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
        smctr_make_wrap_data(dev, tsv);
        tmf->vl += tsv->svl;

        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_wrap_data(dev, tsv);
        tmf->vl += tsv->svl;

        /* Start Transmit. */
        tmf->vl = SWAP_BYTES(tmf->vl);
        if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
                return err;

        /* Wait for Transmit to Complete. (10 ms). */
        for(i=0; i < 10000; i++)
        {
                if(fcb->frame_status & FCB_COMMAND_DONE)
                        break;
                mdelay(1);
        }

        /* Check if GOOD frame Tx'ed */
        if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
                fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
        {
                return LOBE_MEDIA_TEST_FAILED;
        }

        /* De-allocated Tx FCB and Frame Buffer
         * The FCB must be de-allocated manually if executing with
         * interrupts disabled, other wise the ISR (LM_Service_Events)
         * will de-allocate it when the interrupt occurs.
         */
        tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
        smctr_update_tx_chain(dev, fcb, MAC_QUEUE);

        return 0;
}
4670
/* Build and transmit a RPT_ADDR (report address) MAC response frame.
 *
 * @rmf:        the request frame being answered (source of dc_sc and
 *              the 802.5 header addressing)
 * @correlator: correlator echoed back to the requester
 *
 * The response carries six subvectors: correlator, physical drop
 * number, upstream neighbor address, address modifier, group address,
 * and functional address; tmf->vl accumulates their lengths.
 *
 * Returns the smctr_trc_send_packet() result, or 0 if no transmit
 * FCB was available (the report is silently skipped).
 */
static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
        __u16 correlator)
{
        MAC_HEADER *tmf;
        MAC_SUB_VECTOR *tsv;
        FCBlock *fcb;

        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
                + S_CORRELATOR + S_PHYSICAL_DROP + S_UPSTREAM_NEIGHBOR_ADDRESS
                + S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS))
                == (FCBlock *)(-1L))
        {
                return 0;
        }

        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
        tmf->vc = RPT_ADDR;
        /* Respond to the requester's source class. */
        tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
        tmf->vl = 4;

        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ADDR);

        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
        smctr_make_corr(dev, tsv, correlator);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_phy_drop_num(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_upstream_neighbor_addr(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_addr_mod(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_group_addr(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_funct_addr(dev, tsv);

        tmf->vl += tsv->svl;

        /* Subtract out MVID and MVL which is
         * include in both vl and MAC_HEADER
         */
/*      fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
        fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
*/
        tmf->vl = SWAP_BYTES(tmf->vl);  /* wire byte order */

        return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
4728
/* Build and transmit a Report Station Attachments (RPT_ATTCH) MAC
 * response frame for the received request @rmf, echoing @correlator.
 * Returns 0 when no transmit FCB can be allocated, otherwise the
 * result of smctr_trc_send_packet().
 */
static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
        __u16 correlator)
{
        MAC_HEADER *tmf;
        MAC_SUB_VECTOR *tsv;
        FCBlock *fcb;

        /* Reserve space for the MAC header plus the five sub-vectors
         * appended below.
         */
        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
                + S_CORRELATOR + S_PRODUCT_INSTANCE_ID + S_FUNCTIONAL_ADDRESS
                + S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY))
                == (FCBlock *)(-1L))
        {
                return 0;
        }

        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
        tmf->vc = RPT_ATTCH;
        /* Reply destination class comes from the requester's source class. */
        tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
        tmf->vl = 4;    /* vector length starts with the 4-byte MVID/MVL */

        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ATTCH);

        /* Append sub-vectors; each helper sets tsv->svl to its size. */
        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
        smctr_make_corr(dev, tsv, correlator);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_product_id(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_funct_addr(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_auth_funct_class(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_access_pri(dev, tsv);

        tmf->vl += tsv->svl;

        /* Subtract out MVID and MVL which is
         * include in both vl and MAC_HEADER
         */
/*      fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
        fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
*/
        /* Adapter expects vl in big-endian byte order. */
        tmf->vl = SWAP_BYTES(tmf->vl);

        return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
4782
/* Build and transmit a Report Station State (RPT_STATE) MAC response
 * frame for the received request @rmf, echoing @correlator.
 * Returns 0 when no transmit FCB can be allocated, otherwise the
 * result of smctr_trc_send_packet().
 */
static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
        __u16 correlator)
{
        MAC_HEADER *tmf;
        MAC_SUB_VECTOR *tsv;
        FCBlock *fcb;

        /* Reserve space for the MAC header plus the four sub-vectors
         * appended below.
         */
        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
                + S_CORRELATOR + S_RING_STATION_VERSION_NUMBER
                + S_RING_STATION_STATUS + S_STATION_IDENTIFER))
                == (FCBlock *)(-1L))
        {
                return 0;
        }

        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
        tmf->vc = RPT_STATE;
        /* Reply destination class comes from the requester's source class. */
        tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
        tmf->vl = 4;    /* vector length starts with the 4-byte MVID/MVL */

        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_STATE);

        /* Append sub-vectors; each helper sets tsv->svl to its size. */
        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
        smctr_make_corr(dev, tsv, correlator);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_ring_station_version(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_ring_station_status(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_station_id(dev, tsv);

        tmf->vl += tsv->svl;

        /* Subtract out MVID and MVL which is
         * include in both vl and MAC_HEADER
         */
/*      fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
        fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
*/
        /* Adapter expects vl in big-endian byte order. */
        tmf->vl = SWAP_BYTES(tmf->vl);

        return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
4832
/* Build and transmit a Report Transmit Forward (RPT_TX_FORWARD) MAC
 * frame carrying the transmit status @tx_fstatus of a previously
 * forwarded frame.  Returns 0 when no transmit FCB can be allocated,
 * otherwise the result of smctr_trc_send_packet().
 */
static int smctr_send_rpt_tx_forward(struct net_device *dev,
        MAC_HEADER *rmf, __u16 tx_fstatus)
{
        MAC_HEADER *tmf;
        MAC_SUB_VECTOR *tsv;
        FCBlock *fcb;

        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
                + S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L))
        {
                return 0;
        }

        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
        tmf->vc = RPT_TX_FORWARD;
        /* Reply destination class comes from the requester's source class. */
        tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
        tmf->vl = 4;    /* vector length starts with the 4-byte MVID/MVL */

        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_TX_FORWARD);

        /* Single sub-vector: the transmit status code. */
        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
        smctr_make_tx_status_code(dev, tsv, tx_fstatus);

        tmf->vl += tsv->svl;

        /* Subtract out MVID and MVL which is
         * include in both vl and MAC_HEADER
         */
/*      fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
        fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
*/
        /* Adapter expects vl in big-endian byte order. */
        tmf->vl = SWAP_BYTES(tmf->vl);

        return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
4868
/* Build a generic RSP (response) MAC frame for the request @rmf.
 *
 * NOTE(review): this looks unfinished.  @rcode is never encoded into
 * the frame (even though S_RESPONSE_CODE space is reserved), tmf->vl
 * is never updated after the correlator sub-vector is added, the
 * frame is never handed to smctr_trc_send_packet(), and the allocated
 * FCB appears to be abandoned.  Confirm whether RSP frames are
 * actually required on this path before relying on it.
 */
static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
        __u16 rcode, __u16 correlator)
{
        MAC_HEADER *tmf;
        MAC_SUB_VECTOR *tsv;
        FCBlock *fcb;

        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
                + S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L))
        {
                return 0;
        }

        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
        tmf->vc = RSP;
        /* Reply destination class comes from the requester's source class. */
        tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
        tmf->vl = 4;

        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RSP);

        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
        smctr_make_corr(dev, tsv, correlator);

        return 0;
}
4894
/* Build and transmit a Request Initialization (RQ_INIT) MAC frame to
 * the Ring Parameter Server, retrying until the frame is both
 * address-recognized and copied (all AC bits set) or four error-free
 * transmissions have been attempted.  On success, advances the join
 * state machine via smctr_join_complete_state().
 *
 * Returns 0 if no FCB could be allocated, HARDWARE_FAILED on transmit
 * timeout, a transmit error code, or the join-state result.
 */
static int smctr_send_rq_init(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        MAC_HEADER *tmf;
        MAC_SUB_VECTOR *tsv;
        FCBlock *fcb;
        unsigned int i, count = 0;
        __u16 fstatus;
        int err;

        do {
                /* Reserve space for the header and four sub-vectors. */
                if(((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
                        + S_PRODUCT_INSTANCE_ID + S_UPSTREAM_NEIGHBOR_ADDRESS
                        + S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER))
                        == (FCBlock *)(-1L)))
                {
                        return 0;
                }

                tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
                tmf->vc = RQ_INIT;
                tmf->dc_sc = DC_RPS | SC_RS;
                tmf->vl = 4;    /* vector length grows with each sub-vector */

                smctr_make_8025_hdr(dev, NULL, tmf, AC_FC_RQ_INIT);

                /* Append sub-vectors; each helper sets tsv->svl. */
                tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
                smctr_make_product_id(dev, tsv);

                tmf->vl += tsv->svl;
                tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
                smctr_make_upstream_neighbor_addr(dev, tsv);

                tmf->vl += tsv->svl;
                tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
                smctr_make_ring_station_version(dev, tsv);

                tmf->vl += tsv->svl;
                tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
                smctr_make_addr_mod(dev, tsv);

                tmf->vl += tsv->svl;

                /* Subtract out MVID and MVL which is
                 * include in both vl and MAC_HEADER
                 */
/*              fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
                fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
*/
                /* Adapter expects vl in big-endian byte order. */
                tmf->vl = SWAP_BYTES(tmf->vl);

                if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
                        return err;

                /* Wait for Transmit to Complete (up to ~10 ms). */
                for(i = 0; i < 10000; i++)
                {
                        if(fcb->frame_status & FCB_COMMAND_DONE)
                                break;
                        mdelay(1);
                }

                /* Check if GOOD frame Tx'ed */
                fstatus = fcb->frame_status;

                if(!(fstatus & FCB_COMMAND_DONE))
                        return HARDWARE_FAILED;

                /* Count error-free transmissions toward the retry cap. */
                if(!(fstatus & FCB_TX_STATUS_E))
                        count++;

                /* De-allocated Tx FCB and Frame Buffer
                 * The FCB must be de-allocated manually if executing with
                 * interrupts disabled, other wise the ISR (LM_Service_Events)
                 * will de-allocate it when the interrupt occurs.
                 */
                tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
                smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
        } while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS));

        return smctr_join_complete_state(dev);
}
4977
/* Retransmit the payload of a received TX Forward MAC frame @rmf onto
 * the ring, reporting the hardware transmit status through
 * *@tx_fstatus.  The transmit buffer pointers are aimed directly at
 * the received frame's data area (zero-copy), skipping the embedded
 * vector code/length.
 *
 * Returns 0 if @rmf is the end of the forward chain (vl <= 18) or no
 * FCB is available, HARDWARE_FAILED on transmit timeout, an error
 * code from the transmit path, or A_FRAME_WAS_FORWARDED on success.
 */
static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
        __u16 *tx_fstatus)
{
        /* NOTE(review): tp is not referenced by name below, but the
         * TRC_POINTER() macro presumably expands to use it (as
         * SMC_PAGE_OFFSET() does) — confirm before removing.
         */
        struct net_local *tp = netdev_priv(dev);
        FCBlock *fcb;
        unsigned int i;
        int err;

        /* Check if this is the END POINT of the Transmit Forward Chain. */
        if(rmf->vl <= 18)
                return 0;

        /* Allocate Transmit FCB only by requesting 0 bytes
         * of data buffer.
         */
        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L))
                return 0;

        /* Set pointer to Transmit Frame Buffer to the data
         * portion of the received TX Forward frame, making
         * sure to skip over the Vector Code (vc) and Vector
         * length (vl).
         */
        fcb->bdb_ptr->trc_data_block_ptr = TRC_POINTER((__u32)rmf
                + sizeof(MAC_HEADER) + 2);
        fcb->bdb_ptr->data_block_ptr = (__u16 *)((__u32)rmf
                + sizeof(MAC_HEADER) + 2);

        /* Forwarded length excludes the MVID/MVL (4) and vc/vl (2). */
        fcb->frame_length = rmf->vl - 4 - 2;
        fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2;

        if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
                return err;

        /* Wait for Transmit to Complete (up to ~10 ms). */
        for(i = 0; i < 10000; i++)
        {
                if(fcb->frame_status & FCB_COMMAND_DONE)
                        break;
                mdelay(1);
        }

        /* Check if GOOD frame Tx'ed; if not, nudge the transmitter
         * once and poll again before declaring a hardware failure.
         */
        if(!(fcb->frame_status & FCB_COMMAND_DONE))
        {
                if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE)))
                        return err;

                for(i = 0; i < 10000; i++)
                {
                        if(fcb->frame_status & FCB_COMMAND_DONE)
                                break;
                        mdelay(1);
                }

                if(!(fcb->frame_status & FCB_COMMAND_DONE))
                        return HARDWARE_FAILED;
        }

        *tx_fstatus = fcb->frame_status;

        return A_FRAME_WAS_FORWARDED;
}
5041
5042static int smctr_set_auth_access_pri(struct net_device *dev,
5043 MAC_SUB_VECTOR *rsv)
5044{
5045 struct net_local *tp = netdev_priv(dev);
5046
5047 if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY)
5048 return E_SUB_VECTOR_LENGTH_ERROR;
5049
5050 tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]);
5051
5052 return POSITIVE_ACK;
5053}
5054
5055static int smctr_set_auth_funct_class(struct net_device *dev,
5056 MAC_SUB_VECTOR *rsv)
5057{
5058 struct net_local *tp = netdev_priv(dev);
5059
5060 if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS)
5061 return E_SUB_VECTOR_LENGTH_ERROR;
5062
5063 tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]);
5064
5065 return POSITIVE_ACK;
5066}
5067
5068static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
5069 __u16 *correlator)
5070{
5071 if(rsv->svl != S_CORRELATOR)
5072 return E_SUB_VECTOR_LENGTH_ERROR;
5073
5074 *correlator = (rsv->svv[0] << 8 | rsv->svv[1]);
5075
5076 return POSITIVE_ACK;
5077}
5078
/* Program the adapter's soft-error report timer from an Error Timer
 * Value sub-vector.  The 2-byte value is scaled by 10 before being
 * written to the RW_TER_THRESHOLD register (units not documented here
 * — presumably converting tens-of-ms to ms; confirm against the TRC
 * data sheet).  Returns POSITIVE_ACK on success,
 * E_SUB_VECTOR_LENGTH_ERROR on a bad length, or a smctr_wait_cmd()
 * error code.
 *
 * NOTE(review): the return value of smctr_issue_write_word_cmd() is
 * ignored; only the subsequent command-completion wait is checked.
 */
static int smctr_set_error_timer_value(struct net_device *dev,
        MAC_SUB_VECTOR *rsv)
{
        __u16 err_tval;
        int err;

        if(rsv->svl != S_ERROR_TIMER_VALUE)
                return E_SUB_VECTOR_LENGTH_ERROR;

        err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10;

        smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval);

        if((err = smctr_wait_cmd(dev)))
                return err;

        return POSITIVE_ACK;
}
5097
5098static int smctr_set_frame_forward(struct net_device *dev,
5099 MAC_SUB_VECTOR *rsv, __u8 dc_sc)
5100{
5101 if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD))
5102 return E_SUB_VECTOR_LENGTH_ERROR;
5103
5104 if((dc_sc & DC_MASK) != DC_CRS)
5105 {
5106 if(rsv->svl >= 2 && rsv->svl < 20)
5107 return E_TRANSMIT_FORWARD_INVALID;
5108
5109 if((rsv->svv[0] != 0) || (rsv->svv[1] != 0))
5110 return E_TRANSMIT_FORWARD_INVALID;
5111 }
5112
5113 return POSITIVE_ACK;
5114}
5115
5116static int smctr_set_local_ring_num(struct net_device *dev,
5117 MAC_SUB_VECTOR *rsv)
5118{
5119 struct net_local *tp = netdev_priv(dev);
5120
5121 if(rsv->svl != S_LOCAL_RING_NUMBER)
5122 return E_SUB_VECTOR_LENGTH_ERROR;
5123
5124 if(tp->ptr_local_ring_num)
5125 *(__u16 *)(tp->ptr_local_ring_num)
5126 = (rsv->svv[0] << 8 | rsv->svv[1]);
5127
5128 return POSITIVE_ACK;
5129}
5130
/* Raise the "control attention" signal to the TRC, using the register
 * appropriate for the fitted bus-interface chip: a single write to HWR
 * for the 585 chip, or a pulse (set then clear) of the CA bit in CSR
 * for other chips.  Always returns 0.
 */
static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        int ioaddr = dev->base_addr;

        if(tp->bic_type == BIC_585_CHIP)
                outb((tp->trc_mask | HWR_CA), ioaddr + HWR);
        else
        {
                /* Pulse CA: assert, then restore the bare mask. */
                outb((tp->trc_mask | CSR_CA), ioaddr + CSR);
                outb(tp->trc_mask, ioaddr + CSR);
        }

        return 0;
}
5146
/* Multicast filtering is not implemented for this adapter; this
 * net_device hook only emits a debug trace.
 */
static void smctr_set_multicast_list(struct net_device *dev)
{
        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name);
}
5152
/* Select the shared-RAM page containing @buf by writing the page bits
 * of its offset (relative to the RAM window base) to the adapter's
 * page register.  Always returns 0.
 */
static int smctr_set_page(struct net_device *dev, __u8 *buf)
{
        struct net_local *tp = netdev_priv(dev);
        __u8 amask;
        __u32 tptr;

        /* Offset of buf inside the shared RAM window. */
        tptr = (__u32)buf - (__u32)tp->ram_access;
        /* Page number lives in the PR_PAGE_MASK bits, above bit 8. */
        amask = (__u8)((tptr & PR_PAGE_MASK) >> 8);
        outb(amask, dev->base_addr + PR);

        return 0;
}
5165
/* Write the Physical Drop Number from sub-vector @rsv into the
 * adapter.  Returns POSITIVE_ACK on success,
 * E_SUB_VECTOR_LENGTH_ERROR on a bad length, or a smctr_wait_cmd()
 * error code.  Note: the write command's own return value is ignored;
 * only command completion is checked.
 */
static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
{
        int err;

        if(rsv->svl != S_PHYSICAL_DROP)
                return E_SUB_VECTOR_LENGTH_ERROR;

        smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]);
        if((err = smctr_wait_cmd(dev)))
                return err;

        return POSITIVE_ACK;
}
5179
/* Reset the ring speed to the opposite of what it was. This auto-pilot
 * mode requires a complete reset and re-init of the adapter.
 *
 * Toggles between 16 Mb/s and 4 Mb/s UTP, then resets and fully
 * re-initializes the adapter and re-enables interrupts.  Returns 0 on
 * success or the first error from re-initialization / interrupt
 * enable.
 */
static int smctr_set_ring_speed(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        int err;

        /* Flip the media type; init_card_real will use the new value. */
        if(tp->media_type == MEDIA_UTP_16)
                tp->media_type = MEDIA_UTP_4;
        else
                tp->media_type = MEDIA_UTP_16;

        smctr_enable_16bit(dev);

        /* Re-Initialize adapter's internal registers */
        smctr_reset_adapter(dev);

        if((err = smctr_init_card_real(dev)))
                return err;

        smctr_enable_bic_int(dev);

        if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
                return err;

        smctr_disable_16bit(dev);

        return 0;
}
5210
/* Enable receive look-ahead and, on a 16-bit ISA bus, probe whether
 * the slot actually supports 8-bit access: a test word is written in
 * 16-bit mode, read back in 8-bit mode, and if it does not survive
 * the round trip the adapter is flagged FORCED_16BIT_MODE.  The
 * original shared-RAM word is restored afterwards.  Always returns 0.
 */
static int smctr_set_rx_look_ahead(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        __u16 sword, rword;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_set_rx_look_ahead_flag\n", dev->name);

        tp->adapter_flags &= ~(FORCED_16BIT_MODE);
        tp->adapter_flags |= RX_VALID_LOOKAHEAD;

        if(tp->adapter_bus == BUS_ISA16_TYPE)
        {
                /* Save the first shared-RAM word, then write a pattern. */
                sword = *((__u16 *)(tp->ram_access));
                *((__u16 *)(tp->ram_access)) = 0x1234;

                /* Read it back in 8-bit mode; a mismatch means 8-bit
                 * access does not work and 16-bit mode must be forced.
                 */
                smctr_disable_16bit(dev);
                rword = *((__u16 *)(tp->ram_access));
                smctr_enable_16bit(dev);

                if(rword != 0x1234)
                        tp->adapter_flags |= FORCED_16BIT_MODE;

                /* Restore the original word. */
                *((__u16 *)(tp->ram_access)) = sword;
        }

        return 0;
}
5239
/* Assert the TRC reset bit in the MSR register, preserving the other
 * MSR bits (read-modify-write).  Always returns 0.
 */
static int smctr_set_trc_reset(int ioaddr)
{
        __u8 r;

        r = inb(ioaddr + MSR);
        outb(MSR_RST | r, ioaddr + MSR);

        return 0;
}
5249
5250/*
5251 * This function can be called if the adapter is busy or not.
5252 */
5253static int smctr_setup_single_cmd(struct net_device *dev,
5254 __u16 command, __u16 subcommand)
5255{
5256 struct net_local *tp = netdev_priv(dev);
5257 unsigned int err;
5258
5259 if(smctr_debug > 10)
5260 printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name);
5261
5262 if((err = smctr_wait_while_cbusy(dev)))
5263 return err;
5264
5265 if((err = (unsigned int)smctr_wait_cmd(dev)))
5266 return err;
5267
5268 tp->acb_head->cmd_done_status = 0;
5269 tp->acb_head->cmd = command;
5270 tp->acb_head->subcmd = subcommand;
5271
5272 err = smctr_issue_resume_acb_cmd(dev);
5273
5274 return err;
5275}
5276
5277/*
5278 * This function can not be called with the adapter busy.
5279 */
5280static int smctr_setup_single_cmd_w_data(struct net_device *dev,
5281 __u16 command, __u16 subcommand)
5282{
5283 struct net_local *tp = netdev_priv(dev);
5284
5285 tp->acb_head->cmd_done_status = ACB_COMMAND_NOT_DONE;
5286 tp->acb_head->cmd = command;
5287 tp->acb_head->subcmd = subcommand;
5288 tp->acb_head->data_offset_lo
5289 = (__u16)TRC_POINTER(tp->misc_command_data);
5290
5291 return smctr_issue_resume_acb_cmd(dev);
5292}
5293
5294static char *smctr_malloc(struct net_device *dev, __u16 size)
5295{
5296 struct net_local *tp = netdev_priv(dev);
5297 char *m;
5298
5299 m = (char *)(tp->ram_access + tp->sh_mem_used);
5300 tp->sh_mem_used += (__u32)size;
5301
5302 return m;
5303}
5304
/* React to an adapter status change.  OPEN and CLOSED require no
 * action; INITIALIZED clears the group/functional address registers
 * and completes the interrupt-driven open sequence; anything else is
 * logged as unknown.  Always returns 0.
 */
static int smctr_status_chg(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_status_chg\n", dev->name);

        switch(tp->status)
        {
                case OPEN:
                        break;

                case CLOSED:
                        break;

                /* Interrupt driven open() completion. XXX */
                case INITIALIZED:
                        /* Start with no group or functional addresses set. */
                        tp->group_address_0 = 0;
                        tp->group_address[0] = 0;
                        tp->group_address[1] = 0;
                        tp->functional_address_0 = 0;
                        tp->functional_address[0] = 0;
                        tp->functional_address[1] = 0;
                        smctr_open_tr(dev);
                        break;

                default:
                        printk(KERN_INFO "%s: status change unknown %x\n",
                                dev->name, tp->status);
                        break;
        }

        return 0;
}
5339
/* Hand a prepared FCB to the TRC for transmission on @queue.  The FCB
 * is marked as the end of the chain; if it is not the only FCB on the
 * queue, its predecessor is set to interrupt on completion.  If the
 * queue is idle, transmission is kicked off with a resume command.
 * Returns 0, or the error from issuing the resume command.
 */
static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
        __u16 queue)
{
        struct net_local *tp = netdev_priv(dev);
        int err = 0;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_trc_send_packet\n", dev->name);

        /* This FCB terminates the chain. */
        fcb->info = FCB_CHAIN_END | FCB_ENABLE_TFS;
        if(tp->num_tx_fcbs[queue] != 1)
                fcb->back_ptr->info = FCB_INTERRUPT_ENABLE | FCB_ENABLE_TFS;

        /* Only kick the hardware if the queue isn't already running. */
        if(tp->tx_queue_status[queue] == NOT_TRANSMITING)
        {
                tp->tx_queue_status[queue] = TRANSMITING;
                err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
        }

        return err;
}
5361
/* Process completed transmit FCBs on @queue, reclaiming each one via
 * smctr_update_tx_chain() until an FCB with SUCCESS (pending) status
 * is reached.  Any frame status with bits in 0x7e00 set is treated as
 * a hardware failure.  For adapters in UMAC mode the per-frame AC/C
 * bits are decoded into a completion status (cstatus), though that
 * value is currently computed and discarded — presumably left over
 * from earlier error reporting.  Returns 0 or the first error.
 */
static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
{
        struct net_local *tp = netdev_priv(dev);
        __u16 status, err = 0;
        int cstatus;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_tx_complete\n", dev->name);

        /* Walk completed FCBs; SUCCESS means "still pending" here. */
        while((status = tp->tx_fcb_end[queue]->frame_status) != SUCCESS)
        {
                if(status & 0x7e00 )
                {
                        err = HARDWARE_FAILED;
                        break;
                }

                if((err = smctr_update_tx_chain(dev, tp->tx_fcb_end[queue],
                        queue)) != SUCCESS)
                        break;

                smctr_disable_16bit(dev);

                if(tp->mode_bits & UMAC)
                {
                        /* Decode address-recognized (AR) and frame-copied
                         * (CR) bits into a completion code.
                         */
                        if(!(status & (FCB_TX_STATUS_AR1 | FCB_TX_STATUS_AR2)))
                                cstatus = NO_SUCH_DESTINATION;
                        else
                        {
                                if(!(status & (FCB_TX_STATUS_CR1 | FCB_TX_STATUS_CR2)))
                                        cstatus = DEST_OUT_OF_RESOURCES;
                                else
                                {
                                        if(status & FCB_TX_STATUS_E)
                                                cstatus = MAX_COLLISIONS;
                                        else
                                                cstatus = SUCCESS;
                                }
                        }
                }
                else
                        cstatus = SUCCESS;

                /* BUG_QUEUE completions never count as errors. */
                if(queue == BUG_QUEUE)
                        err = SUCCESS;

                smctr_enable_16bit(dev);
                if(err != SUCCESS)
                        break;
        }

        return err;
}
5415
/* Copy up to @bytes of @skb's data into adapter shared RAM starting
 * at @pbuff, switching the shared-RAM page register as the copy
 * crosses page boundaries.  The copy stops when either the skb data
 * or the byte budget is exhausted.  Always returns 0.
 */
static unsigned short smctr_tx_move_frame(struct net_device *dev,
        struct sk_buff *skb, __u8 *pbuff, unsigned int bytes)
{
        struct net_local *tp = netdev_priv(dev);
        unsigned int ram_usable;
        __u32 flen, len, offset = 0;
        __u8 *frag, *page;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_tx_move_frame\n", dev->name);

        /* Usable window size in bytes (ram_usable is in KiB). */
        ram_usable = ((unsigned int)tp->ram_usable) << 10;
        frag = skb->data;
        flen = skb->len;

        while(flen > 0 && bytes > 0)
        {
                /* Map in the page containing the current destination. */
                smctr_set_page(dev, pbuff);

                offset = SMC_PAGE_OFFSET(pbuff);

                /* Clamp the chunk to the end of the mapped window... */
                if(offset + flen > ram_usable)
                        len = ram_usable - offset;
                else
                        len = flen;

                /* ...and to the remaining byte budget. */
                if(len > bytes)
                        len = bytes;

                page = (char *) (offset + tp->ram_access);
                memcpy(page, frag, len);

                flen -=len;
                bytes -= len;
                frag += len;
                pbuff += len;
        }

        return 0;
}
5456
5457/* Update the error statistic counters for this adapter. */
5458static int smctr_update_err_stats(struct net_device *dev)
5459{
5460 struct net_local *tp = netdev_priv(dev);
5461 struct tr_statistics *tstat = &tp->MacStat;
5462
5463 if(tstat->internal_errors)
5464 tstat->internal_errors
5465 += *(tp->misc_command_data + 0) & 0x00ff;
5466
5467 if(tstat->line_errors)
5468 tstat->line_errors += *(tp->misc_command_data + 0) >> 8;
5469
5470 if(tstat->A_C_errors)
5471 tstat->A_C_errors += *(tp->misc_command_data + 1) & 0x00ff;
5472
5473 if(tstat->burst_errors)
5474 tstat->burst_errors += *(tp->misc_command_data + 1) >> 8;
5475
5476 if(tstat->abort_delimiters)
5477 tstat->abort_delimiters += *(tp->misc_command_data + 2) >> 8;
5478
5479 if(tstat->recv_congest_count)
5480 tstat->recv_congest_count
5481 += *(tp->misc_command_data + 3) & 0x00ff;
5482
5483 if(tstat->lost_frames)
5484 tstat->lost_frames
5485 += *(tp->misc_command_data + 3) >> 8;
5486
5487 if(tstat->frequency_errors)
5488 tstat->frequency_errors += *(tp->misc_command_data + 4) & 0x00ff;
5489
5490 if(tstat->frame_copied_errors)
5491 tstat->frame_copied_errors
5492 += *(tp->misc_command_data + 4) >> 8;
5493
5494 if(tstat->token_errors)
5495 tstat->token_errors += *(tp->misc_command_data + 5) >> 8;
5496
5497 return 0;
5498}
5499
/* Return the current receive FCB and its buffer descriptors (BDBs) on
 * @queue to the hardware after the frame has been consumed.  The FCB
 * chain-end markers are advanced, and the BDB cursor is moved forward
 * by the number of descriptors the frame occupied, wrapping to the
 * head of the ring when it runs past the end.  Always returns 0.
 */
static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
{
        struct net_local *tp = netdev_priv(dev);
        FCBlock *fcb;
        BDBlock *bdb;
        __u16 size, len;

        fcb = tp->rx_fcb_curr[queue];
        len = fcb->frame_length;

        /* Recycle this FCB: it becomes the new chain end, and its
         * predecessor reverts to a normal entry.
         */
        fcb->frame_status = 0;
        fcb->info = FCB_CHAIN_END;
        fcb->back_ptr->info = FCB_WARNING;

        tp->rx_fcb_curr[queue] = tp->rx_fcb_curr[queue]->next_ptr;

        /* update RX BDBs: convert the frame length into a byte span of
         * whole BDB-sized buffers, rounding up partial buffers.
         */
        size = (len >> RX_BDB_SIZE_SHIFT);
        if(len & RX_DATA_BUFFER_SIZE_MASK)
                size += sizeof(BDBlock);
        size &= (~RX_BDB_SIZE_MASK);

        /* check if wrap around */
        bdb = (BDBlock *)((__u32)(tp->rx_bdb_curr[queue]) + (__u32)(size));
        if((__u32)bdb >= (__u32)tp->rx_bdb_end[queue])
        {
                bdb = (BDBlock *)((__u32)(tp->rx_bdb_head[queue])
                        + (__u32)(bdb) - (__u32)(tp->rx_bdb_end[queue]));
        }

        /* Advance the BDB chain-end marker to just before the new cursor. */
        bdb->back_ptr->info = BDB_CHAIN_END;
        tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END;
        tp->rx_bdb_curr[queue] = bdb;

        return 0;
}
5536
/* Reclaim a completed transmit FCB on @queue: release its buffer
 * memory, advance the queue's end pointer, and wake the netdev queue.
 * Returns 0 on success or HARDWARE_FAILED when the bookkeeping is
 * inconsistent (no FCBs in use, or the buffer accounting underflows).
 */
static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
        __u16 queue)
{
        struct net_local *tp = netdev_priv(dev);

        if(smctr_debug > 20)
                printk(KERN_DEBUG "smctr_update_tx_chain\n");

        if(tp->num_tx_fcbs_used[queue] <= 0)
                return HARDWARE_FAILED;
        else
        {
                /* Guard against accounting underflow; reset to zero
                 * rather than wrapping.
                 */
                if(tp->tx_buff_used[queue] < fcb->memory_alloc)
                {
                        tp->tx_buff_used[queue] = 0;
                        return HARDWARE_FAILED;
                }

                tp->tx_buff_used[queue] -= fcb->memory_alloc;

                /* if all transmit buffer are cleared
                 * need to set the tx_buff_curr[] to tx_buff_head[]
                 * otherwise, tx buffer will be segregate and cannot
                 * accommodate and buffer greater than (curr - head) and
                 * (end - curr) since we do not allow wrap around allocation.
                 */
                if(tp->tx_buff_used[queue] == 0)
                        tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];

                tp->num_tx_fcbs_used[queue]--;
                fcb->frame_status = 0;
                tp->tx_fcb_end[queue] = fcb->next_ptr;
                netif_wake_queue(dev);
                return 0;
        }
}
5573
5574static int smctr_wait_cmd(struct net_device *dev)
5575{
5576 struct net_local *tp = netdev_priv(dev);
5577 unsigned int loop_count = 0x20000;
5578
5579 if(smctr_debug > 10)
5580 printk(KERN_DEBUG "%s: smctr_wait_cmd\n", dev->name);
5581
5582 while(loop_count)
5583 {
5584 if(tp->acb_head->cmd_done_status & ACB_COMMAND_DONE)
5585 break;
5586 udelay(1);
5587 loop_count--;
5588 }
5589
5590 if(loop_count == 0)
5591 return HARDWARE_FAILED;
5592
5593 if(tp->acb_head->cmd_done_status & 0xff)
5594 return HARDWARE_FAILED;
5595
5596 return 0;
5597}
5598
5599static int smctr_wait_while_cbusy(struct net_device *dev)
5600{
5601 struct net_local *tp = netdev_priv(dev);
5602 unsigned int timeout = 0x20000;
5603 int ioaddr = dev->base_addr;
5604 __u8 r;
5605
5606 if(tp->bic_type == BIC_585_CHIP)
5607 {
5608 while(timeout)
5609 {
5610 r = inb(ioaddr + HWR);
5611 if((r & HWR_CBUSY) == 0)
5612 break;
5613 timeout--;
5614 }
5615 }
5616 else
5617 {
5618 while(timeout)
5619 {
5620 r = inb(ioaddr + CSR);
5621 if((r & CSR_CBUSY) == 0)
5622 break;
5623 timeout--;
5624 }
5625 }
5626
5627 if(timeout)
5628 return 0;
5629 else
5630 return HARDWARE_FAILED;
5631}
5632
5633#ifdef MODULE
5634
5635static struct net_device* dev_smctr[SMCTR_MAX_ADAPTERS];
5636static int io[SMCTR_MAX_ADAPTERS];
5637static int irq[SMCTR_MAX_ADAPTERS];
5638
5639MODULE_LICENSE("GPL");
5640MODULE_FIRMWARE("tr_smctr.bin");
5641
5642module_param_array(io, int, NULL, 0);
5643module_param_array(irq, int, NULL, 0);
5644module_param(ringspeed, int, 0);
5645
/* Allocate and register the token-ring net_device for module slot @n,
 * probing at the module-parameter io[n]/irq[n] addresses.  On any
 * failure all acquired resources (MCA slot, I/O region, IRQ, netdev)
 * are released and an ERR_PTR is returned.
 */
static struct net_device * __init setup_card(int n)
{
        struct net_device *dev = alloc_trdev(sizeof(struct net_local));
        int err;

        if (!dev)
                return ERR_PTR(-ENOMEM);

        dev->irq = irq[n];
        err = smctr_probe1(dev, io[n]);
        if (err)
                goto out;

        err = register_netdev(dev);
        if (err)
                goto out1;
        return dev;
 out1:
#ifdef CONFIG_MCA_LEGACY
        /* Probe succeeded, so an MCA slot may be held — release it. */
        { struct net_local *tp = netdev_priv(dev);
          if (tp->slot_num)
                  mca_mark_as_unused(tp->slot_num);
        }
#endif
        release_region(dev->base_addr, SMCTR_IO_EXTENT);
        free_irq(dev->irq, dev);
out:
        free_netdev(dev);
        return ERR_PTR(err);
}
5676
/* Module entry point: bring up each configured adapter.  If an io=
 * module parameter was given, probe the explicit io[]/irq[] slots;
 * otherwise fall back to auto-probing via smctr_probe(-1) for each
 * slot.  Succeeds if at least one adapter was found.
 */
int __init init_module(void)
{
        int i, found = 0;
        struct net_device *dev;

        for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) {
                dev = io[0]? setup_card(i) : smctr_probe(-1);
                if (!IS_ERR(dev)) {
                        ++found;
                        dev_smctr[i] = dev;
                }
        }

        return found ? 0 : -ENODEV;
}
5692
/* Module exit point: unregister every adapter brought up by
 * init_module() and release its MCA slot, I/O region, IRQ, and
 * net_device, in tear-down order.
 */
void __exit cleanup_module(void)
{
        int i;

        for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) {
                struct net_device *dev = dev_smctr[i];

                if (dev) {

                        unregister_netdev(dev);
#ifdef CONFIG_MCA_LEGACY
                        { struct net_local *tp = netdev_priv(dev);
                          if (tp->slot_num)
                                  mca_mark_as_unused(tp->slot_num);
                        }
#endif
                        release_region(dev->base_addr, SMCTR_IO_EXTENT);
                        if (dev->irq)
                                free_irq(dev->irq, dev);

                        free_netdev(dev);
                }
        }
}
5717#endif /* MODULE */
diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h
deleted file mode 100644
index 6e5700ab4fc3..000000000000
--- a/drivers/net/tokenring/smctr.h
+++ /dev/null
@@ -1,1585 +0,0 @@
1/* smctr.h: SMC Token Ring driver header for Linux
2 *
3 * Authors:
4 * - Jay Schulist <jschlst@samba.org>
5 */
6
7#ifndef __LINUX_SMCTR_H
8#define __LINUX_SMCTR_H
9
10#ifdef __KERNEL__
11
12#define MAX_TX_QUEUE 10
13
14#define SMC_HEADER_SIZE 14
15
16#define SMC_PAGE_OFFSET(X) (((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask)
17
18#define INIT 0x0D
19#define RQ_ATTCH 0x10
20#define RQ_STATE 0x0F
21#define RQ_ADDR 0x0E
22#define CHG_PARM 0x0C
23#define RSP 0x00
24#define TX_FORWARD 0x09
25
26#define AC_FC_DAT ((3<<13) | 1)
27#define DAT 0x07
28
29#define RPT_NEW_MON 0x25
30#define RPT_SUA_CHG 0x26
31#define RPT_ACTIVE_ERR 0x28
32#define RPT_NN_INCMP 0x27
33#define RPT_ERROR 0x29
34
35#define RQ_INIT 0x20
36#define RPT_ATTCH 0x24
37#define RPT_STATE 0x23
38#define RPT_ADDR 0x22
39
40#define POSITIVE_ACK 0x0001
41#define A_FRAME_WAS_FORWARDED 0x8888
42
43#define GROUP_ADDRESS 0x2B
44#define PHYSICAL_DROP 0x0B
45#define AUTHORIZED_ACCESS_PRIORITY 0x07
46#define AUTHORIZED_FUNCTION_CLASS 0x06
47#define FUNCTIONAL_ADDRESS 0x2C
48#define RING_STATION_STATUS 0x29
49#define TRANSMIT_STATUS_CODE 0x2A
50#define IBM_PASS_SOURCE_ADDR 0x01
51#define AC_FC_RPT_TX_FORWARD ((0<<13) | 0)
52#define AC_FC_RPT_STATE ((0<<13) | 0)
53#define AC_FC_RPT_ADDR ((0<<13) | 0)
54#define CORRELATOR 0x09
55
56#define POSITIVE_ACK 0x0001 /* */
57#define E_MAC_DATA_INCOMPLETE 0x8001 /* not used */
58#define E_VECTOR_LENGTH_ERROR 0x8002 /* */
59#define E_UNRECOGNIZED_VECTOR_ID 0x8003 /* */
60#define E_INAPPROPRIATE_SOURCE_CLASS 0x8004 /* */
61#define E_SUB_VECTOR_LENGTH_ERROR 0x8005 /* */
62#define E_TRANSMIT_FORWARD_INVALID 0x8006 /* def. by IBM */
63#define E_MISSING_SUB_VECTOR 0x8007 /* */
64#define E_SUB_VECTOR_UNKNOWN 0x8008 /* */
65#define E_MAC_HEADER_TOO_LONG 0x8009 /* */
66#define E_FUNCTION_DISABLED 0x800A /* not used */
67
68#define A_FRAME_WAS_FORWARDED 0x8888 /* used by send_TX_FORWARD */
69
70#define UPSTREAM_NEIGHBOR_ADDRESS 0x02
71#define LOCAL_RING_NUMBER 0x03
72#define ASSIGN_PHYSICAL_DROP 0x04
73#define ERROR_TIMER_VALUE 0x05
74#define AUTHORIZED_FUNCTION_CLASS 0x06
75#define AUTHORIZED_ACCESS_PRIORITY 0x07
76#define CORRELATOR 0x09
77#define PHYSICAL_DROP 0x0B
78#define RESPONSE_CODE 0x20
79#define ADDRESS_MODIFER 0x21
80#define PRODUCT_INSTANCE_ID 0x22
81#define RING_STATION_VERSION_NUMBER 0x23
82#define WRAP_DATA 0x26
83#define FRAME_FORWARD 0x27
84#define STATION_IDENTIFER 0x28
85#define RING_STATION_STATUS 0x29
86#define TRANSMIT_STATUS_CODE 0x2A
87#define GROUP_ADDRESS 0x2B
88#define FUNCTIONAL_ADDRESS 0x2C
89
90#define F_NO_SUB_VECTORS_FOUND 0x0000
91#define F_UPSTREAM_NEIGHBOR_ADDRESS 0x0001
92#define F_LOCAL_RING_NUMBER 0x0002
93#define F_ASSIGN_PHYSICAL_DROP 0x0004
94#define F_ERROR_TIMER_VALUE 0x0008
95#define F_AUTHORIZED_FUNCTION_CLASS 0x0010
96#define F_AUTHORIZED_ACCESS_PRIORITY 0x0020
97#define F_CORRELATOR 0x0040
98#define F_PHYSICAL_DROP 0x0080
99#define F_RESPONSE_CODE 0x0100
100#define F_PRODUCT_INSTANCE_ID 0x0200
101#define F_RING_STATION_VERSION_NUMBER 0x0400
102#define F_STATION_IDENTIFER 0x0800
103#define F_RING_STATION_STATUS 0x1000
104#define F_GROUP_ADDRESS 0x2000
105#define F_FUNCTIONAL_ADDRESS 0x4000
106#define F_FRAME_FORWARD 0x8000
107
108#define R_INIT 0x00
109#define R_RQ_ATTCH_STATE_ADDR 0x00
110#define R_CHG_PARM 0x00
111#define R_TX_FORWARD F_FRAME_FORWARD
112
113
114#define UPSTREAM_NEIGHBOR_ADDRESS 0x02
115#define ADDRESS_MODIFER 0x21
116#define RING_STATION_VERSION_NUMBER 0x23
117#define PRODUCT_INSTANCE_ID 0x22
118
119#define RPT_TX_FORWARD 0x2A
120
/*
 * AC/FC field values for the various MAC frame classes.  The expansions
 * are fully parenthesized: the original `(3<<13) | 0` form broke under
 * operator precedence when combined with other operators at the use site
 * (e.g. `AC_FC_INIT & mask` masked only the `0`, not the whole value).
 * Values are unchanged.
 */
#define AC_FC_INIT			((3<<13) | 0)	/* */
#define AC_FC_RQ_INIT			((3<<13) | 0)	/* */
#define AC_FC_RQ_ATTCH			((3<<13) | 0)	/* DC = SC of rx frame */
#define AC_FC_RQ_STATE			((3<<13) | 0)	/* DC = SC of rx frame */
#define AC_FC_RQ_ADDR			((3<<13) | 0)	/* DC = SC of rx frame */
#define AC_FC_CHG_PARM			((3<<13) | 0)	/* */
#define AC_FC_RSP			((0<<13) | 0)	/* DC = SC of rx frame */
#define AC_FC_RPT_ATTCH			((0<<13) | 0)
129
/*
 * Subvector sizes: payload length plus 2 bytes of subvector header.
 * Parenthesized so the sums survive operator precedence at the use site
 * (the original `2 + 2` style miscomputed e.g. `n * S_CORRELATOR`).
 * Values are unchanged.
 */
#define S_UPSTREAM_NEIGHBOR_ADDRESS	(6 + 2)
#define S_LOCAL_RING_NUMBER		(2 + 2)
#define S_ASSIGN_PHYSICAL_DROP		(4 + 2)
#define S_ERROR_TIMER_VALUE		(2 + 2)
#define S_AUTHORIZED_FUNCTION_CLASS	(2 + 2)
#define S_AUTHORIZED_ACCESS_PRIORITY	(2 + 2)
#define S_CORRELATOR			(2 + 2)
#define S_PHYSICAL_DROP			(4 + 2)
#define S_RESPONSE_CODE			(4 + 2)
#define S_ADDRESS_MODIFER		(2 + 2)
#define S_PRODUCT_INSTANCE_ID		(18 + 2)
#define S_RING_STATION_VERSION_NUMBER	(10 + 2)
#define S_STATION_IDENTIFER		(6 + 2)
#define S_RING_STATION_STATUS		(6 + 2)
#define S_GROUP_ADDRESS			(4 + 2)
#define S_FUNCTIONAL_ADDRESS		(4 + 2)
#define S_FRAME_FORWARD			(252 + 2)
#define S_TRANSMIT_STATUS_CODE		(2 + 2)
148
149#define ISB_IMC_RES0 0x0000 /* */
150#define ISB_IMC_MAC_TYPE_3 0x0001 /* MAC_ARC_INDICATE */
151#define ISB_IMC_MAC_ERROR_COUNTERS 0x0002 /* */
152#define ISB_IMC_RES1 0x0003 /* */
153#define ISB_IMC_MAC_TYPE_2 0x0004 /* QUE_MAC_INDICATE */
154#define ISB_IMC_TX_FRAME 0x0005 /* */
155#define ISB_IMC_END_OF_TX_QUEUE 0x0006 /* */
156#define ISB_IMC_NON_MAC_RX_RESOURCE 0x0007 /* */
157#define ISB_IMC_MAC_RX_RESOURCE 0x0008 /* */
158#define ISB_IMC_NON_MAC_RX_FRAME 0x0009 /* */
159#define ISB_IMC_MAC_RX_FRAME 0x000A /* */
160#define ISB_IMC_TRC_FIFO_STATUS 0x000B /* */
161#define ISB_IMC_COMMAND_STATUS 0x000C /* */
162#define ISB_IMC_MAC_TYPE_1 0x000D /* Self Removed */
163#define ISB_IMC_TRC_INTRNL_TST_STATUS 0x000E /* */
164#define ISB_IMC_RES2 0x000F /* */
165
166#define NON_MAC_RX_RESOURCE_BW 0x10 /* shifted right 8 bits */
167#define NON_MAC_RX_RESOURCE_FW 0x20 /* shifted right 8 bits */
168#define NON_MAC_RX_RESOURCE_BE 0x40 /* shifted right 8 bits */
169#define NON_MAC_RX_RESOURCE_FE 0x80 /* shifted right 8 bits */
170#define RAW_NON_MAC_RX_RESOURCE_BW 0x1000 /* */
171#define RAW_NON_MAC_RX_RESOURCE_FW 0x2000 /* */
172#define RAW_NON_MAC_RX_RESOURCE_BE 0x4000 /* */
173#define RAW_NON_MAC_RX_RESOURCE_FE 0x8000 /* */
174
175#define MAC_RX_RESOURCE_BW 0x10 /* shifted right 8 bits */
176#define MAC_RX_RESOURCE_FW 0x20 /* shifted right 8 bits */
177#define MAC_RX_RESOURCE_BE 0x40 /* shifted right 8 bits */
178#define MAC_RX_RESOURCE_FE 0x80 /* shifted right 8 bits */
179#define RAW_MAC_RX_RESOURCE_BW 0x1000 /* */
180#define RAW_MAC_RX_RESOURCE_FW 0x2000 /* */
181#define RAW_MAC_RX_RESOURCE_BE 0x4000 /* */
182#define RAW_MAC_RX_RESOURCE_FE 0x8000 /* */
183
184#define TRC_FIFO_STATUS_TX_UNDERRUN 0x40 /* shifted right 8 bits */
185#define TRC_FIFO_STATUS_RX_OVERRUN 0x80 /* shifted right 8 bits */
186#define RAW_TRC_FIFO_STATUS_TX_UNDERRUN 0x4000 /* */
187#define RAW_TRC_FIFO_STATUS_RX_OVERRUN 0x8000 /* */
188
189#define CSR_CLRTINT 0x08
190
/*
 * Extract the high/low byte of a 16-bit value.  The argument is now
 * parenthesized: the original `(__u16) X >> 8` applied the cast only to
 * the first term of a compound argument such as `a + b`, shifting the
 * wrong value.  Callers passing a plain variable are unaffected.
 */
#define MSB(X)			((__u8)(((__u16)(X)) >> 8))
#define LSB(X)			((__u8)(((__u16)(X)) & 0xff))
193
194#define AC_FC_LOBE_MEDIA_TEST ((3<<13) | 0)
195#define S_WRAP_DATA 248 + 2 /* 500 + 2 */
196#define WRAP_DATA 0x26
197#define LOBE_MEDIA_TEST 0x08
198
199/* Destination Class (dc) */
200
201#define DC_MASK 0xF0
202#define DC_RS 0x00
203#define DC_CRS 0x40
204#define DC_RPS 0x50
205#define DC_REM 0x60
206
207/* Source Classes (sc) */
208
209#define SC_MASK 0x0F
210#define SC_RS 0x00
211#define SC_CRS 0x04
212#define SC_RPS 0x05
213#define SC_REM 0x06
214
215#define PR 0x11
216#define PR_PAGE_MASK 0x0C000
217
218#define MICROCHANNEL 0x0008
219#define INTERFACE_CHIP 0x0010
220#define BOARD_16BIT 0x0040
221#define PAGED_RAM 0x0080
222#define WD8115TA (TOKEN_MEDIA | MICROCHANNEL | INTERFACE_CHIP | PAGED_RAM)
223#define WD8115T (TOKEN_MEDIA | INTERFACE_CHIP | BOARD_16BIT | PAGED_RAM)
224
225#define BRD_ID_8316 0x50
226
227#define r587_SER 0x001
228#define SER_DIN 0x80
229#define SER_DOUT 0x40
230#define SER_CLK 0x20
231#define SER_ECS 0x10
232#define SER_E806 0x08
233#define SER_PNP 0x04
234#define SER_BIO 0x02
235#define SER_16B 0x01
236
237#define r587_IDR 0x004
238#define IDR_IRQ_MASK 0x0F0
239#define IDR_DCS_MASK 0x007
240#define IDR_RWS 0x008
241
242
243#define r587_BIO 0x003
244#define BIO_ENB 0x080
245#define BIO_MASK 0x03F
246
247#define r587_PCR 0x005
248#define PCR_RAMS 0x040
249
250
251
252#define NUM_ADDR_BITS 8
253
254#define ISA_MAX_ADDRESS 0x00ffffff
255
256#define SMCTR_MAX_ADAPTERS 7
257
258#define MC_TABLE_ENTRIES 16
259
260#define MAXFRAGMENTS 32
261
262#define CHIP_REV_MASK 0x3000
263
264#define MAX_TX_QS 8
265#define NUM_TX_QS_USED 3
266
267#define MAX_RX_QS 2
268#define NUM_RX_QS_USED 2
269
270#define INTEL_DATA_FORMAT 0x4000
271#define INTEL_ADDRESS_POINTER_FORMAT 0x8000
272#define PAGE_POINTER(X) ((((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask) + tp->ram_access)
273#define SWAP_WORDS(X) (((X & 0xFFFF) << 16) | (X >> 16))
274
275#define INTERFACE_CHIP 0x0010 /* Soft Config Adapter */
276#define ADVANCED_FEATURES 0x0020 /* Adv. netw. interface features */
277#define BOARD_16BIT 0x0040 /* 16 bit capability */
278#define PAGED_RAM 0x0080 /* Adapter has paged RAM */
279
280#define PAGED_ROM 0x0100 /* Adapter has paged ROM */
281
282#define RAM_SIZE_UNKNOWN 0x0000 /* Unknown RAM size */
283#define RAM_SIZE_0K 0x0001 /* 0K RAM */
284#define RAM_SIZE_8K 0x0002 /* 8k RAM */
285#define RAM_SIZE_16K 0x0003 /* 16k RAM */
286#define RAM_SIZE_32K 0x0004 /* 32k RAM */
287#define RAM_SIZE_64K 0x0005 /* 64k RAM */
288#define RAM_SIZE_RESERVED_6 0x0006 /* Reserved RAM size */
289#define RAM_SIZE_RESERVED_7 0x0007 /* Reserved RAM size */
290#define RAM_SIZE_MASK 0x0007 /* Isolates RAM Size */
291
292#define TOKEN_MEDIA 0x0005
293
294#define BID_REG_0 0x00
295#define BID_REG_1 0x01
296#define BID_REG_2 0x02
297#define BID_REG_3 0x03
298#define BID_REG_4 0x04
299#define BID_REG_5 0x05
300#define BID_REG_6 0x06
301#define BID_REG_7 0x07
302#define BID_LAR_0 0x08
303#define BID_LAR_1 0x09
304#define BID_LAR_2 0x0A
305#define BID_LAR_3 0x0B
306#define BID_LAR_4 0x0C
307#define BID_LAR_5 0x0D
308
309#define BID_BOARD_ID_BYTE 0x0E
310#define BID_CHCKSM_BYTE 0x0F
311#define BID_LAR_OFFSET 0x08
312
313#define BID_MSZ_583_BIT 0x08
314#define BID_SIXTEEN_BIT_BIT 0x01
315
316#define BID_BOARD_REV_MASK 0x1E
317
318#define BID_MEDIA_TYPE_BIT 0x01
319#define BID_SOFT_CONFIG_BIT 0x20
320#define BID_RAM_SIZE_BIT 0x40
321#define BID_BUS_TYPE_BIT 0x80
322
323#define BID_CR 0x10
324
325#define BID_TXP 0x04 /* Transmit Packet Command */
326
327#define BID_TCR_DIFF 0x0D /* Transmit Configuration Register */
328
329#define BID_TCR_VAL 0x18 /* Value to Test 8390 or 690 */
330#define BID_PS0 0x00 /* Register Page Select 0 */
331#define BID_PS1 0x40 /* Register Page Select 1 */
332#define BID_PS2 0x80 /* Register Page Select 2 */
333#define BID_PS_MASK 0x3F /* For Masking Off Page Select Bits */
334
335#define BID_EEPROM_0 0x08
336#define BID_EEPROM_1 0x09
337#define BID_EEPROM_2 0x0A
338#define BID_EEPROM_3 0x0B
339#define BID_EEPROM_4 0x0C
340#define BID_EEPROM_5 0x0D
341#define BID_EEPROM_6 0x0E
342#define BID_EEPROM_7 0x0F
343
344#define BID_OTHER_BIT 0x02
345#define BID_ICR_MASK 0x0C
346#define BID_EAR_MASK 0x0F
347#define BID_ENGR_PAGE 0x0A0
348#define BID_RLA 0x10
349#define BID_EA6 0x80
350#define BID_RECALL_DONE_MASK 0x10
351#define BID_BID_EEPROM_OVERRIDE 0xFFB0
352#define BID_EXTRA_EEPROM_OVERRIDE 0xFFD0
353#define BID_EEPROM_MEDIA_MASK 0x07
354#define BID_STARLAN_TYPE 0x00
355#define BID_ETHERNET_TYPE 0x01
356#define BID_TP_TYPE 0x02
357#define BID_EW_TYPE 0x03
358#define BID_TOKEN_RING_TYPE 0x04
359#define BID_UTP2_TYPE 0x05
360#define BID_EEPROM_IRQ_MASK 0x18
361#define BID_PRIMARY_IRQ 0x00
362#define BID_ALTERNATE_IRQ_1 0x08
363#define BID_ALTERNATE_IRQ_2 0x10
364#define BID_ALTERNATE_IRQ_3 0x18
365#define BID_EEPROM_RAM_SIZE_MASK 0xE0
366#define BID_EEPROM_RAM_SIZE_RES1 0x00
367#define BID_EEPROM_RAM_SIZE_RES2 0x20
368#define BID_EEPROM_RAM_SIZE_8K 0x40
369#define BID_EEPROM_RAM_SIZE_16K 0x60
370#define BID_EEPROM_RAM_SIZE_32K 0x80
371#define BID_EEPROM_RAM_SIZE_64K 0xA0
372#define BID_EEPROM_RAM_SIZE_RES3 0xC0
373#define BID_EEPROM_RAM_SIZE_RES4 0xE0
374#define BID_EEPROM_BUS_TYPE_MASK 0x07
375#define BID_EEPROM_BUS_TYPE_AT 0x00
376#define BID_EEPROM_BUS_TYPE_MCA 0x01
377#define BID_EEPROM_BUS_TYPE_EISA 0x02
378#define BID_EEPROM_BUS_TYPE_NEC 0x03
379#define BID_EEPROM_BUS_SIZE_MASK 0x18
380#define BID_EEPROM_BUS_SIZE_8BIT 0x00
381#define BID_EEPROM_BUS_SIZE_16BIT 0x08
382#define BID_EEPROM_BUS_SIZE_32BIT 0x10
383#define BID_EEPROM_BUS_SIZE_64BIT 0x18
384#define BID_EEPROM_BUS_MASTER 0x20
385#define BID_EEPROM_RAM_PAGING 0x40
386#define BID_EEPROM_ROM_PAGING 0x80
387#define BID_EEPROM_PAGING_MASK 0xC0
388#define BID_EEPROM_LOW_COST 0x08
389#define BID_EEPROM_IO_MAPPED 0x10
390#define BID_EEPROM_HMI 0x01
391#define BID_EEPROM_AUTO_MEDIA_DETECT 0x01
392#define BID_EEPROM_CHIP_REV_MASK 0x0C
393
394#define BID_EEPROM_LAN_ADDR 0x30
395
396#define BID_EEPROM_MEDIA_OPTION 0x54
397#define BID_EEPROM_MEDIA_UTP 0x01
398#define BID_EEPROM_4MB_RING 0x08
399#define BID_EEPROM_16MB_RING 0x10
400#define BID_EEPROM_MEDIA_STP 0x40
401
402#define BID_EEPROM_MISC_DATA 0x56
403#define BID_EEPROM_EARLY_TOKEN_RELEASE 0x02
404
405#define CNFG_ID_8003E 0x6fc0
406#define CNFG_ID_8003S 0x6fc1
407#define CNFG_ID_8003W 0x6fc2
408#define CNFG_ID_8115TRA 0x6ec6
409#define CNFG_ID_8013E 0x61C8
410#define CNFG_ID_8013W 0x61C9
411#define CNFG_ID_BISTRO03E 0xEFE5
412#define CNFG_ID_BISTRO13E 0xEFD5
413#define CNFG_ID_BISTRO13W 0xEFD4
414#define CNFG_MSR_583 0x0
415#define CNFG_ICR_583 0x1
416#define CNFG_IAR_583 0x2
417#define CNFG_BIO_583 0x3
418#define CNFG_EAR_583 0x3
419#define CNFG_IRR_583 0x4
420#define CNFG_LAAR_584 0x5
421#define CNFG_GP2 0x7
422#define CNFG_LAAR_MASK 0x1F
423#define CNFG_LAAR_ZWS 0x20
424#define CNFG_LAAR_L16E 0x40
425#define CNFG_ICR_IR2_584 0x04
426#define CNFG_ICR_MASK 0x08
427#define CNFG_ICR_MSZ 0x08
428#define CNFG_ICR_RLA 0x10
429#define CNFG_ICR_STO 0x80
430#define CNFG_IRR_IRQS 0x60
431#define CNFG_IRR_IEN 0x80
432#define CNFG_IRR_ZWS 0x01
433#define CNFG_GP2_BOOT_NIBBLE 0x0F
434#define CNFG_IRR_OUT2 0x04
435#define CNFG_IRR_OUT1 0x02
436
437#define CNFG_SIZE_8KB 8
438#define CNFG_SIZE_16KB 16
439#define CNFG_SIZE_32KB 32
440#define CNFG_SIZE_64KB 64
441#define CNFG_SIZE_128KB 128
442#define CNFG_SIZE_256KB 256
443#define ROM_DISABLE 0x0
444
445#define CNFG_SLOT_ENABLE_BIT 0x08
446
447#define CNFG_POS_CONTROL_REG 0x096
448#define CNFG_POS_REG0 0x100
449#define CNFG_POS_REG1 0x101
450#define CNFG_POS_REG2 0x102
451#define CNFG_POS_REG3 0x103
452#define CNFG_POS_REG4 0x104
453#define CNFG_POS_REG5 0x105
454
455#define CNFG_ADAPTER_TYPE_MASK 0x0e
456
457#define SLOT_16BIT 0x0008
458#define INTERFACE_5X3_CHIP 0x0000 /* 0000 = 583 or 593 chips */
459#define NIC_690_BIT 0x0010 /* NIC is 690 */
460#define ALTERNATE_IRQ_BIT 0x0020 /* Alternate IRQ is used */
461#define INTERFACE_584_CHIP 0x0040 /* 0001 = 584 chip */
462#define INTERFACE_594_CHIP 0x0080 /* 0010 = 594 chip */
463#define INTERFACE_585_CHIP 0x0100 /* 0100 = 585/790 chip */
464#define INTERFACE_CHIP_MASK 0x03C0 /* Isolates Intfc Chip Type */
465
466#define BOARD_16BIT 0x0040
467#define NODE_ADDR_CKSUM 0xEE
468#define BRD_ID_8115T 0x04
469
470#define NIC_825_BIT 0x0400 /* TRC 83C825 NIC */
471#define NIC_790_BIT 0x0800 /* NIC is 83C790 Ethernet */
472
473#define CHIP_REV_MASK 0x3000
474
475#define HWR_CBUSY 0x02
476#define HWR_CA 0x01
477
478#define MAC_QUEUE 0
479#define NON_MAC_QUEUE 1
480#define BUG_QUEUE 2 /* NO RECEIVE QUEUE, ONLY TX */
481
482#define NUM_MAC_TX_FCBS 8
483#define NUM_MAC_TX_BDBS NUM_MAC_TX_FCBS
484#define NUM_MAC_RX_FCBS 7
485#define NUM_MAC_RX_BDBS 8
486
487#define NUM_NON_MAC_TX_FCBS 6
488#define NUM_NON_MAC_TX_BDBS NUM_NON_MAC_TX_FCBS
489
490#define NUM_NON_MAC_RX_BDBS 0 /* CALCULATED DYNAMICALLY */
491
492#define NUM_BUG_TX_FCBS 8
493#define NUM_BUG_TX_BDBS NUM_BUG_TX_FCBS
494
495#define MAC_TX_BUFFER_MEMORY 1024
496#define NON_MAC_TX_BUFFER_MEMORY (20 * 1024)
497#define BUG_TX_BUFFER_MEMORY (NUM_BUG_TX_FCBS * 32)
498
499#define RX_BUFFER_MEMORY 0 /* CALCULATED DYNAMICALLY */
500#define RX_DATA_BUFFER_SIZE 256
501#define RX_BDB_SIZE_SHIFT 3 /* log2(RX_DATA_BUFFER_SIZE)-log2(sizeof(BDBlock)) */
502#define RX_BDB_SIZE_MASK (sizeof(BDBlock) - 1)
503#define RX_DATA_BUFFER_SIZE_MASK (RX_DATA_BUFFER_SIZE-1)
504
505#define NUM_OF_INTERRUPTS 0x20
506
507#define NOT_TRANSMITING 0
508#define TRANSMITING 1
509
510#define TRC_INTERRUPT_ENABLE_MASK 0x7FF6
511
512#define UCODE_VERSION 0x58
513
514#define UCODE_SIZE_OFFSET 0x0000 /* WORD */
515#define UCODE_CHECKSUM_OFFSET 0x0002 /* WORD */
516#define UCODE_VERSION_OFFSET 0x0004 /* BYTE */
517
518#define CS_RAM_SIZE 0X2000
519#define CS_RAM_CHECKSUM_OFFSET 0x1FFE /* WORD 1FFE(MSB)-1FFF(LSB)*/
520#define CS_RAM_VERSION_OFFSET 0x1FFC /* WORD 1FFC(MSB)-1FFD(LSB)*/
521
522#define MISC_DATA_SIZE 128
523#define NUM_OF_ACBS 1
524
525#define ACB_COMMAND_NOT_DONE 0x0000 /* Init, command not done */
526#define ACB_COMMAND_DONE 0x8000 /* TRC says command done */
527#define ACB_COMMAND_STATUS_MASK 0x00FF /* low byte is status */
528#define ACB_COMMAND_SUCCESSFUL 0x0000 /* means cmd was successful */
529#define ACB_NOT_CHAIN_END 0x0000 /* tell TRC more CBs in chain */
530#define ACB_CHAIN_END 0x8000 /* tell TRC last CB in chain */
531#define ACB_COMMAND_NO_INTERRUPT 0x0000 /* tell TRC no INT after CB */
532#define ACB_COMMAND_INTERRUPT 0x2000 /* tell TRC to INT after CB */
533#define ACB_SUB_CMD_NOP 0x0000
534#define ACB_CMD_HIC_NOP 0x0080
535#define ACB_CMD_MCT_NOP 0x0000
536#define ACB_CMD_MCT_TEST 0x0001
537#define ACB_CMD_HIC_TEST 0x0081
538#define ACB_CMD_INSERT 0x0002
539#define ACB_CMD_REMOVE 0x0003
540#define ACB_CMD_MCT_WRITE_VALUE 0x0004
541#define ACB_CMD_HIC_WRITE_VALUE 0x0084
542#define ACB_CMD_MCT_READ_VALUE 0x0005
543#define ACB_CMD_HIC_READ_VALUE 0x0085
544#define ACB_CMD_INIT_TX_RX 0x0086
545#define ACB_CMD_INIT_TRC_TIMERS 0x0006
546#define ACB_CMD_READ_TRC_STATUS 0x0007
547#define ACB_CMD_CHANGE_JOIN_STATE 0x0008
548#define ACB_CMD_RESERVED_9 0x0009
549#define ACB_CMD_RESERVED_A 0x000A
550#define ACB_CMD_RESERVED_B 0x000B
551#define ACB_CMD_RESERVED_C 0x000C
552#define ACB_CMD_RESERVED_D 0x000D
553#define ACB_CMD_RESERVED_E 0x000E
554#define ACB_CMD_RESERVED_F 0x000F
555
556#define TRC_MAC_REGISTERS_TEST 0x0000
557#define TRC_INTERNAL_LOOPBACK 0x0001
558#define TRC_TRI_LOOPBACK 0x0002
559#define TRC_INTERNAL_ROM_TEST 0x0003
560#define TRC_LOBE_MEDIA_TEST 0x0004
561#define TRC_ANALOG_TEST 0x0005
562#define TRC_HOST_INTERFACE_REG_TEST 0x0003
563
564#define TEST_DMA_1 0x0000
565#define TEST_DMA_2 0x0001
566#define TEST_MCT_ROM 0x0002
567#define HIC_INTERNAL_DIAG 0x0003
568
569#define ABORT_TRANSMIT_PRIORITY_0 0x0001
570#define ABORT_TRANSMIT_PRIORITY_1 0x0002
571#define ABORT_TRANSMIT_PRIORITY_2 0x0004
572#define ABORT_TRANSMIT_PRIORITY_3 0x0008
573#define ABORT_TRANSMIT_PRIORITY_4 0x0010
574#define ABORT_TRANSMIT_PRIORITY_5 0x0020
575#define ABORT_TRANSMIT_PRIORITY_6 0x0040
576#define ABORT_TRANSMIT_PRIORITY_7 0x0080
577
578#define TX_PENDING_PRIORITY_0 0x0001
579#define TX_PENDING_PRIORITY_1 0x0002
580#define TX_PENDING_PRIORITY_2 0x0004
581#define TX_PENDING_PRIORITY_3 0x0008
582#define TX_PENDING_PRIORITY_4 0x0010
583#define TX_PENDING_PRIORITY_5 0x0020
584#define TX_PENDING_PRIORITY_6 0x0040
585#define TX_PENDING_PRIORITY_7 0x0080
586
587#define FCB_FRAME_LENGTH 0x100
588#define FCB_COMMAND_DONE 0x8000 /* FCB Word 0 */
589#define FCB_NOT_CHAIN_END 0x0000 /* FCB Word 1 */
590#define FCB_CHAIN_END 0x8000
591#define FCB_NO_WARNING 0x0000
592#define FCB_WARNING 0x4000
593#define FCB_INTERRUPT_DISABLE 0x0000
594#define FCB_INTERRUPT_ENABLE 0x2000
595
596#define FCB_ENABLE_IMA 0x0008
597#define FCB_ENABLE_TES 0x0004 /* Guarantee Tx before Int */
598#define FCB_ENABLE_TFS 0x0002 /* Post Tx Frame Status */
599#define FCB_ENABLE_NTC 0x0001 /* No Tx CRC */
600
601#define FCB_TX_STATUS_CR2 0x0004
602#define FCB_TX_STATUS_AR2 0x0008
603#define FCB_TX_STATUS_CR1 0x0040
604#define FCB_TX_STATUS_AR1 0x0080
605#define FCB_TX_AC_BITS (FCB_TX_STATUS_AR1+FCB_TX_STATUS_AR2+FCB_TX_STATUS_CR1+FCB_TX_STATUS_CR2)
606#define FCB_TX_STATUS_E 0x0100
607
608#define FCB_RX_STATUS_ANY_ERROR 0x0001
609#define FCB_RX_STATUS_FCS_ERROR 0x0002
610
611#define FCB_RX_STATUS_IA_MATCHED 0x0400
612#define FCB_RX_STATUS_IGA_BSGA_MATCHED 0x0500
613#define FCB_RX_STATUS_FA_MATCHED 0x0600
614#define FCB_RX_STATUS_BA_MATCHED 0x0700
615#define FCB_RX_STATUS_DA_MATCHED 0x0400
616#define FCB_RX_STATUS_SOURCE_ROUTING 0x0800
617
618#define BDB_BUFFER_SIZE 0x100
619#define BDB_NOT_CHAIN_END 0x0000
620#define BDB_CHAIN_END 0x8000
621#define BDB_NO_WARNING 0x0000
622#define BDB_WARNING 0x4000
623
624#define ERROR_COUNTERS_CHANGED 0x0001
625#define TI_NDIS_RING_STATUS_CHANGED 0x0002
626#define UNA_CHANGED 0x0004
627#define READY_TO_SEND_RQ_INIT 0x0008
628
629#define SCGB_ADDRESS_POINTER_FORMAT INTEL_ADDRESS_POINTER_FORMAT
630#define SCGB_DATA_FORMAT INTEL_DATA_FORMAT
631#define SCGB_MULTI_WORD_CONTROL 0
632#define SCGB_BURST_LENGTH 0x000E /* DMA Burst Length */
633
634#define SCGB_CONFIG (INTEL_ADDRESS_POINTER_FORMAT+INTEL_DATA_FORMAT+SCGB_BURST_LENGTH)
635
636#define ISCP_BLOCK_SIZE 0x0A
637#define RAM_SIZE 0x10000
638#define INIT_SYS_CONFIG_PTR_OFFSET (RAM_SIZE-ISCP_BLOCK_SIZE)
639#define SCGP_BLOCK_OFFSET 0
640
641#define SCLB_NOT_VALID 0x0000 /* Initially, SCLB not valid */
642#define SCLB_VALID 0x8000 /* Host tells TRC SCLB valid */
643#define SCLB_PROCESSED 0x0000 /* TRC says SCLB processed */
644#define SCLB_RESUME_CONTROL_NOT_VALID 0x0000 /* Initially, RC not valid */
645#define SCLB_RESUME_CONTROL_VALID 0x4000 /* Host tells TRC RC valid */
646#define SCLB_IACK_CODE_NOT_VALID 0x0000 /* Initially, IACK not valid */
647#define SCLB_IACK_CODE_VALID 0x2000 /* Host tells TRC IACK valid */
648#define SCLB_CMD_NOP 0x0000
649#define SCLB_CMD_REMOVE 0x0001
650#define SCLB_CMD_SUSPEND_ACB_CHAIN 0x0002
651#define SCLB_CMD_SET_INTERRUPT_MASK 0x0003
652#define SCLB_CMD_CLEAR_INTERRUPT_MASK 0x0004
653#define SCLB_CMD_RESERVED_5 0x0005
654#define SCLB_CMD_RESERVED_6 0x0006
655#define SCLB_CMD_RESERVED_7 0x0007
656#define SCLB_CMD_RESERVED_8 0x0008
657#define SCLB_CMD_RESERVED_9 0x0009
658#define SCLB_CMD_RESERVED_A 0x000A
659#define SCLB_CMD_RESERVED_B 0x000B
660#define SCLB_CMD_RESERVED_C 0x000C
661#define SCLB_CMD_RESERVED_D 0x000D
662#define SCLB_CMD_RESERVED_E 0x000E
663#define SCLB_CMD_RESERVED_F 0x000F
664
665#define SCLB_RC_ACB 0x0001 /* Action Command Block Chain */
666#define SCLB_RC_RES0 0x0002 /* Always Zero */
667#define SCLB_RC_RES1 0x0004 /* Always Zero */
668#define SCLB_RC_RES2 0x0008 /* Always Zero */
669#define SCLB_RC_RX_MAC_FCB 0x0010 /* RX_MAC_FCB Chain */
670#define SCLB_RC_RX_MAC_BDB 0x0020 /* RX_MAC_BDB Chain */
671#define SCLB_RC_RX_NON_MAC_FCB 0x0040 /* RX_NON_MAC_FCB Chain */
672#define SCLB_RC_RX_NON_MAC_BDB 0x0080 /* RX_NON_MAC_BDB Chain */
673#define SCLB_RC_TFCB0 0x0100 /* TX Priority 0 FCB Chain */
674#define SCLB_RC_TFCB1 0x0200 /* TX Priority 1 FCB Chain */
675#define SCLB_RC_TFCB2 0x0400 /* TX Priority 2 FCB Chain */
676#define SCLB_RC_TFCB3 0x0800 /* TX Priority 3 FCB Chain */
677#define SCLB_RC_TFCB4 0x1000 /* TX Priority 4 FCB Chain */
678#define SCLB_RC_TFCB5 0x2000 /* TX Priority 5 FCB Chain */
679#define SCLB_RC_TFCB6 0x4000 /* TX Priority 6 FCB Chain */
680#define SCLB_RC_TFCB7 0x8000 /* TX Priority 7 FCB Chain */
681
682#define SCLB_IMC_RES0 0x0001 /* */
683#define SCLB_IMC_MAC_TYPE_3 0x0002 /* MAC_ARC_INDICATE */
684#define SCLB_IMC_MAC_ERROR_COUNTERS 0x0004 /* */
685#define SCLB_IMC_RES1 0x0008 /* */
686#define SCLB_IMC_MAC_TYPE_2 0x0010 /* QUE_MAC_INDICATE */
687#define SCLB_IMC_TX_FRAME 0x0020 /* */
688#define SCLB_IMC_END_OF_TX_QUEUE 0x0040 /* */
689#define SCLB_IMC_NON_MAC_RX_RESOURCE 0x0080 /* */
690#define SCLB_IMC_MAC_RX_RESOURCE 0x0100 /* */
691#define SCLB_IMC_NON_MAC_RX_FRAME 0x0200 /* */
692#define SCLB_IMC_MAC_RX_FRAME 0x0400 /* */
693#define SCLB_IMC_TRC_FIFO_STATUS 0x0800 /* */
694#define SCLB_IMC_COMMAND_STATUS 0x1000 /* */
695#define SCLB_IMC_MAC_TYPE_1 0x2000 /* Self Removed */
696#define SCLB_IMC_TRC_INTRNL_TST_STATUS 0x4000 /* */
697#define SCLB_IMC_RES2 0x8000 /* */
698
699#define DMA_TRIGGER 0x0004
700#define FREQ_16MB_BIT 0x0010
701#define THDREN 0x0020
702#define CFG0_RSV1 0x0040
703#define CFG0_RSV2 0x0080
704#define ETREN 0x0100
705#define RX_OWN_BIT 0x0200
706#define RXATMAC 0x0400
707#define PROMISCUOUS_BIT 0x0800
708#define USETPT 0x1000
709#define SAVBAD_BIT 0x2000
710#define ONEQUE 0x4000
711#define NO_AUTOREMOVE 0x8000
712
713#define RX_FCB_AREA_8316 0x00000000
714#define RX_BUFF_AREA_8316 0x00000000
715
716#define TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access)
717#define RX_FCB_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_FCB_AREA_8316)
718#define RX_BUFF_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_BUFF_AREA_8316)
719
720// Offset 0: MSR - Memory Select Register
721//
722#define r587_MSR 0x000 // Register Offset
723//#define MSR_RST 0x080 // LAN Controller Reset
724#define MSR_MENB 0x040 // Shared Memory Enable
725#define MSR_RA18 0x020 // Ram Address bit 18 (583, 584, 587)
726#define MSR_RA17 0x010 // Ram Address bit 17 (583, 584, 585/790)
727#define MSR_RA16 0x008 // Ram Address bit 16 (583, 584, 585/790)
728#define MSR_RA15 0x004 // Ram Address bit 15 (583, 584, 585/790)
729#define MSR_RA14 0x002 // Ram Address bit 14 (583, 584, 585/790)
730#define MSR_RA13 0x001 // Ram Address bit 13 (583, 584, 585/790)
731
732#define MSR_MASK 0x03F // Mask for Address bits RA18-RA13 (583, 584, 587)
733
734#define MSR 0x00
735#define IRR 0x04
736#define HWR 0x04
737#define LAAR 0x05
738#define IMCCR 0x05
739#define LAR0 0x08
740#define BDID 0x0E // Adapter ID byte register offset
741#define CSR 0x10
742#define PR 0x11
743
744#define MSR_RST 0x80
745#define MSR_MEMB 0x40
746#define MSR_0WS 0x20
747
748#define FORCED_16BIT_MODE 0x0002
749
750#define INTERFRAME_SPACING_16 0x0003 /* 6 bytes */
751#define INTERFRAME_SPACING_4 0x0001 /* 2 bytes */
752#define MULTICAST_ADDRESS_BIT 0x0010
753#define NON_SRC_ROUTING_BIT 0x0020
754
755#define LOOPING_MODE_MASK 0x0007
756
757/*
758 * Decode firmware defines.
759 */
760#define SWAP_BYTES(X) ((X & 0xff) << 8) | (X >> 8)
761#define WEIGHT_OFFSET 5
762#define TREE_SIZE_OFFSET 9
763#define TREE_OFFSET 11
764
/* One node of the Huffman decode tree used to decompress the firmware
 * image (see the "Decode firmware defines" above).  Tag values are LEAF
 * and BRANCH, defined just below. */
typedef struct {
	__u8 llink;	/* left link -- presumably an index of the left child; confirm in decode code */
	__u8 tag;	/* LEAF or BRANCH */
	__u8 info;	/* node payload, used on decodes */
	__u8 rlink;	/* right link -- presumably an index of the right child */
} DECODE_TREE_NODE;
772
773#define ROOT 0 /* Branch value. */
774#define LEAF 0 /* Tag field value. */
775#define BRANCH 1 /* Tag field value. */
776
/*
 * Multicast Table Structure -- one entry per tracked multicast address
 * (the table holds MC_TABLE_ENTRIES of these).
 */
typedef struct {
	__u8 address[6];	/* 48-bit multicast MAC address */
	__u8 instance_count;	/* NOTE(review): name suggests a reference count; confirm in smctr.c */
} McTable;
784
/*
 * Fragment Descriptor Definition -- one contiguous piece of a frame.
 */
typedef struct {
	__u8 *fragment_ptr;	/* start of the fragment data */
	__u32 fragment_length;	/* length of the fragment in bytes */
} FragmentStructure;
792
/*
 * Data Buffer Structure Definition -- a scatter list of up to
 * MAXFRAGMENTS fragments making up one frame.
 */
typedef struct {
	__u32 fragment_count;	/* number of valid entries in fragment_list */
	FragmentStructure fragment_list[MAXFRAGMENTS];
} DataBufferStructure;
800
#pragma pack(1)
/* One entry of the Interrupt Status Block: a packed type/subtype byte
 * pair.  IType presumably takes the ISB_IMC_* codes above -- confirm in
 * the interrupt handler. */
typedef struct {
	__u8 IType;
	__u8 ISubtype;
} Interrupt_Status_Word;
806
#pragma pack(1)
/*
 * Buffer Descriptor Block (packed, shared with the TRC adapter).
 * "trc_" fields hold adapter-side addresses; the trailing host pointers
 * are driver bookkeeping.  The numeric comments are the running byte
 * offset at the END of each field, assuming 32-bit host pointers.
 */
typedef struct BDBlockType {
	__u16 info;			/* 02 */
	__u32 trc_next_ptr;		/* 06 */
	__u32 trc_data_block_ptr;	/* 10 */
	__u16 buffer_length;		/* 12 */

	__u16 *data_block_ptr;		/* 16 */
	struct BDBlockType *next_ptr;	/* 20 */
	struct BDBlockType *back_ptr;	/* 24 */
	__u8 filler[8];			/* 32 */
} BDBlock;
819
#pragma pack(1)
/*
 * Frame Control Block (packed, shared with the TRC adapter).  Describes
 * one frame; bdb_ptr chains to its buffer descriptors.  The numeric
 * comments are the running byte offset at the END of each field,
 * assuming 32-bit host pointers.
 */
typedef struct FCBlockType {
	__u16 frame_status;		/* 02 */
	__u16 info;			/* 04 */
	__u32 trc_next_ptr;		/* 08 */
	__u32 trc_bdb_ptr;		/* 12 */
	__u16 frame_length;		/* 14 */

	BDBlock *bdb_ptr;		/* 18 */
	struct FCBlockType *next_ptr;	/* 22 */
	struct FCBlockType *back_ptr;	/* 26 */
	__u16 memory_alloc;		/* 28 */
	__u8 filler[4];			/* 32 */

} FCBlock;
835
#pragma pack(1)
/*
 * Status Block (packed): ring error counters plus ring status words as
 * reported by the adapter.  NOTE(review): presumably filled in by the
 * READ_TRC_STATUS command -- confirm against smctr.c.
 */
typedef struct SBlockType{
	__u8 Internal_Error_Count;
	__u8 Line_Error_Count;
	__u8 AC_Error_Count;
	__u8 Burst_Error_Count;
	__u8 RESERVED_COUNTER_0;
	__u8 AD_TRANS_Count;
	__u8 RCV_Congestion_Count;
	__u8 Lost_FR_Error_Count;
	__u8 FREQ_Error_Count;
	__u8 FR_Copied_Error_Count;
	__u8 RESERVED_COUNTER_1;
	__u8 Token_Error_Count;

	__u16 TI_NDIS_Ring_Status;
	__u16 BCN_Type;
	__u16 Error_Code;
	__u16 SA_of_Last_AMP_SMP[3];	/* source address of last AMP/SMP frame */
	__u16 UNA[3];			/* upstream neighbor address */
	__u16 Ucode_Version_Number;
	__u16 Status_CHG_Indicate;
	__u16 RESERVED_STATUS_0;
} SBlock;
860
#pragma pack(1)
/*
 * Action Command Block (packed): one command (ACB_CMD_*) submitted to
 * the TRC.  The numeric comments are the running byte offset at the END
 * of each field, assuming 32-bit host pointers.
 */
typedef struct ACBlockType {
	__u16 cmd_done_status;		/* 02 */
	__u16 cmd_info;			/* 04 */
	__u32 trc_next_ptr;		/* 08 */
	__u16 cmd;			/* 10 */
	__u16 subcmd;			/* 12 */
	__u16 data_offset_lo;		/* 14 */
	__u16 data_offset_hi;		/* 16 */

	struct ACBlockType *next_ptr;	/* 20 */

	__u8 filler[12];		/* 32 */
} ACBlock;
875
#define NUM_OF_INTERRUPTS	0x20	/* duplicate of the identical definition above */

#pragma pack(1)
/* Interrupt Status Block (packed): a ring of interrupt status words. */
typedef struct {
	Interrupt_Status_Word IStatus[NUM_OF_INTERRUPTS];
} ISBlock;
882
#pragma pack(1)
/*
 * System Control Block (packed): host-to-TRC command mailbox (SCLB_*
 * values above).  The numeric comments are the running byte offset at
 * the END of each field.
 */
typedef struct {
	__u16 valid_command;		/* 02 */
	__u16 iack_code;		/* 04 */
	__u16 resume_control;		/* 06 */
	__u16 int_mask_control;		/* 08 */
	__u16 int_mask_state;		/* 10 */

	__u8 filler[6];			/* 16 */
} SCLBlock;
893
#pragma pack(1)
/*
 * System Configuration Block (packed): ties together the SCLB, ACB
 * chain and ISB.  "trc_" fields hold adapter-side addresses, the plain
 * pointers the host-side equivalents.  The numeric comments are the
 * running byte offset at the END of each field, assuming 32-bit host
 * pointers.
 */
typedef struct
{
	__u16 config;			/* 02 */
	__u32 trc_sclb_ptr;		/* 06 */
	__u32 trc_acb_ptr;		/* 10 */
	__u32 trc_isb_ptr;		/* 14 */
	__u16 isbsiz;			/* 16 */

	SCLBlock *sclb_ptr;		/* 20 */
	ACBlock *acb_ptr;		/* 24 */
	ISBlock *isb_ptr;		/* 28 */

	__u16 Non_Mac_Rx_Bdbs;		/* 30 DEBUG */
	__u8 filler[2];			/* 32 */

} SCGBlock;
911
#pragma pack(1)
/* Initial System Configuration Pointer (packed): placed at a fixed RAM
 * offset (INIT_SYS_CONFIG_PTR_OFFSET) to hand the SCGBlock address to
 * the adapter. */
typedef struct
{
	__u32 trc_scgb_ptr;	/* adapter-side address of the SCGBlock */
	SCGBlock *scgb_ptr;	/* host-side pointer to the same block */
} ISCPBlock;
#pragma pack()
919
/*
 * Per-adapter driver state, stored as the net_device private area
 * (netdev_priv()).  Groups the shared control blocks, the per-queue
 * TX/RX descriptor rings, cached hardware configuration read at probe
 * time, and statistics.
 */
typedef struct net_local {
	/* Host-side views of the control blocks shared with the TRC. */
	ISCPBlock *iscpb_ptr;
	SCGBlock *scgb_ptr;
	SCLBlock *sclb_ptr;
	ISBlock *isb_ptr;

	/* Action command block chain: head, current, and next free slot. */
	ACBlock *acb_head;
	ACBlock *acb_curr;
	ACBlock *acb_next;

	__u8 adapter_name[12];

	/* Per-queue descriptor counts (MAC_QUEUE / NON_MAC_QUEUE / BUG_QUEUE). */
	__u16 num_rx_bdbs [NUM_RX_QS_USED];
	__u16 num_rx_fcbs [NUM_RX_QS_USED];

	__u16 num_tx_bdbs [NUM_TX_QS_USED];
	__u16 num_tx_fcbs [NUM_TX_QS_USED];

	__u16 num_of_tx_buffs;

	__u16 tx_buff_size [NUM_TX_QS_USED];
	__u16 tx_buff_used [NUM_TX_QS_USED];
	__u16 tx_queue_status [NUM_TX_QS_USED];

	/* TX ring bookkeeping, one set per TX queue. */
	FCBlock *tx_fcb_head[NUM_TX_QS_USED];
	FCBlock *tx_fcb_curr[NUM_TX_QS_USED];
	FCBlock *tx_fcb_end[NUM_TX_QS_USED];
	BDBlock *tx_bdb_head[NUM_TX_QS_USED];
	__u16 *tx_buff_head[NUM_TX_QS_USED];
	__u16 *tx_buff_end[NUM_TX_QS_USED];
	__u16 *tx_buff_curr[NUM_TX_QS_USED];
	__u16 num_tx_fcbs_used[NUM_TX_QS_USED];

	/* RX ring bookkeeping, one set per RX queue. */
	FCBlock *rx_fcb_head[NUM_RX_QS_USED];
	FCBlock *rx_fcb_curr[NUM_RX_QS_USED];
	BDBlock *rx_bdb_head[NUM_RX_QS_USED];
	BDBlock *rx_bdb_curr[NUM_RX_QS_USED];
	BDBlock *rx_bdb_end[NUM_RX_QS_USED];
	__u16 *rx_buff_head[NUM_RX_QS_USED];
	__u16 *rx_buff_end[NUM_RX_QS_USED];

	__u32 *ptr_local_ring_num;

	__u32 sh_mem_used;	/* bytes of shared adapter RAM consumed */

	__u16 page_offset_mask;	/* used by SMC_PAGE_OFFSET()/PAGE_POINTER() */

	__u16 authorized_function_classes;
	__u16 authorized_access_priority;

	__u16 num_acbs;
	__u16 num_acbs_used;
	__u16 acb_pending;

	__u16 current_isb_index;	/* next ISB entry to service */

	/* Ring/monitor state machine. */
	__u8 monitor_state;
	__u8 monitor_state_ready;
	__u16 ring_status;
	__u8 ring_status_flags;
	__u8 state;

	__u8 join_state;

	/* MCA bus identity (0 slot_num = not an MCA card; see cleanup paths). */
	__u8 slot_num;
	__u16 pos_id;

	/* Pointers into misc_command_data where the adapter reports these. */
	__u32 *ptr_una;
	__u32 *ptr_bcn_type;
	__u32 *ptr_tx_fifo_underruns;
	__u32 *ptr_rx_fifo_underruns;
	__u32 *ptr_rx_fifo_overruns;
	__u32 *ptr_tx_fifo_overruns;
	__u32 *ptr_tx_fcb_overruns;
	__u32 *ptr_rx_fcb_overruns;
	__u32 *ptr_tx_bdb_overruns;
	__u32 *ptr_rx_bdb_overruns;

	__u16 receive_queue_number;

	__u8 rx_fifo_overrun_count;
	__u8 tx_fifo_overrun_count;

	__u16 adapter_flags;
	__u16 adapter_flags1;
	__u16 *misc_command_data;
	__u16 max_packet_size;

	__u16 config_word0;
	__u16 config_word1;

	__u8 trc_mask;

	__u16 source_ring_number;
	__u16 target_ring_number;

	__u16 microcode_version;

	/* Hardware identity and memory layout discovered at probe time. */
	__u16 bic_type;
	__u16 nic_type;
	__u16 board_id;

	__u16 rom_size;
	__u32 rom_base;
	__u16 ram_size;
	__u16 ram_usable;
	__u32 ram_base;
	__u32 ram_access;	/* host virtual base of shared RAM; see TRC_POINTER() */

	__u16 extra_info;
	__u16 mode_bits;
	__u16 media_menu;
	__u16 media_type;
	__u16 adapter_bus;

	__u16 status;
	__u16 receive_mask;

	/* Group/functional address filters programmed into the adapter. */
	__u16 group_address_0;
	__u16 group_address[2];
	__u16 functional_address_0;
	__u16 functional_address[2];
	__u16 bitwise_group_address[2];

	__u8 cleanup;

	/* Software TX queue of pending sk_buffs. */
	struct sk_buff_head SendSkbQueue;
	__u16 QueueSkb;

	struct tr_statistics MacStat;	/* MAC statistics structure */

	spinlock_t lock;
} NET_LOCAL;
1053
1054/************************************
1055 * SNMP-ON-BOARD Agent Link Structure
1056 ************************************/
1057
1058typedef struct {
1059 __u8 LnkSigStr[12]; /* signature string "SmcLinkTable" */
1060 __u8 LnkDrvTyp; /* 1=Redbox ODI, 2=ODI DOS, 3=ODI OS/2, 4=NDIS DOS */
1061 __u8 LnkFlg; /* 0 if no agent linked, 1 if agent linked */
1062 void *LnkNfo; /* routine which returns pointer to NIC info */
1063 void *LnkAgtRcv; /* pointer to agent receive trap entry */
1064 void *LnkAgtXmt; /* pointer to agent transmit trap
1065entry */
1066void *LnkGet; /* pointer to NIC receive data
1067copy routine */
1068 void *LnkSnd; /* pointer to NIC send routine
1069*/
1070 void *LnkRst; /* pointer to NIC driver reset
1071routine */
1072 void *LnkMib; /* pointer to MIB data base */
1073 void *LnkMibAct; /* pointer to MIB action routine list */
1074 __u16 LnkCntOffset; /* offset to error counters */
1075 __u16 LnkCntNum; /* number of error counters */
1076 __u16 LnkCntSize; /* size of error counters i.e. 32 = 32 bits */
1077 void *LnkISR; /* pointer to interrupt vector */
1078 __u8 LnkFrmTyp; /* 1=Ethernet, 2=Token Ring */
1079 __u8 LnkDrvVer1 ; /* driver major version */
1080 __u8 LnkDrvVer2 ; /* driver minor version */
1081} AgentLink;
1082
1083/*
1084 * Definitions for pcm_card_flags(bit_mapped)
1085 */
1086#define REG_COMPLETE 0x0001
1087#define INSERTED 0x0002
1088#define PCC_INSERTED 0x0004 /* 1=currently inserted, 0=cur removed */
1089
1090/*
1091 * Adapter RAM test patterns
1092 */
1093#define RAM_PATTERN_1 0x55AA
1094#define RAM_PATTERN_2 0x9249
1095#define RAM_PATTERN_3 0xDB6D
1096
1097/*
1098 * definitions for RAM test
1099 */
1100#define ROM_SIGNATURE 0xAA55
1101#define MIN_ROM_SIZE 0x2000
1102
1103/*
1104 * Return Codes
1105 */
1106#define SUCCESS 0x0000
1107#define ADAPTER_AND_CONFIG 0x0001
1108#define ADAPTER_NO_CONFIG 0x0002
1109#define NOT_MY_INTERRUPT 0x0003
1110#define FRAME_REJECTED 0x0004
1111#define EVENTS_DISABLED 0x0005
1112#define OUT_OF_RESOURCES 0x0006
1113#define INVALID_PARAMETER 0x0007
1114#define INVALID_FUNCTION 0x0008
1115#define INITIALIZE_FAILED 0x0009
1116#define CLOSE_FAILED 0x000A
1117#define MAX_COLLISIONS 0x000B
1118#define NO_SUCH_DESTINATION 0x000C
1119#define BUFFER_TOO_SMALL_ERROR 0x000D
1120#define ADAPTER_CLOSED 0x000E
1121#define UCODE_NOT_PRESENT 0x000F
1122#define FIFO_UNDERRUN 0x0010
1123#define DEST_OUT_OF_RESOURCES 0x0011
1124#define ADAPTER_NOT_INITIALIZED 0x0012
1125#define PENDING 0x0013
1126#define UCODE_PRESENT 0x0014
1127#define NOT_INIT_BY_BRIDGE 0x0015
1128
1129#define OPEN_FAILED 0x0080
1130#define HARDWARE_FAILED 0x0081
1131#define SELF_TEST_FAILED 0x0082
1132#define RAM_TEST_FAILED 0x0083
1133#define RAM_CONFLICT 0x0084
1134#define ROM_CONFLICT 0x0085
1135#define UNKNOWN_ADAPTER 0x0086
1136#define CONFIG_ERROR 0x0087
1137#define CONFIG_WARNING 0x0088
1138#define NO_FIXED_CNFG 0x0089
1139#define EEROM_CKSUM_ERROR 0x008A
1140#define ROM_SIGNATURE_ERROR 0x008B
1141#define ROM_CHECKSUM_ERROR 0x008C
1142#define ROM_SIZE_ERROR 0x008D
1143#define UNSUPPORTED_NIC_CHIP 0x008E
1144#define NIC_REG_ERROR 0x008F
1145#define BIC_REG_ERROR 0x0090
1146#define MICROCODE_TEST_ERROR 0x0091
1147#define LOBE_MEDIA_TEST_FAILED 0x0092
1148
1149#define ADAPTER_FOUND_LAN_CORRUPT 0x009B
1150
1151#define ADAPTER_NOT_FOUND 0xFFFF
1152
1153#define ILLEGAL_FUNCTION INVALID_FUNCTION
1154
1155/* Errors */
1156#define IO_BASE_INVALID 0x0001
1157#define IO_BASE_RANGE 0x0002
1158#define IRQ_INVALID 0x0004
1159#define IRQ_RANGE 0x0008
1160#define RAM_BASE_INVALID 0x0010
1161#define RAM_BASE_RANGE 0x0020
1162#define RAM_SIZE_RANGE 0x0040
1163#define MEDIA_INVALID 0x0800
1164
1165/* Warnings */
1166#define IRQ_MISMATCH 0x0080
1167#define RAM_BASE_MISMATCH 0x0100
1168#define RAM_SIZE_MISMATCH 0x0200
1169#define BUS_MODE_MISMATCH 0x0400
1170
1171#define RX_CRC_ERROR 0x01
1172#define RX_ALIGNMENT_ERROR 0x02
1173#define RX_HW_FAILED 0x80
1174
1175/*
1176 * Definitions for the field RING_STATUS_FLAGS
1177 */
1178#define RING_STATUS_CHANGED 0X01
1179#define MONITOR_STATE_CHANGED 0X02
1180#define JOIN_STATE_CHANGED 0X04
1181
1182/*
1183 * Definitions for the field JOIN_STATE
1184 */
1185#define JS_BYPASS_STATE 0x00
1186#define JS_LOBE_TEST_STATE 0x01
1187#define JS_DETECT_MONITOR_PRESENT_STATE 0x02
1188#define JS_AWAIT_NEW_MONITOR_STATE 0x03
1189#define JS_DUPLICATE_ADDRESS_TEST_STATE 0x04
1190#define JS_NEIGHBOR_NOTIFICATION_STATE 0x05
1191#define JS_REQUEST_INITIALIZATION_STATE 0x06
1192#define JS_JOIN_COMPLETE_STATE 0x07
1193#define JS_BYPASS_WAIT_STATE 0x08
1194
1195/*
1196 * Definitions for the field MONITOR_STATE
1197 */
1198#define MS_MONITOR_FSM_INACTIVE 0x00
1199#define MS_REPEAT_BEACON_STATE 0x01
1200#define MS_REPEAT_CLAIM_TOKEN_STATE 0x02
1201#define MS_TRANSMIT_CLAIM_TOKEN_STATE 0x03
1202#define MS_STANDBY_MONITOR_STATE 0x04
1203#define MS_TRANSMIT_BEACON_STATE 0x05
1204#define MS_ACTIVE_MONITOR_STATE 0x06
1205#define MS_TRANSMIT_RING_PURGE_STATE 0x07
1206#define MS_BEACON_TEST_STATE 0x09
1207
1208/*
1209 * Definitions for the bit-field RING_STATUS
1210 */
1211#define SIGNAL_LOSS 0x8000
1212#define HARD_ERROR 0x4000
1213#define SOFT_ERROR 0x2000
1214#define TRANSMIT_BEACON 0x1000
1215#define LOBE_WIRE_FAULT 0x0800
1216#define AUTO_REMOVAL_ERROR 0x0400
1217#define REMOVE_RECEIVED 0x0100
1218#define COUNTER_OVERFLOW 0x0080
1219#define SINGLE_STATION 0x0040
1220#define RING_RECOVERY 0x0020
1221
1222/*
1223 * Definitions for the field BUS_TYPE
1224 */
1225#define AT_BUS 0x00
1226#define MCA_BUS 0x01
1227#define EISA_BUS 0x02
1228#define PCI_BUS 0x03
1229#define PCMCIA_BUS 0x04
1230
1231/*
1232 * Definitions for adapter_flags
1233 */
1234#define RX_VALID_LOOKAHEAD 0x0001
1235#define FORCED_16BIT_MODE 0x0002
1236#define ADAPTER_DISABLED 0x0004
1237#define TRANSMIT_CHAIN_INT 0x0008
1238#define EARLY_RX_FRAME 0x0010
1239#define EARLY_TX 0x0020
1240#define EARLY_RX_COPY 0x0040
1241#define USES_PHYSICAL_ADDR 0x0080 /* Rsvd for DEC PCI and 9232 */
1242#define NEEDS_PHYSICAL_ADDR 0x0100 /* Reserved*/
1243#define RX_STATUS_PENDING 0x0200
1244#define ERX_DISABLED 0x0400 /* EARLY_RX_ENABLE rcv_mask */
1245#define ENABLE_TX_PENDING 0x0800
1246#define ENABLE_RX_PENDING 0x1000
1247#define PERM_CLOSE 0x2000
1248#define IO_MAPPED 0x4000 /* IOmapped bus interface 795 */
1249#define ETX_DISABLED 0x8000
1250
1251
1252/*
1253 * Definitions for adapter_flags1
1254 */
1255#define TX_PHY_RX_VIRT 0x0001
1256#define NEEDS_HOST_RAM 0x0002
1257#define NEEDS_MEDIA_TYPE 0x0004
1258#define EARLY_RX_DONE 0x0008
1259#define PNP_BOOT_BIT 0x0010 /* activates PnP & config on power-up */
1260 /* clear => regular PnP operation */
1261#define PNP_ENABLE 0x0020 /* regular PnP operation clear => */
1262 /* no PnP, overrides PNP_BOOT_BIT */
1263#define SATURN_ENABLE 0x0040
1264
1265#define ADAPTER_REMOVABLE 0x0080 /* adapter is hot swappable */
1266#define TX_PHY 0x0100 /* Uses physical address for tx bufs */
1267#define RX_PHY 0x0200 /* Uses physical address for rx bufs */
1268#define TX_VIRT 0x0400 /* Uses virtual addr for tx bufs */
1269#define RX_VIRT 0x0800
1270#define NEEDS_SERVICE 0x1000
1271
1272/*
1273 * Adapter Status Codes
1274 */
1275#define OPEN 0x0001
1276#define INITIALIZED 0x0002
1277#define CLOSED 0x0003
1278#define FAILED 0x0005
1279#define NOT_INITIALIZED 0x0006
1280#define IO_CONFLICT 0x0007
1281#define CARD_REMOVED 0x0008
1282#define CARD_INSERTED 0x0009
1283
1284/*
1285 * Mode Bit Definitions
1286 */
1287#define INTERRUPT_STATUS_BIT 0x8000 /* PC Interrupt Line: 0 = Not Enabled */
1288#define BOOT_STATUS_MASK 0x6000 /* Mask to isolate BOOT_STATUS */
1289#define BOOT_INHIBIT 0x0000 /* BOOT_STATUS is 'inhibited' */
1290#define BOOT_TYPE_1 0x2000 /* Unused BOOT_STATUS value */
1291#define BOOT_TYPE_2 0x4000 /* Unused BOOT_STATUS value */
1292#define BOOT_TYPE_3 0x6000 /* Unused BOOT_STATUS value */
1293#define ZERO_WAIT_STATE_MASK 0x1800 /* Mask to isolate Wait State flags */
1294#define ZERO_WAIT_STATE_8_BIT 0x1000 /* 0 = Disabled (Inserts Wait States) */
1295#define ZERO_WAIT_STATE_16_BIT 0x0800 /* 0 = Disabled (Inserts Wait States) */
1296#define LOOPING_MODE_MASK 0x0007
1297#define LOOPBACK_MODE_0 0x0000
1298#define LOOPBACK_MODE_1 0x0001
1299#define LOOPBACK_MODE_2 0x0002
1300#define LOOPBACK_MODE_3 0x0003
1301#define LOOPBACK_MODE_4 0x0004
1302#define LOOPBACK_MODE_5 0x0005
1303#define LOOPBACK_MODE_6 0x0006
1304#define LOOPBACK_MODE_7 0x0007
1305#define AUTO_MEDIA_DETECT 0x0008
1306#define MANUAL_CRC 0x0010
1307#define EARLY_TOKEN_REL 0x0020 /* Early Token Release for Token Ring */
1308#define UMAC 0x0040
1309#define UTP2_PORT 0x0080 /* For 8216T2, 0=port A, 1=Port B. */
1310#define BNC_10BT_INTERFACE 0x0600 /* BNC and UTP current media set */
1311#define UTP_INTERFACE 0x0500 /* Ethernet UTP Only. */
1312#define BNC_INTERFACE 0x0400
1313#define AUI_INTERFACE 0x0300
1314#define AUI_10BT_INTERFACE 0x0200
1315#define STARLAN_10_INTERFACE 0x0100
1316#define INTERFACE_TYPE_MASK 0x0700
1317
1318/*
1319 * Media Type Bit Definitions
1320 *
1321 * legend: TP = Twisted Pair
1322 * STP = Shielded twisted pair
1323 * UTP = Unshielded twisted pair
1324 */
1325
1326#define CNFG_MEDIA_TYPE_MASK 0x001e /* POS Register 3 Mask */
1327
1328#define MEDIA_S10 0x0000 /* Ethernet adapter, TP. */
1329#define MEDIA_AUI_UTP 0x0001 /* Ethernet adapter, AUI/UTP media */
1330#define MEDIA_BNC 0x0002 /* Ethernet adapter, BNC media. */
1331#define MEDIA_AUI 0x0003 /* Ethernet Adapter, AUI media. */
1332#define MEDIA_STP_16 0x0004 /* TokenRing adap, 16Mbit STP. */
1333#define MEDIA_STP_4 0x0005 /* TokenRing adap, 4Mbit STP. */
1334#define MEDIA_UTP_16 0x0006 /* TokenRing adap, 16Mbit UTP. */
1335#define MEDIA_UTP_4 0x0007 /* TokenRing adap, 4Mbit UTP. */
1336#define MEDIA_UTP 0x0008 /* Ethernet adapter, UTP media (no AUI) */
1338#define MEDIA_BNC_UTP 0x0010 /* Ethernet adapter, BNC/UTP media */
1339#define MEDIA_UTPFD 0x0011 /* Ethernet adapter, TP full duplex */
1340#define MEDIA_UTPNL 0x0012 /* Ethernet adapter, TP with link integrity test disabled */
1341#define MEDIA_AUI_BNC 0x0013 /* Ethernet adapter, AUI/BNC media */
1342#define MEDIA_AUI_BNC_UTP 0x0014 /* Ethernet adapter, AUI_BNC/UTP */
1343#define MEDIA_UTPA 0x0015 /* Ethernet UTP-10Mbps Ports A */
1344#define MEDIA_UTPB 0x0016 /* Ethernet UTP-10Mbps Ports B */
1345#define MEDIA_STP_16_UTP_16 0x0017 /* Token Ring STP-16Mbps/UTP-16Mbps */
1346#define MEDIA_STP_4_UTP_4 0x0018 /* Token Ring STP-4Mbps/UTP-4Mbps */
1347
1348#define MEDIA_STP100_UTP100 0x0020 /* Ethernet STP-100Mbps/UTP-100Mbps */
1349#define MEDIA_UTP100FD 0x0021 /* Ethernet UTP-100Mbps, full duplex */
1350#define MEDIA_UTP100 0x0022 /* Ethernet UTP-100Mbps */
1351
1352
1353#define MEDIA_UNKNOWN 0xFFFF /* Unknown adapter/media type */
1354
1355/*
1356 * Definitions for the field:
1357 * media_type2
1358 */
1359#define MEDIA_TYPE_MII 0x0001
1360#define MEDIA_TYPE_UTP 0x0002
1361#define MEDIA_TYPE_BNC 0x0004
1362#define MEDIA_TYPE_AUI 0x0008
1363#define MEDIA_TYPE_S10 0x0010
1364#define MEDIA_TYPE_AUTO_SENSE 0x1000
1365#define MEDIA_TYPE_AUTO_DETECT 0x4000
1366#define MEDIA_TYPE_AUTO_NEGOTIATE 0x8000
1367
1368/*
1369 * Definitions for the field:
1370 * line_speed
1371 */
1372#define LINE_SPEED_UNKNOWN 0x0000
1373#define LINE_SPEED_4 0x0001
1374#define LINE_SPEED_10 0x0002
1375#define LINE_SPEED_16 0x0004
1376#define LINE_SPEED_100 0x0008
1377#define LINE_SPEED_T4 0x0008 /* 100BaseT4 aliased for 9332BVT */
1378#define LINE_SPEED_FULL_DUPLEX 0x8000
1379
1380/*
1381 * Definitions for the field:
1382 * bic_type (Bus interface chip type)
1383 */
1384#define BIC_NO_CHIP 0x0000 /* Bus interface chip not implemented */
1385#define BIC_583_CHIP 0x0001 /* 83C583 bus interface chip */
1386#define BIC_584_CHIP 0x0002 /* 83C584 bus interface chip */
1387#define BIC_585_CHIP 0x0003 /* 83C585 bus interface chip */
1388#define BIC_593_CHIP 0x0004 /* 83C593 bus interface chip */
1389#define BIC_594_CHIP 0x0005 /* 83C594 bus interface chip */
1390#define BIC_564_CHIP 0x0006 /* PCMCIA Bus interface chip */
1391#define BIC_790_CHIP 0x0007 /* 83C790 bus i-face/Ethernet NIC chip */
1392#define BIC_571_CHIP 0x0008 /* 83C571 EISA bus master i-face */
1393#define BIC_587_CHIP 0x0009 /* Token Ring AT bus master i-face */
1394#define BIC_574_CHIP 0x0010 /* FEAST bus interface chip */
1395#define BIC_8432_CHIP 0x0011 /* 8432 bus i-face/Ethernet NIC(DEC PCI) */
1396#define BIC_9332_CHIP 0x0012 /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */
1397#define BIC_8432E_CHIP 0x0013 /* 8432 Enhanced bus iface/Ethernet NIC(DEC) */
1398#define BIC_EPIC100_CHIP 0x0014 /* EPIC/100 10/100 Mbps Ethernet BIC/NIC */
1399#define BIC_C94_CHIP 0x0015 /* 91C94 bus i-face in PCMCIA mode */
1400#define BIC_X8020_CHIP 0x0016 /* Xilinx PCMCIA multi-func i-face */
1401
1402/*
1403 * Definitions for the field:
1404 * nic_type (Bus interface chip type)
1405 */
1406#define NIC_UNK_CHIP 0x0000 /* Unknown NIC chip */
1407#define NIC_8390_CHIP 0x0001 /* DP8390 Ethernet NIC */
1408#define NIC_690_CHIP 0x0002 /* 83C690 Ethernet NIC */
1409#define NIC_825_CHIP 0x0003 /* 83C825 Token Ring NIC */
1410/* #define NIC_???_CHIP 0x0004 */ /* Not used */
1411/* #define NIC_???_CHIP 0x0005 */ /* Not used */
1412/* #define NIC_???_CHIP 0x0006 */ /* Not used */
1413#define NIC_790_CHIP 0x0007 /* 83C790 bus i-face/Ethernet NIC chip */
1414#define NIC_C100_CHIP 0x0010 /* FEAST 100Mbps Ethernet NIC */
1415#define NIC_8432_CHIP 0x0011 /* 8432 bus i-face/Ethernet NIC(DEC PCI) */
1416#define NIC_9332_CHIP 0x0012 /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */
1417#define NIC_8432E_CHIP 0x0013 /* 8432 enhanced bus iface/Ethernet NIC(DEC) */
1418#define NIC_EPIC100_CHIP 0x0014 /* EPIC/100 10/100 Mbps Ethernet BIC/NIC */
1419#define NIC_C94_CHIP 0x0015 /* 91C94 PC Card with multi func */
1420
1421/*
1422 * Definitions for the field:
1423 * adapter_type The adapter_type field describes the adapter/bus
1424 * configuration.
1425 */
1426#define BUS_ISA16_TYPE 0x0001 /* 16 bit adap in 16 bit (E)ISA slot */
1427#define BUS_ISA8_TYPE 0x0002 /* 8/16b adap in 8 bit XT/(E)ISA slot */
1428#define BUS_MCA_TYPE 0x0003 /* Micro Channel adapter */
1429
1430/*
1431 * Receive Mask definitions
1432 */
1433#define ACCEPT_MULTICAST 0x0001
1434#define ACCEPT_BROADCAST 0x0002
1435#define PROMISCUOUS_MODE 0x0004
1436#define ACCEPT_SOURCE_ROUTING 0x0008
1437#define ACCEPT_ERR_PACKETS 0x0010
1438#define ACCEPT_ATT_MAC_FRAMES 0x0020
1439#define ACCEPT_MULTI_PROM 0x0040
1440#define TRANSMIT_ONLY 0x0080
1441#define ACCEPT_EXT_MAC_FRAMES 0x0100
1442#define EARLY_RX_ENABLE 0x0200
1443#define PKT_SIZE_NOT_NEEDED 0x0400
1444#define ACCEPT_SOURCE_ROUTING_SPANNING 0x0808
1445
1446#define ACCEPT_ALL_MAC_FRAMES 0x0120
1447
1448/*
1449 * config_mode defs
1450 */
1451#define STORE_EEROM 0x0001 /* Store config in EEROM. */
1452#define STORE_REGS 0x0002 /* Store config in register set. */
1453
1454/*
1455 * equates for lmac_flags in adapter structure (Ethernet)
1456 */
1457#define MEM_DISABLE 0x0001
1458#define RX_STATUS_POLL 0x0002
1459#define USE_RE_BIT 0x0004
1460/*#define RESERVED 0x0008 */
1461/*#define RESERVED 0x0010 */
1462/*#define RESERVED 0x0020 */
1463/*#define RESERVED 0x0040 */
1464/*#define RESERVED 0x0080 */
1465/*#define RESERVED 0x0100 */
1466/*#define RESERVED 0x0200 */
1467/*#define RESERVED 0x0400 */
1468/*#define RESERVED 0x0800 */
1469/*#define RESERVED 0x1000 */
1470/*#define RESERVED 0x2000 */
1471/*#define RESERVED 0x4000 */
1472/*#define RESERVED 0x8000 */
1473
1474/* media_opts & media_set Fields bit defs for Ethernet ... */
1475#define MED_OPT_BNC 0x01
1476#define MED_OPT_UTP 0x02
1477#define MED_OPT_AUI 0x04
1478#define MED_OPT_10MB 0x08
1479#define MED_OPT_100MB 0x10
1480#define MED_OPT_S10 0x20
1481
1482/* media_opts & media_set Fields bit defs for Token Ring ... */
1483#define MED_OPT_4MB 0x08
1484#define MED_OPT_16MB 0x10
1485#define MED_OPT_STP 0x40
1486
1487#define MAX_8023_SIZE 1500 /* Max 802.3 size of frame. */
1488#define DEFAULT_ERX_VALUE 4 /* Number of 16-byte blocks for 790B early Rx. */
1489#define DEFAULT_ETX_VALUE 32 /* Number of bytes for 790B early Tx. */
1490#define DEFAULT_TX_RETRIES 3 /* Number of transmit retries */
1491#define LPBK_FRAME_SIZE 1024 /* Default loopback frame for Rx calibration test. */
1492#define MAX_LOOKAHEAD_SIZE 252 /* Max lookahead size for ethernet. */
1493
1494#define RW_MAC_STATE 0x1101
1495#define RW_SA_OF_LAST_AMP_OR_SMP 0x2803
1496#define RW_PHYSICAL_DROP_NUMBER 0x3B02
1497#define RW_UPSTREAM_NEIGHBOR_ADDRESS 0x3E03
1498#define RW_PRODUCT_INSTANCE_ID 0x4B09
1499
1500#define RW_TRC_STATUS_BLOCK 0x5412
1501
1502#define RW_MAC_ERROR_COUNTERS_NO_CLEAR 0x8006
1503#define RW_MAC_ERROR_COUNTER_CLEAR 0x7A06
1504#define RW_CONFIG_REGISTER_0 0xA001
1505#define RW_CONFIG_REGISTER_1 0xA101
1506#define RW_PRESCALE_TIMER_THRESHOLD 0xA201
1507#define RW_TPT_THRESHOLD 0xA301
1508#define RW_TQP_THRESHOLD 0xA401
1509#define RW_TNT_THRESHOLD 0xA501
1510#define RW_TBT_THRESHOLD 0xA601
1511#define RW_TSM_THRESHOLD 0xA701
1512#define RW_TAM_THRESHOLD 0xA801
1513#define RW_TBR_THRESHOLD 0xA901
1514#define RW_TER_THRESHOLD 0xAA01
1515#define RW_TGT_THRESHOLD 0xAB01
1516#define RW_THT_THRESHOLD 0xAC01
1517#define RW_TRR_THRESHOLD 0xAD01
1518#define RW_TVX_THRESHOLD 0xAE01
1519#define RW_INDIVIDUAL_MAC_ADDRESS 0xB003
1520
1521#define RW_INDIVIDUAL_GROUP_ADDRESS 0xB303 /* all of group addr */
1522#define RW_INDIVIDUAL_GROUP_ADDR_WORD_0 0xB301 /* 1st word of group addr */
1523#define RW_INDIVIDUAL_GROUP_ADDR 0xB402 /* 2nd-3rd word of group addr */
1524#define RW_FUNCTIONAL_ADDRESS 0xB603 /* all of functional addr */
1525#define RW_FUNCTIONAL_ADDR_WORD_0 0xB601 /* 1st word of func addr */
1526#define RW_FUNCTIONAL_ADDR 0xB702 /* 2nd-3rd word func addr */
1527
1528#define RW_BIT_SIGNIFICANT_GROUP_ADDR 0xB902
1529#define RW_SOURCE_RING_BRIDGE_NUMBER 0xBB01
1530#define RW_TARGET_RING_NUMBER 0xBC01
1531
1532#define RW_HIC_INTERRUPT_MASK 0xC601
1533
1534#define SOURCE_ROUTING_SPANNING_BITS 0x00C0 /* Spanning Tree Frames */
1535#define SOURCE_ROUTING_EXPLORER_BIT 0x0040 /* Explorer and Single Route */
1536
1537 /* write */
1538
1539#define CSR_MSK_ALL 0x80 // Bic 587 Only
1540#define CSR_MSKTINT 0x20
1541#define CSR_MSKCBUSY 0x10
1542#define CSR_CLRTINT 0x08
1543#define CSR_CLRCBUSY 0x04
1544#define CSR_WCSS 0x02
1545#define CSR_CA 0x01
1546
1547 /* read */
1548
1549#define CSR_TINT 0x20
1550#define CSR_CINT 0x10
1551#define CSR_TSTAT 0x08
1552#define CSR_CSTAT 0x04
1553#define CSR_FAULT 0x02
1554#define CSR_CBUSY 0x01
1555
1556#define LAAR_MEM16ENB 0x80
1557#define Zws16 0x20
1558
1559#define IRR_IEN 0x80
1560#define Zws8 0x01
1561
1562#define IMCCR_EIL 0x04
1563
1564typedef struct {
1565 __u8 ac; /* Access Control */
1566 __u8 fc; /* Frame Control */
1567 __u8 da[6]; /* Dest Addr */
1568 __u8 sa[6]; /* Source Addr */
1569
1570 __u16 vl; /* Vector Length */
1571 __u8 dc_sc; /* Dest/Source Class */
1572 __u8 vc; /* Vector Code */
1573 } MAC_HEADER;
1574
1575#define MAX_SUB_VECTOR_INFO (RX_DATA_BUFFER_SIZE - sizeof(MAC_HEADER) - 2)
1576
1577typedef struct
1578 {
1579 __u8 svl; /* Sub-vector Length */
1580 __u8 svi; /* Sub-vector Code */
1581 __u8 svv[MAX_SUB_VECTOR_INFO]; /* Sub-vector Info */
1582 } MAC_SUB_VECTOR;
1583
1584#endif /* __KERNEL__ */
1585#endif /* __LINUX_SMCTR_H */
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
deleted file mode 100644
index be4813e0366c..000000000000
--- a/drivers/net/tokenring/tms380tr.c
+++ /dev/null
@@ -1,2306 +0,0 @@
1/*
2 * tms380tr.c: A network driver library for Texas Instruments TMS380-based
3 * Token Ring Adapters.
4 *
5 * Originally sktr.c: Written 1997 by Christoph Goos
6 *
7 * A fine result of the Linux Systems Network Architecture Project.
8 * http://www.vanheusden.com/sna/
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
12 *
13 * The following modules are currently available for card support:
14 * - tmspci (Generic PCI card support)
15 * - abyss (Madge PCI support)
16 * - tmsisa (SysKonnect TR4/16 ISA)
17 *
18 * Sources:
 19 * - The hardware-related parts of this driver are taken from
20 * the SysKonnect Token Ring driver for Windows NT.
21 * - I used the IBM Token Ring driver 'ibmtr.c' as a base for this
22 * driver, as well as the 'skeleton.c' driver by Donald Becker.
23 * - Also various other drivers in the linux source tree were taken
24 * as samples for some tasks.
25 * - TI TMS380 Second-Generation Token Ring User's Guide
26 * - TI datasheets for respective chips
27 * - David Hein at Texas Instruments
28 * - Various Madge employees
29 *
30 * Maintainer(s):
31 * JS Jay Schulist jschlst@samba.org
32 * CG Christoph Goos cgoos@syskonnect.de
33 * AF Adam Fritzler
34 * MLP Mike Phillips phillim@amtrak.com
35 * JF Jochen Friedrich jochen@scram.de
36 *
37 * Modification History:
38 * 29-Aug-97 CG Created
39 * 04-Apr-98 CG Fixed problems caused by tok_timer_check
40 * 10-Apr-98 CG Fixed lockups at cable disconnection
 41 * 27-May-98 JS Formatted to Linux kernel format
42 * 31-May-98 JS Hacked in PCI support
43 * 16-Jun-98 JS Modulized for multiple cards with one driver
44 * Sep-99 AF Renamed to tms380tr (supports more than SK's)
45 * 23-Sep-99 AF Added Compaq and Thomas-Conrad PCI support
46 * Fixed a bug causing double copies on PCI
47 * Fixed for new multicast stuff (2.2/2.3)
 48 * 25-Sep-99 AF Upped TPL_NUM from 3 to 9
49 * Removed extraneous 'No free TPL'
50 * 22-Dec-99 AF Added Madge PCI Mk2 support and generalized
 51 * parts of the initialization procedure.
52 * 30-Dec-99 AF Turned tms380tr into a library ala 8390.
53 * Madge support is provided in the abyss module
54 * Generic PCI support is in the tmspci module.
55 * 30-Nov-00 JF Updated PCI code to support IO MMU via
56 * pci_map_static(). Alpha uses this MMU for ISA
57 * as well.
58 * 14-Jan-01 JF Fix DMA on ifdown/ifup sequences. Some
59 * cleanup.
60 * 13-Jan-02 JF Add spinlock to fix race condition.
61 * 09-Nov-02 JF Fixed printks to not SPAM the console during
62 * normal operation.
63 * 30-Dec-02 JF Removed incorrect __init from
64 * tms380tr_init_card.
65 * 22-Jul-05 JF Converted to dma-mapping.
66 *
67 * To do:
68 * 1. Multi/Broadcast packet handling (this may have fixed itself)
69 * 2. Write a sktrisa module that includes the old ISA support (done)
70 * 3. Allow modules to load their own microcode
71 * 4. Speed up the BUD process -- freezing the kernel for 3+sec is
72 * quite unacceptable.
73 * 5. Still a few remaining stalls when the cable is unplugged.
74 */
75
76#ifdef MODULE
77static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, Adam Fritzler\n";
78#endif
79
80#include <linux/module.h>
81#include <linux/kernel.h>
82#include <linux/types.h>
83#include <linux/fcntl.h>
84#include <linux/interrupt.h>
85#include <linux/ptrace.h>
86#include <linux/ioport.h>
87#include <linux/in.h>
88#include <linux/string.h>
89#include <linux/time.h>
90#include <linux/errno.h>
91#include <linux/init.h>
92#include <linux/dma-mapping.h>
93#include <linux/delay.h>
94#include <linux/netdevice.h>
95#include <linux/etherdevice.h>
96#include <linux/skbuff.h>
97#include <linux/trdevice.h>
98#include <linux/firmware.h>
99#include <linux/bitops.h>
100
101#include <asm/io.h>
102#include <asm/dma.h>
103#include <asm/irq.h>
104#include <asm/uaccess.h>
105
106#include "tms380tr.h" /* Our Stuff */
107
108/* Use 0 for production, 1 for verification, 2 for debug, and
109 * 3 for very verbose debug.
110 */
111#ifndef TMS380TR_DEBUG
112#define TMS380TR_DEBUG 0
113#endif
114static unsigned int tms380tr_debug = TMS380TR_DEBUG;
115
116/* Index to functions, as function prototypes.
117 * Alphabetical by function name.
118 */
119
120/* "A" */
121/* "B" */
122static int tms380tr_bringup_diags(struct net_device *dev);
123/* "C" */
124static void tms380tr_cancel_tx_queue(struct net_local* tp);
125static int tms380tr_chipset_init(struct net_device *dev);
126static void tms380tr_chk_irq(struct net_device *dev);
127static void tms380tr_chk_outstanding_cmds(struct net_device *dev);
128static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr);
129static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType);
130int tms380tr_close(struct net_device *dev);
131static void tms380tr_cmd_status_irq(struct net_device *dev);
132/* "D" */
133static void tms380tr_disable_interrupts(struct net_device *dev);
134#if TMS380TR_DEBUG > 0
135static void tms380tr_dump(unsigned char *Data, int length);
136#endif
137/* "E" */
138static void tms380tr_enable_interrupts(struct net_device *dev);
139static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command);
140static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue);
141/* "F" */
142/* "G" */
143static struct net_device_stats *tms380tr_get_stats(struct net_device *dev);
144/* "H" */
145static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb,
146 struct net_device *dev);
147/* "I" */
148static int tms380tr_init_adapter(struct net_device *dev);
149static void tms380tr_init_ipb(struct net_local *tp);
150static void tms380tr_init_net_local(struct net_device *dev);
151static void tms380tr_init_opb(struct net_device *dev);
152/* "M" */
153/* "O" */
154int tms380tr_open(struct net_device *dev);
155static void tms380tr_open_adapter(struct net_device *dev);
156/* "P" */
157/* "R" */
158static void tms380tr_rcv_status_irq(struct net_device *dev);
159static int tms380tr_read_ptr(struct net_device *dev);
160static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
161 unsigned short Address, int Length);
162static int tms380tr_reset_adapter(struct net_device *dev);
163static void tms380tr_reset_interrupt(struct net_device *dev);
164static void tms380tr_ring_status_irq(struct net_device *dev);
165/* "S" */
166static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb,
167 struct net_device *dev);
168static void tms380tr_set_multicast_list(struct net_device *dev);
169static int tms380tr_set_mac_address(struct net_device *dev, void *addr);
170/* "T" */
171static void tms380tr_timer_chk(unsigned long data);
172static void tms380tr_timer_end_wait(unsigned long data);
173static void tms380tr_tx_status_irq(struct net_device *dev);
174/* "U" */
175static void tms380tr_update_rcv_stats(struct net_local *tp,
176 unsigned char DataPtr[], unsigned int Length);
177/* "W" */
178void tms380tr_wait(unsigned long time);
179static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status);
180static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status);
181
182#define SIFREADB(reg) \
183 (((struct net_local *)netdev_priv(dev))->sifreadb(dev, reg))
184#define SIFWRITEB(val, reg) \
185 (((struct net_local *)netdev_priv(dev))->sifwriteb(dev, val, reg))
186#define SIFREADW(reg) \
187 (((struct net_local *)netdev_priv(dev))->sifreadw(dev, reg))
188#define SIFWRITEW(val, reg) \
189 (((struct net_local *)netdev_priv(dev))->sifwritew(dev, val, reg))
190
191
192
#if 0 /* TMS380TR_DEBUG > 0 */
/*
 * Probe for a responding SIF: walk every even SIFADR value, checking that
 * a write reads back the same on both register pages and that an inverted
 * round trip also agrees.  Returns 0 if an adapter answers, -1 otherwise.
 * NOTE(review): compiled out (#if 0); kept for reference only.
 */
static int madgemc_sifprobe(struct net_device *dev)
{
	unsigned char saved, probe, readback;

	saved = SIFREADB(SIFADR);		/* remember current SIFADR */

	probe = 0;				/* begin with check value 0 */
	do {
		madgemc_setregpage(dev, 0);
		SIFWRITEB(probe, SIFADR);	/* write new SIFADR value */
		readback = SIFREADB(SIFADR);
		if (readback != probe)
			return -1;

		madgemc_setregpage(dev, 1);
		readback = SIFREADB(SIFADD);	/* same register via page 1 */
		if (readback != probe)
			return -1;

		madgemc_setregpage(dev, 0);
		readback ^= 0x0FE;		/* invert and write back */
		SIFWRITEB(readback, SIFADR);

		madgemc_setregpage(dev, 1);	/* read, invert and compare */
		readback = SIFREADB(SIFADD);
		madgemc_setregpage(dev, 0);
		readback ^= 0x0FE;

		if (probe != readback)
			return -1;		/* no adapter */
		probe -= 2;
	} while (probe != 0);			/* 128 passes (even byte values) */

	madgemc_setregpage(dev, 0);		/* sanity */
	SIFWRITEB(saved, SIFADR);		/* restore the SIFADR value */

	return 0;
}
#endif
237
238/*
239 * Open/initialize the board. This is called sometime after
240 * booting when the 'ifconfig' program is run.
241 *
242 * This routine should set everything up anew at each open, even
243 * registers that "should" only need to be set once at boot, so that
244 * there is non-reboot way to recover if something goes wrong.
245 */
246int tms380tr_open(struct net_device *dev)
247{
248 struct net_local *tp = netdev_priv(dev);
249 int err;
250
251 /* init the spinlock */
252 spin_lock_init(&tp->lock);
253 init_timer(&tp->timer);
254
255 /* Reset the hardware here. Don't forget to set the station address. */
256
257#ifdef CONFIG_ISA
258 if(dev->dma > 0)
259 {
260 unsigned long flags=claim_dma_lock();
261 disable_dma(dev->dma);
262 set_dma_mode(dev->dma, DMA_MODE_CASCADE);
263 enable_dma(dev->dma);
264 release_dma_lock(flags);
265 }
266#endif
267
268 err = tms380tr_chipset_init(dev);
269 if(err)
270 {
271 printk(KERN_INFO "%s: Chipset initialization error\n",
272 dev->name);
273 return -1;
274 }
275
276 tp->timer.expires = jiffies + 30*HZ;
277 tp->timer.function = tms380tr_timer_end_wait;
278 tp->timer.data = (unsigned long)dev;
279 add_timer(&tp->timer);
280
281 printk(KERN_DEBUG "%s: Adapter RAM size: %dK\n",
282 dev->name, tms380tr_read_ptr(dev));
283
284 tms380tr_enable_interrupts(dev);
285 tms380tr_open_adapter(dev);
286
287 netif_start_queue(dev);
288
289 /* Wait for interrupt from hardware. If interrupt does not come,
290 * there will be a timeout from the timer.
291 */
292 tp->Sleeping = 1;
293 interruptible_sleep_on(&tp->wait_for_tok_int);
294 del_timer(&tp->timer);
295
296 /* If AdapterVirtOpenFlag is 1, the adapter is now open for use */
297 if(tp->AdapterVirtOpenFlag == 0)
298 {
299 tms380tr_disable_interrupts(dev);
300 return -1;
301 }
302
303 tp->StartTime = jiffies;
304
305 /* Start function control timer */
306 tp->timer.expires = jiffies + 2*HZ;
307 tp->timer.function = tms380tr_timer_chk;
308 tp->timer.data = (unsigned long)dev;
309 add_timer(&tp->timer);
310
311 return 0;
312}
313
314/*
315 * Timeout function while waiting for event
316 */
317static void tms380tr_timer_end_wait(unsigned long data)
318{
319 struct net_device *dev = (struct net_device*)data;
320 struct net_local *tp = netdev_priv(dev);
321
322 if(tp->Sleeping)
323 {
324 tp->Sleeping = 0;
325 wake_up_interruptible(&tp->wait_for_tok_int);
326 }
327}
328
329/*
330 * Initialize the chipset
331 */
332static int tms380tr_chipset_init(struct net_device *dev)
333{
334 struct net_local *tp = netdev_priv(dev);
335 int err;
336
337 tms380tr_init_ipb(tp);
338 tms380tr_init_opb(dev);
339 tms380tr_init_net_local(dev);
340
341 if(tms380tr_debug > 3)
342 printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name);
343 err = tms380tr_reset_adapter(dev);
344 if(err < 0)
345 return -1;
346
347 if(tms380tr_debug > 3)
348 printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name);
349 err = tms380tr_bringup_diags(dev);
350 if(err < 0)
351 return -1;
352
353 if(tms380tr_debug > 3)
354 printk(KERN_DEBUG "%s: Init adapter...\n", dev->name);
355 err = tms380tr_init_adapter(dev);
356 if(err < 0)
357 return -1;
358
359 if(tms380tr_debug > 3)
360 printk(KERN_DEBUG "%s: Done!\n", dev->name);
361 return 0;
362}
363
364/*
365 * Initializes the net_local structure.
366 */
367static void tms380tr_init_net_local(struct net_device *dev)
368{
369 struct net_local *tp = netdev_priv(dev);
370 int i;
371 dma_addr_t dmabuf;
372
373 tp->scb.CMD = 0;
374 tp->scb.Parm[0] = 0;
375 tp->scb.Parm[1] = 0;
376
377 tp->ssb.STS = 0;
378 tp->ssb.Parm[0] = 0;
379 tp->ssb.Parm[1] = 0;
380 tp->ssb.Parm[2] = 0;
381
382 tp->CMDqueue = 0;
383
384 tp->AdapterOpenFlag = 0;
385 tp->AdapterVirtOpenFlag = 0;
386 tp->ScbInUse = 0;
387 tp->OpenCommandIssued = 0;
388 tp->ReOpenInProgress = 0;
389 tp->HaltInProgress = 0;
390 tp->TransmitHaltScheduled = 0;
391 tp->LobeWireFaultLogged = 0;
392 tp->LastOpenStatus = 0;
393 tp->MaxPacketSize = DEFAULT_PACKET_SIZE;
394
395 /* Create circular chain of transmit lists */
396 for (i = 0; i < TPL_NUM; i++)
397 {
398 tp->Tpl[i].NextTPLAddr = htonl(((char *)(&tp->Tpl[(i+1) % TPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */
399 tp->Tpl[i].Status = 0;
400 tp->Tpl[i].FrameSize = 0;
401 tp->Tpl[i].FragList[0].DataCount = 0;
402 tp->Tpl[i].FragList[0].DataAddr = 0;
403 tp->Tpl[i].NextTPLPtr = &tp->Tpl[(i+1) % TPL_NUM];
404 tp->Tpl[i].MData = NULL;
405 tp->Tpl[i].TPLIndex = i;
406 tp->Tpl[i].DMABuff = 0;
407 tp->Tpl[i].BusyFlag = 0;
408 }
409
410 tp->TplFree = tp->TplBusy = &tp->Tpl[0];
411
412 /* Create circular chain of receive lists */
413 for (i = 0; i < RPL_NUM; i++)
414 {
415 tp->Rpl[i].NextRPLAddr = htonl(((char *)(&tp->Rpl[(i+1) % RPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */
416 tp->Rpl[i].Status = (RX_VALID | RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
417 tp->Rpl[i].FrameSize = 0;
418 tp->Rpl[i].FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize);
419
420 /* Alloc skb and point adapter to data area */
421 tp->Rpl[i].Skb = dev_alloc_skb(tp->MaxPacketSize);
422 tp->Rpl[i].DMABuff = 0;
423
424 /* skb == NULL ? then use local buffer */
425 if(tp->Rpl[i].Skb == NULL)
426 {
427 tp->Rpl[i].SkbStat = SKB_UNAVAILABLE;
428 tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer);
429 tp->Rpl[i].MData = tp->LocalRxBuffers[i];
430 }
431 else /* SKB != NULL */
432 {
433 tp->Rpl[i].Skb->dev = dev;
434 skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize);
435
436 /* data unreachable for DMA ? then use local buffer */
437 dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
438 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
439 {
440 tp->Rpl[i].SkbStat = SKB_DATA_COPY;
441 tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer);
442 tp->Rpl[i].MData = tp->LocalRxBuffers[i];
443 }
444 else /* DMA directly in skb->data */
445 {
446 tp->Rpl[i].SkbStat = SKB_DMA_DIRECT;
447 tp->Rpl[i].FragList[0].DataAddr = htonl(dmabuf);
448 tp->Rpl[i].MData = tp->Rpl[i].Skb->data;
449 tp->Rpl[i].DMABuff = dmabuf;
450 }
451 }
452
453 tp->Rpl[i].NextRPLPtr = &tp->Rpl[(i+1) % RPL_NUM];
454 tp->Rpl[i].RPLIndex = i;
455 }
456
457 tp->RplHead = &tp->Rpl[0];
458 tp->RplTail = &tp->Rpl[RPL_NUM-1];
459 tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
460}
461
462/*
463 * Initializes the initialisation parameter block.
464 */
465static void tms380tr_init_ipb(struct net_local *tp)
466{
467 tp->ipb.Init_Options = BURST_MODE;
468 tp->ipb.CMD_Status_IV = 0;
469 tp->ipb.TX_IV = 0;
470 tp->ipb.RX_IV = 0;
471 tp->ipb.Ring_Status_IV = 0;
472 tp->ipb.SCB_Clear_IV = 0;
473 tp->ipb.Adapter_CHK_IV = 0;
474 tp->ipb.RX_Burst_Size = BURST_SIZE;
475 tp->ipb.TX_Burst_Size = BURST_SIZE;
476 tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES;
477 tp->ipb.SCB_Addr = 0;
478 tp->ipb.SSB_Addr = 0;
479}
480
481/*
482 * Initializes the open parameter block.
483 */
484static void tms380tr_init_opb(struct net_device *dev)
485{
486 struct net_local *tp;
487 unsigned long Addr;
488 unsigned short RplSize = RPL_SIZE;
489 unsigned short TplSize = TPL_SIZE;
490 unsigned short BufferSize = BUFFER_SIZE;
491 int i;
492
493 tp = netdev_priv(dev);
494
495 tp->ocpl.OPENOptions = 0;
496 tp->ocpl.OPENOptions |= ENABLE_FULL_DUPLEX_SELECTION;
497 tp->ocpl.FullDuplex = 0;
498 tp->ocpl.FullDuplex |= OPEN_FULL_DUPLEX_OFF;
499
500 /*
501 * Set node address
502 *
503 * We go ahead and put it in the OPB even though on
504 * most of the generic adapters this isn't required.
505 * Its simpler this way. -- ASF
506 */
507 for (i=0;i<6;i++)
508 tp->ocpl.NodeAddr[i] = ((unsigned char *)dev->dev_addr)[i];
509
510 tp->ocpl.GroupAddr = 0;
511 tp->ocpl.FunctAddr = 0;
512 tp->ocpl.RxListSize = cpu_to_be16((unsigned short)RplSize);
513 tp->ocpl.TxListSize = cpu_to_be16((unsigned short)TplSize);
514 tp->ocpl.BufSize = cpu_to_be16((unsigned short)BufferSize);
515 tp->ocpl.Reserved = 0;
516 tp->ocpl.TXBufMin = TX_BUF_MIN;
517 tp->ocpl.TXBufMax = TX_BUF_MAX;
518
519 Addr = htonl(((char *)tp->ProductID - (char *)tp) + tp->dmabuffer);
520
521 tp->ocpl.ProdIDAddr[0] = LOWORD(Addr);
522 tp->ocpl.ProdIDAddr[1] = HIWORD(Addr);
523}
524
525/*
526 * Send OPEN command to adapter
527 */
528static void tms380tr_open_adapter(struct net_device *dev)
529{
530 struct net_local *tp = netdev_priv(dev);
531
532 if(tp->OpenCommandIssued)
533 return;
534
535 tp->OpenCommandIssued = 1;
536 tms380tr_exec_cmd(dev, OC_OPEN);
537}
538
539/*
540 * Clear the adapter's interrupt flag. Clear system interrupt enable
541 * (SINTEN): disable adapter to system interrupts.
542 */
543static void tms380tr_disable_interrupts(struct net_device *dev)
544{
545 SIFWRITEB(0, SIFACL);
546}
547
548/*
549 * Set the adapter's interrupt flag. Set system interrupt enable
550 * (SINTEN): enable adapter to system interrupts.
551 */
552static void tms380tr_enable_interrupts(struct net_device *dev)
553{
554 SIFWRITEB(ACL_SINTEN, SIFACL);
555}
556
557/*
558 * Put command in command queue, try to execute it.
559 */
560static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command)
561{
562 struct net_local *tp = netdev_priv(dev);
563
564 tp->CMDqueue |= Command;
565 tms380tr_chk_outstanding_cmds(dev);
566}
567
/*
 * ndo_tx_timeout handler: the network core thinks transmission stalled.
 */
static void tms380tr_timeout(struct net_device *dev)
{
	/*
	 * If we get here, some higher level has decided we are broken.
	 * There should really be a "kick me" function call instead.
	 *
	 * Resetting the token ring adapter takes a long time so just
	 * fake transmission time and go on trying. Our own timeout
	 * routine is in tms380tr_timer_chk()
	 */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
581
582/*
583 * Gets skb from system, queues it and checks if it can be sent
584 */
585static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb,
586 struct net_device *dev)
587{
588 struct net_local *tp = netdev_priv(dev);
589 netdev_tx_t rc;
590
591 rc = tms380tr_hardware_send_packet(skb, dev);
592 if(tp->TplFree->NextTPLPtr->BusyFlag)
593 netif_stop_queue(dev);
594 return rc;
595}
596
597/*
598 * Move frames into adapter tx queue
599 */
600static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb,
601 struct net_device *dev)
602{
603 TPL *tpl;
604 short length;
605 unsigned char *buf;
606 unsigned long flags;
607 int i;
608 dma_addr_t dmabuf, newbuf;
609 struct net_local *tp = netdev_priv(dev);
610
611 /* Try to get a free TPL from the chain.
612 *
613 * NOTE: We *must* always leave one unused TPL in the chain,
614 * because otherwise the adapter might send frames twice.
615 */
616 spin_lock_irqsave(&tp->lock, flags);
617 if(tp->TplFree->NextTPLPtr->BusyFlag) { /* No free TPL */
618 if (tms380tr_debug > 0)
619 printk(KERN_DEBUG "%s: No free TPL\n", dev->name);
620 spin_unlock_irqrestore(&tp->lock, flags);
621 return NETDEV_TX_BUSY;
622 }
623
624 dmabuf = 0;
625
626 /* Is buffer reachable for Busmaster-DMA? */
627
628 length = skb->len;
629 dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE);
630 if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) {
631 /* Copy frame to local buffer */
632 dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE);
633 dmabuf = 0;
634 i = tp->TplFree->TPLIndex;
635 buf = tp->LocalTxBuffers[i];
636 skb_copy_from_linear_data(skb, buf, length);
637 newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer;
638 }
639 else {
640 /* Send direct from skb->data */
641 newbuf = dmabuf;
642 buf = skb->data;
643 }
644 /* Source address in packet? */
645 tms380tr_chk_src_addr(buf, dev->dev_addr);
646 tp->LastSendTime = jiffies;
647 tpl = tp->TplFree; /* Get the "free" TPL */
648 tpl->BusyFlag = 1; /* Mark TPL as busy */
649 tp->TplFree = tpl->NextTPLPtr;
650
651 /* Save the skb for delayed return of skb to system */
652 tpl->Skb = skb;
653 tpl->DMABuff = dmabuf;
654 tpl->FragList[0].DataCount = cpu_to_be16((unsigned short)length);
655 tpl->FragList[0].DataAddr = htonl(newbuf);
656
657 /* Write the data length in the transmit list. */
658 tpl->FrameSize = cpu_to_be16((unsigned short)length);
659 tpl->MData = buf;
660
661 /* Transmit the frame and set the status values. */
662 tms380tr_write_tpl_status(tpl, TX_VALID | TX_START_FRAME
663 | TX_END_FRAME | TX_PASS_SRC_ADDR
664 | TX_FRAME_IRQ);
665
666 /* Let adapter send the frame. */
667 tms380tr_exec_sifcmd(dev, CMD_TX_VALID);
668 spin_unlock_irqrestore(&tp->lock, flags);
669
670 return NETDEV_TX_OK;
671}
672
673/*
674 * Write the given value to the 'Status' field of the specified TPL.
675 * NOTE: This function should be used whenever the status of any TPL must be
676 * modified by the driver, because the compiler may otherwise change the
677 * order of instructions such that writing the TPL status may be executed at
678 * an undesirable time. When this function is used, the status is always
679 * written when the function is called.
680 */
681static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status)
682{
683 tpl->Status = Status;
684}
685
/*
 * Insert our hardware address as the frame's source address, but only
 * when the caller appears to have left it blank: frame[8] may carry
 * only the source-routing indicator (bit 7) and frame[12] must be 0.
 * The SR bit is preserved across the copy.
 *
 * NOTE(review): the original comments said "Compare 4 bytes" /
 * "Compare 2 bytes", but only frame[8] and frame[12] are actually
 * inspected — behavior kept as-is.
 */
static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr)
{
	unsigned char sr_bit;

	if (frame[8] & 0x7F)	/* bits other than the SR bit set? */
		return;
	if (frame[12] != 0)
		return;

	sr_bit = frame[8] & 0x80;
	memcpy(&frame[8], hw_addr, 6);
	frame[8] |= sr_bit;
}
699
700/*
701 * The timer routine: Check if adapter still open and working, reopen if not.
702 */
703static void tms380tr_timer_chk(unsigned long data)
704{
705 struct net_device *dev = (struct net_device*)data;
706 struct net_local *tp = netdev_priv(dev);
707
708 if(tp->HaltInProgress)
709 return;
710
711 tms380tr_chk_outstanding_cmds(dev);
712 if(time_before(tp->LastSendTime + SEND_TIMEOUT, jiffies) &&
713 (tp->TplFree != tp->TplBusy))
714 {
715 /* Anything to send, but stalled too long */
716 tp->LastSendTime = jiffies;
717 tms380tr_exec_cmd(dev, OC_CLOSE); /* Does reopen automatically */
718 }
719
720 tp->timer.expires = jiffies + 2*HZ;
721 add_timer(&tp->timer);
722
723 if(tp->AdapterOpenFlag || tp->ReOpenInProgress)
724 return;
725 tp->ReOpenInProgress = 1;
726 tms380tr_open_adapter(dev);
727}
728
729/*
730 * The typical workload of the driver: Handle the network interface interrupts.
731 */
732irqreturn_t tms380tr_interrupt(int irq, void *dev_id)
733{
734 struct net_device *dev = dev_id;
735 struct net_local *tp;
736 unsigned short irq_type;
737 int handled = 0;
738
739 tp = netdev_priv(dev);
740
741 irq_type = SIFREADW(SIFSTS);
742
743 while(irq_type & STS_SYSTEM_IRQ) {
744 handled = 1;
745 irq_type &= STS_IRQ_MASK;
746
747 if(!tms380tr_chk_ssb(tp, irq_type)) {
748 printk(KERN_DEBUG "%s: DATA LATE occurred\n", dev->name);
749 break;
750 }
751
752 switch(irq_type) {
753 case STS_IRQ_RECEIVE_STATUS:
754 tms380tr_reset_interrupt(dev);
755 tms380tr_rcv_status_irq(dev);
756 break;
757
758 case STS_IRQ_TRANSMIT_STATUS:
759 /* Check if TRANSMIT.HALT command is complete */
760 if(tp->ssb.Parm[0] & COMMAND_COMPLETE) {
761 tp->TransmitCommandActive = 0;
762 tp->TransmitHaltScheduled = 0;
763
764 /* Issue a new transmit command. */
765 tms380tr_exec_cmd(dev, OC_TRANSMIT);
766 }
767
768 tms380tr_reset_interrupt(dev);
769 tms380tr_tx_status_irq(dev);
770 break;
771
772 case STS_IRQ_COMMAND_STATUS:
773 /* The SSB contains status of last command
774 * other than receive/transmit.
775 */
776 tms380tr_cmd_status_irq(dev);
777 break;
778
779 case STS_IRQ_SCB_CLEAR:
780 /* The SCB is free for another command. */
781 tp->ScbInUse = 0;
782 tms380tr_chk_outstanding_cmds(dev);
783 break;
784
785 case STS_IRQ_RING_STATUS:
786 tms380tr_ring_status_irq(dev);
787 break;
788
789 case STS_IRQ_ADAPTER_CHECK:
790 tms380tr_chk_irq(dev);
791 break;
792
793 case STS_IRQ_LLC_STATUS:
794 printk(KERN_DEBUG "tms380tr: unexpected LLC status IRQ\n");
795 break;
796
797 case STS_IRQ_TIMER:
798 printk(KERN_DEBUG "tms380tr: unexpected Timer IRQ\n");
799 break;
800
801 case STS_IRQ_RECEIVE_PENDING:
802 printk(KERN_DEBUG "tms380tr: unexpected Receive Pending IRQ\n");
803 break;
804
805 default:
806 printk(KERN_DEBUG "Unknown Token Ring IRQ (0x%04x)\n", irq_type);
807 break;
808 }
809
810 /* Reset system interrupt if not already done. */
811 if(irq_type != STS_IRQ_TRANSMIT_STATUS &&
812 irq_type != STS_IRQ_RECEIVE_STATUS) {
813 tms380tr_reset_interrupt(dev);
814 }
815
816 irq_type = SIFREADW(SIFSTS);
817 }
818
819 return IRQ_RETVAL(handled);
820}
821
822/*
823 * Reset the INTERRUPT SYSTEM bit and issue SSB CLEAR command.
824 */
825static void tms380tr_reset_interrupt(struct net_device *dev)
826{
827 struct net_local *tp = netdev_priv(dev);
828 SSB *ssb = &tp->ssb;
829
830 /*
831 * [Workaround for "Data Late"]
832 * Set all fields of the SSB to well-defined values so we can
833 * check if the adapter has written the SSB.
834 */
835
836 ssb->STS = (unsigned short) -1;
837 ssb->Parm[0] = (unsigned short) -1;
838 ssb->Parm[1] = (unsigned short) -1;
839 ssb->Parm[2] = (unsigned short) -1;
840
841 /* Free SSB by issuing SSB_CLEAR command after reading IRQ code
842 * and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts.
843 */
844 tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ);
845}
846
847/*
848 * Check if the SSB has actually been written by the adapter.
849 */
850static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType)
851{
852 SSB *ssb = &tp->ssb; /* The address of the SSB. */
853
854 /* C 0 1 2 INTERRUPT CODE
855 * - - - - --------------
856 * 1 1 1 1 TRANSMIT STATUS
857 * 1 1 1 1 RECEIVE STATUS
858 * 1 ? ? 0 COMMAND STATUS
859 * 0 0 0 0 SCB CLEAR
860 * 1 1 0 0 RING STATUS
861 * 0 0 0 0 ADAPTER CHECK
862 *
863 * 0 = SSB field not affected by interrupt
864 * 1 = SSB field is affected by interrupt
865 *
866 * C = SSB ADDRESS +0: COMMAND
867 * 0 = SSB ADDRESS +2: STATUS 0
868 * 1 = SSB ADDRESS +4: STATUS 1
869 * 2 = SSB ADDRESS +6: STATUS 2
870 */
871
872 /* Check if this interrupt does use the SSB. */
873
874 if(IrqType != STS_IRQ_TRANSMIT_STATUS &&
875 IrqType != STS_IRQ_RECEIVE_STATUS &&
876 IrqType != STS_IRQ_COMMAND_STATUS &&
877 IrqType != STS_IRQ_RING_STATUS)
878 {
879 return 1; /* SSB not involved. */
880 }
881
882 /* Note: All fields of the SSB have been set to all ones (-1) after it
883 * has last been used by the software (see DriverIsr()).
884 *
885 * Check if the affected SSB fields are still unchanged.
886 */
887
888 if(ssb->STS == (unsigned short) -1)
889 return 0; /* Command field not yet available. */
890 if(IrqType == STS_IRQ_COMMAND_STATUS)
891 return 1; /* Status fields not always affected. */
892 if(ssb->Parm[0] == (unsigned short) -1)
893 return 0; /* Status 1 field not yet available. */
894 if(IrqType == STS_IRQ_RING_STATUS)
895 return 1; /* Status 2 & 3 fields not affected. */
896
897 /* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */
898 if(ssb->Parm[1] == (unsigned short) -1)
899 return 0; /* Status 2 field not yet available. */
900 if(ssb->Parm[2] == (unsigned short) -1)
901 return 0; /* Status 3 field not yet available. */
902
903 return 1; /* All SSB fields have been written by the adapter. */
904}
905
906/*
907 * Evaluates the command results status in the SSB status field.
908 */
909static void tms380tr_cmd_status_irq(struct net_device *dev)
910{
911 struct net_local *tp = netdev_priv(dev);
912 unsigned short ssb_cmd, ssb_parm_0;
913 unsigned short ssb_parm_1;
914 char *open_err = "Open error -";
915 char *code_err = "Open code -";
916
917 /* Copy the ssb values to local variables */
918 ssb_cmd = tp->ssb.STS;
919 ssb_parm_0 = tp->ssb.Parm[0];
920 ssb_parm_1 = tp->ssb.Parm[1];
921
922 if(ssb_cmd == OPEN)
923 {
924 tp->Sleeping = 0;
925 if(!tp->ReOpenInProgress)
926 wake_up_interruptible(&tp->wait_for_tok_int);
927
928 tp->OpenCommandIssued = 0;
929 tp->ScbInUse = 0;
930
931 if((ssb_parm_0 & 0x00FF) == GOOD_COMPLETION)
932 {
933 /* Success, the adapter is open. */
934 tp->LobeWireFaultLogged = 0;
935 tp->AdapterOpenFlag = 1;
936 tp->AdapterVirtOpenFlag = 1;
937 tp->TransmitCommandActive = 0;
938 tms380tr_exec_cmd(dev, OC_TRANSMIT);
939 tms380tr_exec_cmd(dev, OC_RECEIVE);
940
941 if(tp->ReOpenInProgress)
942 tp->ReOpenInProgress = 0;
943
944 return;
945 }
946 else /* The adapter did not open. */
947 {
948 if(ssb_parm_0 & NODE_ADDR_ERROR)
949 printk(KERN_INFO "%s: Node address error\n",
950 dev->name);
951 if(ssb_parm_0 & LIST_SIZE_ERROR)
952 printk(KERN_INFO "%s: List size error\n",
953 dev->name);
954 if(ssb_parm_0 & BUF_SIZE_ERROR)
955 printk(KERN_INFO "%s: Buffer size error\n",
956 dev->name);
957 if(ssb_parm_0 & TX_BUF_COUNT_ERROR)
958 printk(KERN_INFO "%s: Tx buffer count error\n",
959 dev->name);
960 if(ssb_parm_0 & INVALID_OPEN_OPTION)
961 printk(KERN_INFO "%s: Invalid open option\n",
962 dev->name);
963 if(ssb_parm_0 & OPEN_ERROR)
964 {
965 /* Show the open phase. */
966 switch(ssb_parm_0 & OPEN_PHASES_MASK)
967 {
968 case LOBE_MEDIA_TEST:
969 if(!tp->LobeWireFaultLogged)
970 {
971 tp->LobeWireFaultLogged = 1;
972 printk(KERN_INFO "%s: %s Lobe wire fault (check cable !).\n", dev->name, open_err);
973 }
974 tp->ReOpenInProgress = 1;
975 tp->AdapterOpenFlag = 0;
976 tp->AdapterVirtOpenFlag = 1;
977 tms380tr_open_adapter(dev);
978 return;
979
980 case PHYSICAL_INSERTION:
981 printk(KERN_INFO "%s: %s Physical insertion.\n", dev->name, open_err);
982 break;
983
984 case ADDRESS_VERIFICATION:
985 printk(KERN_INFO "%s: %s Address verification.\n", dev->name, open_err);
986 break;
987
988 case PARTICIPATION_IN_RING_POLL:
989 printk(KERN_INFO "%s: %s Participation in ring poll.\n", dev->name, open_err);
990 break;
991
992 case REQUEST_INITIALISATION:
993 printk(KERN_INFO "%s: %s Request initialisation.\n", dev->name, open_err);
994 break;
995
996 case FULLDUPLEX_CHECK:
997 printk(KERN_INFO "%s: %s Full duplex check.\n", dev->name, open_err);
998 break;
999
1000 default:
1001 printk(KERN_INFO "%s: %s Unknown open phase\n", dev->name, open_err);
1002 break;
1003 }
1004
1005 /* Show the open errors. */
1006 switch(ssb_parm_0 & OPEN_ERROR_CODES_MASK)
1007 {
1008 case OPEN_FUNCTION_FAILURE:
1009 printk(KERN_INFO "%s: %s OPEN_FUNCTION_FAILURE", dev->name, code_err);
1010 tp->LastOpenStatus =
1011 OPEN_FUNCTION_FAILURE;
1012 break;
1013
1014 case OPEN_SIGNAL_LOSS:
1015 printk(KERN_INFO "%s: %s OPEN_SIGNAL_LOSS\n", dev->name, code_err);
1016 tp->LastOpenStatus =
1017 OPEN_SIGNAL_LOSS;
1018 break;
1019
1020 case OPEN_TIMEOUT:
1021 printk(KERN_INFO "%s: %s OPEN_TIMEOUT\n", dev->name, code_err);
1022 tp->LastOpenStatus =
1023 OPEN_TIMEOUT;
1024 break;
1025
1026 case OPEN_RING_FAILURE:
1027 printk(KERN_INFO "%s: %s OPEN_RING_FAILURE\n", dev->name, code_err);
1028 tp->LastOpenStatus =
1029 OPEN_RING_FAILURE;
1030 break;
1031
1032 case OPEN_RING_BEACONING:
1033 printk(KERN_INFO "%s: %s OPEN_RING_BEACONING\n", dev->name, code_err);
1034 tp->LastOpenStatus =
1035 OPEN_RING_BEACONING;
1036 break;
1037
1038 case OPEN_DUPLICATE_NODEADDR:
1039 printk(KERN_INFO "%s: %s OPEN_DUPLICATE_NODEADDR\n", dev->name, code_err);
1040 tp->LastOpenStatus =
1041 OPEN_DUPLICATE_NODEADDR;
1042 break;
1043
1044 case OPEN_REQUEST_INIT:
1045 printk(KERN_INFO "%s: %s OPEN_REQUEST_INIT\n", dev->name, code_err);
1046 tp->LastOpenStatus =
1047 OPEN_REQUEST_INIT;
1048 break;
1049
1050 case OPEN_REMOVE_RECEIVED:
1051 printk(KERN_INFO "%s: %s OPEN_REMOVE_RECEIVED", dev->name, code_err);
1052 tp->LastOpenStatus =
1053 OPEN_REMOVE_RECEIVED;
1054 break;
1055
1056 case OPEN_FULLDUPLEX_SET:
1057 printk(KERN_INFO "%s: %s OPEN_FULLDUPLEX_SET\n", dev->name, code_err);
1058 tp->LastOpenStatus =
1059 OPEN_FULLDUPLEX_SET;
1060 break;
1061
1062 default:
1063 printk(KERN_INFO "%s: %s Unknown open err code", dev->name, code_err);
1064 tp->LastOpenStatus =
1065 OPEN_FUNCTION_FAILURE;
1066 break;
1067 }
1068 }
1069
1070 tp->AdapterOpenFlag = 0;
1071 tp->AdapterVirtOpenFlag = 0;
1072
1073 return;
1074 }
1075 }
1076 else
1077 {
1078 if(ssb_cmd != READ_ERROR_LOG)
1079 return;
1080
1081 /* Add values from the error log table to the MAC
1082 * statistics counters and update the errorlogtable
1083 * memory.
1084 */
1085 tp->MacStat.line_errors += tp->errorlogtable.Line_Error;
1086 tp->MacStat.burst_errors += tp->errorlogtable.Burst_Error;
1087 tp->MacStat.A_C_errors += tp->errorlogtable.ARI_FCI_Error;
1088 tp->MacStat.lost_frames += tp->errorlogtable.Lost_Frame_Error;
1089 tp->MacStat.recv_congest_count += tp->errorlogtable.Rx_Congest_Error;
1090 tp->MacStat.rx_errors += tp->errorlogtable.Rx_Congest_Error;
1091 tp->MacStat.frame_copied_errors += tp->errorlogtable.Frame_Copied_Error;
1092 tp->MacStat.token_errors += tp->errorlogtable.Token_Error;
1093 tp->MacStat.dummy1 += tp->errorlogtable.DMA_Bus_Error;
1094 tp->MacStat.dummy1 += tp->errorlogtable.DMA_Parity_Error;
1095 tp->MacStat.abort_delimiters += tp->errorlogtable.AbortDelimeters;
1096 tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error;
1097 tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error;
1098 }
1099}
1100
1101/*
1102 * The inverse routine to tms380tr_open().
1103 */
1104int tms380tr_close(struct net_device *dev)
1105{
1106 struct net_local *tp = netdev_priv(dev);
1107 netif_stop_queue(dev);
1108
1109 del_timer(&tp->timer);
1110
1111 /* Flush the Tx and disable Rx here. */
1112
1113 tp->HaltInProgress = 1;
1114 tms380tr_exec_cmd(dev, OC_CLOSE);
1115 tp->timer.expires = jiffies + 1*HZ;
1116 tp->timer.function = tms380tr_timer_end_wait;
1117 tp->timer.data = (unsigned long)dev;
1118 add_timer(&tp->timer);
1119
1120 tms380tr_enable_interrupts(dev);
1121
1122 tp->Sleeping = 1;
1123 interruptible_sleep_on(&tp->wait_for_tok_int);
1124 tp->TransmitCommandActive = 0;
1125
1126 del_timer(&tp->timer);
1127 tms380tr_disable_interrupts(dev);
1128
1129#ifdef CONFIG_ISA
1130 if(dev->dma > 0)
1131 {
1132 unsigned long flags=claim_dma_lock();
1133 disable_dma(dev->dma);
1134 release_dma_lock(flags);
1135 }
1136#endif
1137
1138 SIFWRITEW(0xFF00, SIFCMD);
1139#if 0
1140 if(dev->dma > 0) /* what the? */
1141 SIFWRITEB(0xff, POSREG);
1142#endif
1143 tms380tr_cancel_tx_queue(tp);
1144
1145 return 0;
1146}
1147
1148/*
1149 * Get the current statistics. This may be called with the card open
1150 * or closed.
1151 */
1152static struct net_device_stats *tms380tr_get_stats(struct net_device *dev)
1153{
1154 struct net_local *tp = netdev_priv(dev);
1155
1156 return (struct net_device_stats *)&tp->MacStat;
1157}
1158
1159/*
1160 * Set or clear the multicast filter for this adapter.
1161 */
1162static void tms380tr_set_multicast_list(struct net_device *dev)
1163{
1164 struct net_local *tp = netdev_priv(dev);
1165 unsigned int OpenOptions;
1166
1167 OpenOptions = tp->ocpl.OPENOptions &
1168 ~(PASS_ADAPTER_MAC_FRAMES
1169 | PASS_ATTENTION_FRAMES
1170 | PASS_BEACON_MAC_FRAMES
1171 | COPY_ALL_MAC_FRAMES
1172 | COPY_ALL_NON_MAC_FRAMES);
1173
1174 tp->ocpl.FunctAddr = 0;
1175
1176 if(dev->flags & IFF_PROMISC)
1177 /* Enable promiscuous mode */
1178 OpenOptions |= COPY_ALL_NON_MAC_FRAMES |
1179 COPY_ALL_MAC_FRAMES;
1180 else
1181 {
1182 if(dev->flags & IFF_ALLMULTI)
1183 {
1184 /* Disable promiscuous mode, use normal mode. */
1185 tp->ocpl.FunctAddr = 0xFFFFFFFF;
1186 }
1187 else
1188 {
1189 struct netdev_hw_addr *ha;
1190
1191 netdev_for_each_mc_addr(ha, dev) {
1192 ((char *)(&tp->ocpl.FunctAddr))[0] |=
1193 ha->addr[2];
1194 ((char *)(&tp->ocpl.FunctAddr))[1] |=
1195 ha->addr[3];
1196 ((char *)(&tp->ocpl.FunctAddr))[2] |=
1197 ha->addr[4];
1198 ((char *)(&tp->ocpl.FunctAddr))[3] |=
1199 ha->addr[5];
1200 }
1201 }
1202 tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
1203 }
1204
1205 tp->ocpl.OPENOptions = OpenOptions;
1206 tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS);
1207}
1208
1209/*
1210 * Wait for some time (microseconds)
1211 */
1212void tms380tr_wait(unsigned long time)
1213{
1214#if 0
1215 long tmp;
1216
1217 tmp = jiffies + time/(1000000/HZ);
1218 do {
1219 tmp = schedule_timeout_interruptible(tmp);
1220 } while(time_after(tmp, jiffies));
1221#else
1222 mdelay(time / 1000);
1223#endif
1224}
1225
1226/*
1227 * Write a command value to the SIFCMD register
1228 */
1229static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue)
1230{
1231 unsigned short cmd;
1232 unsigned short SifStsValue;
1233 unsigned long loop_counter;
1234
1235 WriteValue = ((WriteValue ^ CMD_SYSTEM_IRQ) | CMD_INTERRUPT_ADAPTER);
1236 cmd = (unsigned short)WriteValue;
1237 loop_counter = 0,5 * 800000;
1238 do {
1239 SifStsValue = SIFREADW(SIFSTS);
1240 } while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--);
1241 SIFWRITEW(cmd, SIFCMD);
1242}
1243
1244/*
1245 * Processes adapter hardware reset, halts adapter and downloads firmware,
1246 * clears the halt bit.
1247 */
1248static int tms380tr_reset_adapter(struct net_device *dev)
1249{
1250 struct net_local *tp = netdev_priv(dev);
1251 unsigned short *fw_ptr;
1252 unsigned short count, c, count2;
1253 const struct firmware *fw_entry = NULL;
1254
1255 if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
1256 printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
1257 dev->name, "tms380tr.bin");
1258 return -1;
1259 }
1260
1261 fw_ptr = (unsigned short *)fw_entry->data;
1262 count2 = fw_entry->size / 2;
1263
1264 /* Hardware adapter reset */
1265 SIFWRITEW(ACL_ARESET, SIFACL);
1266 tms380tr_wait(40);
1267
1268 c = SIFREADW(SIFACL);
1269 tms380tr_wait(20);
1270
1271 if(dev->dma == 0) /* For PCI adapters */
1272 {
1273 c &= ~(ACL_NSELOUT0 | ACL_NSELOUT1); /* Clear bits */
1274 if(tp->setnselout)
1275 c |= (*tp->setnselout)(dev);
1276 }
1277
1278 /* In case a command is pending - forget it */
1279 tp->ScbInUse = 0;
1280
1281 c &= ~ACL_ARESET; /* Clear adapter reset bit */
1282 c |= ACL_CPHALT; /* Halt adapter CPU, allow download */
1283 c |= ACL_BOOT;
1284 c |= ACL_SINTEN;
1285 c &= ~ACL_PSDMAEN; /* Clear pseudo dma bit */
1286 SIFWRITEW(c, SIFACL);
1287 tms380tr_wait(40);
1288
1289 count = 0;
1290 /* Download firmware via DIO interface: */
1291 do {
1292 if (count2 < 3) continue;
1293
1294 /* Download first address part */
1295 SIFWRITEW(*fw_ptr, SIFADX);
1296 fw_ptr++;
1297 count2--;
1298 /* Download second address part */
1299 SIFWRITEW(*fw_ptr, SIFADD);
1300 fw_ptr++;
1301 count2--;
1302
1303 if((count = *fw_ptr) != 0) /* Load loop counter */
1304 {
1305 fw_ptr++; /* Download block data */
1306 count2--;
1307 if (count > count2) continue;
1308
1309 for(; count > 0; count--)
1310 {
1311 SIFWRITEW(*fw_ptr, SIFINC);
1312 fw_ptr++;
1313 count2--;
1314 }
1315 }
1316 else /* Stop, if last block downloaded */
1317 {
1318 c = SIFREADW(SIFACL);
1319 c &= (~ACL_CPHALT | ACL_SINTEN);
1320
1321 /* Clear CPHALT and start BUD */
1322 SIFWRITEW(c, SIFACL);
1323 release_firmware(fw_entry);
1324 return 1;
1325 }
1326 } while(count == 0);
1327
1328 release_firmware(fw_entry);
1329 printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name);
1330 return -1;
1331}
1332
1333MODULE_FIRMWARE("tms380tr.bin");
1334
1335/*
1336 * Starts bring up diagnostics of token ring adapter and evaluates
1337 * diagnostic results.
1338 */
1339static int tms380tr_bringup_diags(struct net_device *dev)
1340{
1341 int loop_cnt, retry_cnt;
1342 unsigned short Status;
1343
1344 tms380tr_wait(HALF_SECOND);
1345 tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
1346 tms380tr_wait(HALF_SECOND);
1347
1348 retry_cnt = BUD_MAX_RETRIES; /* maximal number of retrys */
1349
1350 do {
1351 retry_cnt--;
1352 if(tms380tr_debug > 3)
1353 printk(KERN_DEBUG "BUD-Status: ");
1354 loop_cnt = BUD_MAX_LOOPCNT; /* maximum: three seconds*/
1355 do { /* Inspect BUD results */
1356 loop_cnt--;
1357 tms380tr_wait(HALF_SECOND);
1358 Status = SIFREADW(SIFSTS);
1359 Status &= STS_MASK;
1360
1361 if(tms380tr_debug > 3)
1362 printk(KERN_DEBUG " %04X\n", Status);
1363 /* BUD successfully completed */
1364 if(Status == STS_INITIALIZE)
1365 return 1;
1366 /* Unrecoverable hardware error, BUD not completed? */
1367 } while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST))
1368 != (STS_ERROR | STS_TEST)));
1369
1370 /* Error preventing completion of BUD */
1371 if(retry_cnt > 0)
1372 {
1373 printk(KERN_INFO "%s: Adapter Software Reset.\n",
1374 dev->name);
1375 tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
1376 tms380tr_wait(HALF_SECOND);
1377 }
1378 } while(retry_cnt > 0);
1379
1380 Status = SIFREADW(SIFSTS);
1381
1382 printk(KERN_INFO "%s: Hardware error\n", dev->name);
1383 /* Hardware error occurred! */
1384 Status &= 0x001f;
1385 if (Status & 0x0010)
1386 printk(KERN_INFO "%s: BUD Error: Timeout\n", dev->name);
1387 else if ((Status & 0x000f) > 6)
1388 printk(KERN_INFO "%s: BUD Error: Illegal Failure\n", dev->name);
1389 else
1390 printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f);
1391
1392 return -1;
1393}
1394
1395/*
1396 * Copy initialisation data to adapter memory, beginning at address
1397 * 1:0A00; Starting DMA test and evaluating result bits.
1398 */
1399static int tms380tr_init_adapter(struct net_device *dev)
1400{
1401 struct net_local *tp = netdev_priv(dev);
1402
1403 const unsigned char SCB_Test[6] = {0x00, 0x00, 0xC1, 0xE2, 0xD4, 0x8B};
1404 const unsigned char SSB_Test[8] = {0xFF, 0xFF, 0xD1, 0xD7,
1405 0xC5, 0xD9, 0xC3, 0xD4};
1406 void *ptr = (void *)&tp->ipb;
1407 unsigned short *ipb_ptr = (unsigned short *)ptr;
1408 unsigned char *cb_ptr = (unsigned char *) &tp->scb;
1409 unsigned char *sb_ptr = (unsigned char *) &tp->ssb;
1410 unsigned short Status;
1411 int i, loop_cnt, retry_cnt;
1412
1413 /* Normalize: byte order low/high, word order high/low! (only IPB!) */
1414 tp->ipb.SCB_Addr = SWAPW(((char *)&tp->scb - (char *)tp) + tp->dmabuffer);
1415 tp->ipb.SSB_Addr = SWAPW(((char *)&tp->ssb - (char *)tp) + tp->dmabuffer);
1416
1417 if(tms380tr_debug > 3)
1418 {
1419 printk(KERN_DEBUG "%s: buffer (real): %lx\n", dev->name, (long) &tp->scb);
1420 printk(KERN_DEBUG "%s: buffer (virt): %lx\n", dev->name, (long) ((char *)&tp->scb - (char *)tp) + (long) tp->dmabuffer);
1421 printk(KERN_DEBUG "%s: buffer (DMA) : %lx\n", dev->name, (long) tp->dmabuffer);
1422 printk(KERN_DEBUG "%s: buffer (tp) : %lx\n", dev->name, (long) tp);
1423 }
1424 /* Maximum: three initialization retries */
1425 retry_cnt = INIT_MAX_RETRIES;
1426
1427 do {
1428 retry_cnt--;
1429
1430 /* Transfer initialization block */
1431 SIFWRITEW(0x0001, SIFADX);
1432
1433 /* To address 0001:0A00 of adapter RAM */
1434 SIFWRITEW(0x0A00, SIFADD);
1435
1436 /* Write 11 words to adapter RAM */
1437 for(i = 0; i < 11; i++)
1438 SIFWRITEW(ipb_ptr[i], SIFINC);
1439
1440 /* Execute SCB adapter command */
1441 tms380tr_exec_sifcmd(dev, CMD_EXECUTE);
1442
1443 loop_cnt = INIT_MAX_LOOPCNT; /* Maximum: 11 seconds */
1444
1445 /* While remaining retries, no error and not completed */
1446 do {
1447 Status = 0;
1448 loop_cnt--;
1449 tms380tr_wait(HALF_SECOND);
1450
1451 /* Mask interesting status bits */
1452 Status = SIFREADW(SIFSTS);
1453 Status &= STS_MASK;
1454 } while(((Status &(STS_INITIALIZE | STS_ERROR | STS_TEST)) != 0) &&
1455 ((Status & STS_ERROR) == 0) && (loop_cnt != 0));
1456
1457 if((Status & (STS_INITIALIZE | STS_ERROR | STS_TEST)) == 0)
1458 {
1459 /* Initialization completed without error */
1460 i = 0;
1461 do { /* Test if contents of SCB is valid */
1462 if(SCB_Test[i] != *(cb_ptr + i))
1463 {
1464 printk(KERN_INFO "%s: DMA failed\n", dev->name);
1465 /* DMA data error: wrong data in SCB */
1466 return -1;
1467 }
1468 i++;
1469 } while(i < 6);
1470
1471 i = 0;
1472 do { /* Test if contents of SSB is valid */
1473 if(SSB_Test[i] != *(sb_ptr + i))
1474 /* DMA data error: wrong data in SSB */
1475 return -1;
1476 i++;
1477 } while (i < 8);
1478
1479 return 1; /* Adapter successfully initialized */
1480 }
1481 else
1482 {
1483 if((Status & STS_ERROR) != 0)
1484 {
1485 /* Initialization error occurred */
1486 Status = SIFREADW(SIFSTS);
1487 Status &= STS_ERROR_MASK;
1488 /* ShowInitialisationErrorCode(Status); */
1489 printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status);
1490 return -1; /* Unrecoverable error */
1491 }
1492 else
1493 {
1494 if(retry_cnt > 0)
1495 {
1496 /* Reset adapter and try init again */
1497 tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
1498 tms380tr_wait(HALF_SECOND);
1499 }
1500 }
1501 }
1502 } while(retry_cnt > 0);
1503
1504 printk(KERN_INFO "%s: Retry exceeded\n", dev->name);
1505 return -1;
1506}
1507
/*
 * Checks for outstanding commands in the command queue and tries to execute
 * one immediately.  The corresponding command flag in the queue is cleared.
 *
 * At most one command is issued per call: the highest-priority pending bit
 * in tp->CMDqueue is translated into an SCB request and handed to the
 * adapter, which raises an IRQ on completion.  The ScbInUse flag marks the
 * SCB busy until then.
 */
static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned long Addr = 0;

	if(tp->CMDqueue == 0)
		return;		/* No command execution */

	/* If SCB in use: no command */
	if(tp->ScbInUse == 1)
		return;

	/* Check if adapter is opened, avoiding COMMAND_REJECT
	 * interrupt by the adapter!
	 */
	if (tp->AdapterOpenFlag == 0) {
		if (tp->CMDqueue & OC_OPEN) {
			/* Execute OPEN command */
			tp->CMDqueue ^= OC_OPEN;

			/* Parm[0-1]: 32 bit DMA address of the open parameter
			 * list, split into two words (byte-swapped by htonl).
			 */
			Addr = htonl(((char *)&tp->ocpl - (char *)tp) + tp->dmabuffer);
			tp->scb.Parm[0] = LOWORD(Addr);
			tp->scb.Parm[1] = HIWORD(Addr);
			tp->scb.CMD = OPEN;
		} else
			/* No OPEN command queued, but adapter closed. Note:
			 * We'll try to re-open the adapter in DriverPoll()
			 */
			return;		/* No adapter command issued */
	} else {
		/* Adapter is open; evaluate command queue: try to execute
		 * outstanding commands (depending on priority!) CLOSE
		 * command queued
		 */
		if (tp->CMDqueue & OC_CLOSE) {
			tp->CMDqueue ^= OC_CLOSE;
			tp->AdapterOpenFlag = 0;
			tp->scb.Parm[0] = 0; /* Parm[0], Parm[1] are ignored */
			tp->scb.Parm[1] = 0; /* but should be set to zero! */
			tp->scb.CMD = CLOSE;
			if(!tp->HaltInProgress)
				tp->CMDqueue |= OC_OPEN; /* re-open adapter */
			else
				tp->CMDqueue = 0; /* no more commands */
		} else if (tp->CMDqueue & OC_RECEIVE) {
			tp->CMDqueue ^= OC_RECEIVE;
			/* Parm[0-1]: DMA address of the current RPL head. */
			Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer);
			tp->scb.Parm[0] = LOWORD(Addr);
			tp->scb.Parm[1] = HIWORD(Addr);
			tp->scb.CMD = RECEIVE;
		} else if (tp->CMDqueue & OC_TRANSMIT_HALT) {
			/* NOTE: TRANSMIT.HALT must be checked
			 * before TRANSMIT.
			 */
			tp->CMDqueue ^= OC_TRANSMIT_HALT;
			tp->scb.CMD = TRANSMIT_HALT;

			/* Parm[0] and Parm[1] are ignored
			 * but should be set to zero!
			 */
			tp->scb.Parm[0] = 0;
			tp->scb.Parm[1] = 0;
		} else if (tp->CMDqueue & OC_TRANSMIT) {
			/* NOTE: TRANSMIT must be
			 * checked after TRANSMIT.HALT
			 */
			if (tp->TransmitCommandActive) {
				/* A TRANSMIT is still running: schedule a
				 * halt first; the TRANSMIT stays queued.
				 */
				if (!tp->TransmitHaltScheduled) {
					tp->TransmitHaltScheduled = 1;
					tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT);
				}
				tp->TransmitCommandActive = 0;
				return;
			}

			tp->CMDqueue ^= OC_TRANSMIT;
			tms380tr_cancel_tx_queue(tp);
			/* Parm[0-1]: DMA address of the busy TPL head. */
			Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer);
			tp->scb.Parm[0] = LOWORD(Addr);
			tp->scb.Parm[1] = HIWORD(Addr);
			tp->scb.CMD = TRANSMIT;
			tp->TransmitCommandActive = 1;
		} else if (tp->CMDqueue & OC_MODIFY_OPEN_PARMS) {
			tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS;
			tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/
			tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION;
			tp->scb.Parm[1] = 0; /* is ignored but should be zero */
			tp->scb.CMD = MODIFY_OPEN_PARMS;
		} else if (tp->CMDqueue & OC_SET_FUNCT_ADDR) {
			tp->CMDqueue ^= OC_SET_FUNCT_ADDR;
			tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr);
			tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr);
			tp->scb.CMD = SET_FUNCT_ADDR;
		} else if (tp->CMDqueue & OC_SET_GROUP_ADDR) {
			tp->CMDqueue ^= OC_SET_GROUP_ADDR;
			tp->scb.Parm[0] = LOWORD(tp->ocpl.GroupAddr);
			tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr);
			tp->scb.CMD = SET_GROUP_ADDR;
		} else if (tp->CMDqueue & OC_READ_ERROR_LOG) {
			tp->CMDqueue ^= OC_READ_ERROR_LOG;
			/* Parm[0-1]: DMA address of the error log table. */
			Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer);
			tp->scb.Parm[0] = LOWORD(Addr);
			tp->scb.Parm[1] = HIWORD(Addr);
			tp->scb.CMD = READ_ERROR_LOG;
		} else {
			printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n");
			tp->CMDqueue = 0;
			return;
		}
	}

	tp->ScbInUse = 1;	/* Set semaphore: SCB in use. */

	/* Execute SCB and generate IRQ when done. */
	tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST);
}
1628
1629/*
1630 * IRQ conditions: signal loss on the ring, transmit or receive of beacon
1631 * frames (disabled if bit 1 of OPEN option is set); report error MAC
1632 * frame transmit (disabled if bit 2 of OPEN option is set); open or short
1633 * circuit fault on the lobe is detected; remove MAC frame received;
1634 * error counter overflow (255); opened adapter is the only station in ring.
1635 * After some of the IRQs the adapter is closed!
1636 */
1637static void tms380tr_ring_status_irq(struct net_device *dev)
1638{
1639 struct net_local *tp = netdev_priv(dev);
1640
1641 tp->CurrentRingStatus = be16_to_cpu((unsigned short)tp->ssb.Parm[0]);
1642
1643 /* First: fill up statistics */
1644 if(tp->ssb.Parm[0] & SIGNAL_LOSS)
1645 {
1646 printk(KERN_INFO "%s: Signal Loss\n", dev->name);
1647 tp->MacStat.line_errors++;
1648 }
1649
1650 /* Adapter is closed, but initialized */
1651 if(tp->ssb.Parm[0] & LOBE_WIRE_FAULT)
1652 {
1653 printk(KERN_INFO "%s: Lobe Wire Fault, Reopen Adapter\n",
1654 dev->name);
1655 tp->MacStat.line_errors++;
1656 }
1657
1658 if(tp->ssb.Parm[0] & RING_RECOVERY)
1659 printk(KERN_INFO "%s: Ring Recovery\n", dev->name);
1660
1661 /* Counter overflow: read error log */
1662 if(tp->ssb.Parm[0] & COUNTER_OVERFLOW)
1663 {
1664 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
1665 tms380tr_exec_cmd(dev, OC_READ_ERROR_LOG);
1666 }
1667
1668 /* Adapter is closed, but initialized */
1669 if(tp->ssb.Parm[0] & REMOVE_RECEIVED)
1670 printk(KERN_INFO "%s: Remove Received, Reopen Adapter\n",
1671 dev->name);
1672
1673 /* Adapter is closed, but initialized */
1674 if(tp->ssb.Parm[0] & AUTO_REMOVAL_ERROR)
1675 printk(KERN_INFO "%s: Auto Removal Error, Reopen Adapter\n",
1676 dev->name);
1677
1678 if(tp->ssb.Parm[0] & HARD_ERROR)
1679 printk(KERN_INFO "%s: Hard Error\n", dev->name);
1680
1681 if(tp->ssb.Parm[0] & SOFT_ERROR)
1682 printk(KERN_INFO "%s: Soft Error\n", dev->name);
1683
1684 if(tp->ssb.Parm[0] & TRANSMIT_BEACON)
1685 printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);
1686
1687 if(tp->ssb.Parm[0] & SINGLE_STATION)
1688 printk(KERN_INFO "%s: Single Station\n", dev->name);
1689
1690 /* Check if adapter has been closed */
1691 if(tp->ssb.Parm[0] & ADAPTER_CLOSED)
1692 {
1693 printk(KERN_INFO "%s: Adapter closed (Reopening),"
1694 "CurrentRingStat %x\n",
1695 dev->name, tp->CurrentRingStatus);
1696 tp->AdapterOpenFlag = 0;
1697 tms380tr_open_adapter(dev);
1698 }
1699}
1700
1701/*
1702 * Issued if adapter has encountered an unrecoverable hardware
1703 * or software error.
1704 */
1705static void tms380tr_chk_irq(struct net_device *dev)
1706{
1707 int i;
1708 unsigned short AdapterCheckBlock[4];
1709 struct net_local *tp = netdev_priv(dev);
1710
1711 tp->AdapterOpenFlag = 0; /* Adapter closed now */
1712
1713 /* Page number of adapter memory */
1714 SIFWRITEW(0x0001, SIFADX);
1715 /* Address offset */
1716 SIFWRITEW(CHECKADDR, SIFADR);
1717
1718 /* Reading 8 byte adapter check block. */
1719 for(i = 0; i < 4; i++)
1720 AdapterCheckBlock[i] = SIFREADW(SIFINC);
1721
1722 if(tms380tr_debug > 3)
1723 {
1724 printk(KERN_DEBUG "%s: AdapterCheckBlock: ", dev->name);
1725 for (i = 0; i < 4; i++)
1726 printk("%04X", AdapterCheckBlock[i]);
1727 printk("\n");
1728 }
1729
1730 switch(AdapterCheckBlock[0])
1731 {
1732 case DIO_PARITY:
1733 printk(KERN_INFO "%s: DIO parity error\n", dev->name);
1734 break;
1735
1736 case DMA_READ_ABORT:
1737 printk(KERN_INFO "%s DMA read operation aborted:\n",
1738 dev->name);
1739 switch (AdapterCheckBlock[1])
1740 {
1741 case 0:
1742 printk(KERN_INFO "Timeout\n");
1743 printk(KERN_INFO "Address: %04X %04X\n",
1744 AdapterCheckBlock[2],
1745 AdapterCheckBlock[3]);
1746 break;
1747
1748 case 1:
1749 printk(KERN_INFO "Parity error\n");
1750 printk(KERN_INFO "Address: %04X %04X\n",
1751 AdapterCheckBlock[2],
1752 AdapterCheckBlock[3]);
1753 break;
1754
1755 case 2:
1756 printk(KERN_INFO "Bus error\n");
1757 printk(KERN_INFO "Address: %04X %04X\n",
1758 AdapterCheckBlock[2],
1759 AdapterCheckBlock[3]);
1760 break;
1761
1762 default:
1763 printk(KERN_INFO "Unknown error.\n");
1764 break;
1765 }
1766 break;
1767
1768 case DMA_WRITE_ABORT:
1769 printk(KERN_INFO "%s: DMA write operation aborted:\n",
1770 dev->name);
1771 switch (AdapterCheckBlock[1])
1772 {
1773 case 0:
1774 printk(KERN_INFO "Timeout\n");
1775 printk(KERN_INFO "Address: %04X %04X\n",
1776 AdapterCheckBlock[2],
1777 AdapterCheckBlock[3]);
1778 break;
1779
1780 case 1:
1781 printk(KERN_INFO "Parity error\n");
1782 printk(KERN_INFO "Address: %04X %04X\n",
1783 AdapterCheckBlock[2],
1784 AdapterCheckBlock[3]);
1785 break;
1786
1787 case 2:
1788 printk(KERN_INFO "Bus error\n");
1789 printk(KERN_INFO "Address: %04X %04X\n",
1790 AdapterCheckBlock[2],
1791 AdapterCheckBlock[3]);
1792 break;
1793
1794 default:
1795 printk(KERN_INFO "Unknown error.\n");
1796 break;
1797 }
1798 break;
1799
1800 case ILLEGAL_OP_CODE:
1801 printk(KERN_INFO "%s: Illegal operation code in firmware\n",
1802 dev->name);
1803 /* Parm[0-3]: adapter internal register R13-R15 */
1804 break;
1805
1806 case PARITY_ERRORS:
1807 printk(KERN_INFO "%s: Adapter internal bus parity error\n",
1808 dev->name);
1809 /* Parm[0-3]: adapter internal register R13-R15 */
1810 break;
1811
1812 case RAM_DATA_ERROR:
1813 printk(KERN_INFO "%s: RAM data error\n", dev->name);
1814 /* Parm[0-1]: MSW/LSW address of RAM location. */
1815 break;
1816
1817 case RAM_PARITY_ERROR:
1818 printk(KERN_INFO "%s: RAM parity error\n", dev->name);
1819 /* Parm[0-1]: MSW/LSW address of RAM location. */
1820 break;
1821
1822 case RING_UNDERRUN:
1823 printk(KERN_INFO "%s: Internal DMA underrun detected\n",
1824 dev->name);
1825 break;
1826
1827 case INVALID_IRQ:
1828 printk(KERN_INFO "%s: Unrecognized interrupt detected\n",
1829 dev->name);
1830 /* Parm[0-3]: adapter internal register R13-R15 */
1831 break;
1832
1833 case INVALID_ERROR_IRQ:
1834 printk(KERN_INFO "%s: Unrecognized error interrupt detected\n",
1835 dev->name);
1836 /* Parm[0-3]: adapter internal register R13-R15 */
1837 break;
1838
1839 case INVALID_XOP:
1840 printk(KERN_INFO "%s: Unrecognized XOP request detected\n",
1841 dev->name);
1842 /* Parm[0-3]: adapter internal register R13-R15 */
1843 break;
1844
1845 default:
1846 printk(KERN_INFO "%s: Unknown status", dev->name);
1847 break;
1848 }
1849
1850 if(tms380tr_chipset_init(dev) == 1)
1851 {
1852 /* Restart of firmware successful */
1853 tp->AdapterOpenFlag = 1;
1854 }
1855}
1856
1857/*
1858 * Internal adapter pointer to RAM data are copied from adapter into
1859 * host system.
1860 */
1861static int tms380tr_read_ptr(struct net_device *dev)
1862{
1863 struct net_local *tp = netdev_priv(dev);
1864 unsigned short adapterram;
1865
1866 tms380tr_read_ram(dev, (unsigned char *)&tp->intptrs.BurnedInAddrPtr,
1867 ADAPTER_INT_PTRS, 16);
1868 tms380tr_read_ram(dev, (unsigned char *)&adapterram,
1869 cpu_to_be16((unsigned short)tp->intptrs.AdapterRAMPtr), 2);
1870 return be16_to_cpu(adapterram);
1871}
1872
/*
 * Reads a number of bytes from adapter to system memory.
 *
 * Copies Length bytes from adapter RAM (page 1, offset Address) into Data.
 * Words are fetched through the auto-incrementing SIFINC register and the
 * high byte of each word is stored first (adapter byte order).  The SIF
 * address registers are saved on entry and restored on exit so concurrent
 * users of the register window are not disturbed.
 */
static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
			unsigned short Address, int Length)
{
	int i;
	unsigned short old_sifadx, old_sifadr, InWord;

	/* Save the current values */
	old_sifadx = SIFREADW(SIFADX);
	old_sifadr = SIFREADW(SIFADR);

	/* Page number of adapter memory */
	SIFWRITEW(0x0001, SIFADX);
	/* Address offset in adapter RAM */
	SIFWRITEW(Address, SIFADR);

	/* Copy len byte from adapter memory to system data area. */
	i = 0;
	for(;;)
	{
		InWord = SIFREADW(SIFINC);

		*(Data + i) = HIBYTE(InWord);	/* Write first byte */
		if(++i == Length)		/* All is done break */
			break;

		*(Data + i) = LOBYTE(InWord);	/* Write second byte */
		if (++i == Length)		/* All is done break */
			break;
	}

	/* Restore original values */
	SIFWRITEW(old_sifadx, SIFADX);
	SIFWRITEW(old_sifadr, SIFADR);
}
1910
1911/*
1912 * Cancel all queued packets in the transmission queue.
1913 */
1914static void tms380tr_cancel_tx_queue(struct net_local* tp)
1915{
1916 TPL *tpl;
1917
1918 /*
1919 * NOTE: There must not be an active TRANSMIT command pending, when
1920 * this function is called.
1921 */
1922 if(tp->TransmitCommandActive)
1923 return;
1924
1925 for(;;)
1926 {
1927 tpl = tp->TplBusy;
1928 if(!tpl->BusyFlag)
1929 break;
1930 /* "Remove" TPL from busy list. */
1931 tp->TplBusy = tpl->NextTPLPtr;
1932 tms380tr_write_tpl_status(tpl, 0); /* Clear VALID bit */
1933 tpl->BusyFlag = 0; /* "free" TPL */
1934
1935 printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl);
1936 if (tpl->DMABuff)
1937 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
1938 dev_kfree_skb_any(tpl->Skb);
1939 }
1940}
1941
/*
 * This function is called whenever a transmit interrupt is generated by the
 * adapter. For a command complete interrupt, it is checked if we have to
 * issue a new transmit command or not.
 *
 * Completed TPLs are unlinked from the busy list, their status is checked
 * (for directed frames only), statistics are updated, DMA mappings and
 * skbs are released, and the queue is woken again when a TPL is free.
 */
static void tms380tr_tx_status_irq(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned char HighByte, HighAc, LowAc;
	TPL *tpl;

	/* NOTE: At this point the SSB from TRANSMIT STATUS is no longer
	 * available, because the CLEAR SSB command has already been issued.
	 *
	 * Process all complete transmissions.
	 */

	for(;;)
	{
		tpl = tp->TplBusy;
		/* Stop at the first TPL that is free, or whose status does
		 * not show a completed frame (VALID still set, or
		 * FRAME_COMPLETE missing).
		 */
		if(!tpl->BusyFlag || (tpl->Status
			& (TX_VALID | TX_FRAME_COMPLETE))
			!= TX_FRAME_COMPLETE)
		{
			break;
		}

		/* "Remove" TPL from busy list. */
		tp->TplBusy = tpl->NextTPLPtr ;

		/* Check the transmit status field only for directed frames*/
		if(DIRECTED_FRAME(tpl) && (tpl->Status & TX_ERROR) == 0)
		{
			HighByte = GET_TRANSMIT_STATUS_HIGH_BYTE(tpl->Status);
			HighAc = GET_FRAME_STATUS_HIGH_AC(HighByte);
			LowAc = GET_FRAME_STATUS_LOW_AC(HighByte);

			/* Mismatching or unrecognized A/C bits mean the
			 * destination did not copy the frame.
			 */
			if((HighAc != LowAc) || (HighAc == AC_NOT_RECOGNIZED))
			{
				printk(KERN_DEBUG "%s: (DA=%08lX not recognized)\n",
					dev->name,
					*(unsigned long *)&tpl->MData[2+2]);
			}
			else
			{
				if(tms380tr_debug > 3)
					printk(KERN_DEBUG "%s: Directed frame tx'd\n",
						dev->name);
			}
		}
		else
		{
			if(!DIRECTED_FRAME(tpl))
			{
				if(tms380tr_debug > 3)
					printk(KERN_DEBUG "%s: Broadcast frame tx'd\n",
						dev->name);
			}
		}

		tp->MacStat.tx_packets++;
		if (tpl->DMABuff)
			dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_irq(tpl->Skb);
		tpl->BusyFlag = 0;	/* "free" TPL */
	}

	/* Restart the transmit queue once the next TPL is free again. */
	if(!tp->TplFree->NextTPLPtr->BusyFlag)
		netif_wake_queue(dev);
}
2012
2013/*
2014 * Called if a frame receive interrupt is generated by the adapter.
2015 * Check if the frame is valid and indicate it to system.
2016 */
2017static void tms380tr_rcv_status_irq(struct net_device *dev)
2018{
2019 struct net_local *tp = netdev_priv(dev);
2020 unsigned char *ReceiveDataPtr;
2021 struct sk_buff *skb;
2022 unsigned int Length, Length2;
2023 RPL *rpl;
2024 RPL *SaveHead;
2025 dma_addr_t dmabuf;
2026
2027 /* NOTE: At this point the SSB from RECEIVE STATUS is no longer
2028 * available, because the CLEAR SSB command has already been issued.
2029 *
2030 * Process all complete receives.
2031 */
2032
2033 for(;;)
2034 {
2035 rpl = tp->RplHead;
2036 if(rpl->Status & RX_VALID)
2037 break; /* RPL still in use by adapter */
2038
2039 /* Forward RPLHead pointer to next list. */
2040 SaveHead = tp->RplHead;
2041 tp->RplHead = rpl->NextRPLPtr;
2042
2043 /* Get the frame size (Byte swap for Intel).
2044 * Do this early (see workaround comment below)
2045 */
2046 Length = be16_to_cpu(rpl->FrameSize);
2047
2048 /* Check if the Frame_Start, Frame_End and
2049 * Frame_Complete bits are set.
2050 */
2051 if((rpl->Status & VALID_SINGLE_BUFFER_FRAME)
2052 == VALID_SINGLE_BUFFER_FRAME)
2053 {
2054 ReceiveDataPtr = rpl->MData;
2055
2056 /* Workaround for delayed write of FrameSize on ISA
2057 * (FrameSize is false but valid-bit is reset)
2058 * Frame size is set to zero when the RPL is freed.
2059 * Length2 is there because there have also been
2060 * cases where the FrameSize was partially written
2061 */
2062 Length2 = be16_to_cpu(rpl->FrameSize);
2063
2064 if(Length == 0 || Length != Length2)
2065 {
2066 tp->RplHead = SaveHead;
2067 break; /* Return to tms380tr_interrupt */
2068 }
2069 tms380tr_update_rcv_stats(tp,ReceiveDataPtr,Length);
2070
2071 if(tms380tr_debug > 3)
2072 printk(KERN_DEBUG "%s: Packet Length %04X (%d)\n",
2073 dev->name, Length, Length);
2074
2075 /* Indicate the received frame to system the
2076 * adapter does the Source-Routing padding for
2077 * us. See: OpenOptions in tms380tr_init_opb()
2078 */
2079 skb = rpl->Skb;
2080 if(rpl->SkbStat == SKB_UNAVAILABLE)
2081 {
2082 /* Try again to allocate skb */
2083 skb = dev_alloc_skb(tp->MaxPacketSize);
2084 if(skb == NULL)
2085 {
2086 /* Update Stats ?? */
2087 }
2088 else
2089 {
2090 skb_put(skb, tp->MaxPacketSize);
2091 rpl->SkbStat = SKB_DATA_COPY;
2092 ReceiveDataPtr = rpl->MData;
2093 }
2094 }
2095
2096 if(skb && (rpl->SkbStat == SKB_DATA_COPY ||
2097 rpl->SkbStat == SKB_DMA_DIRECT))
2098 {
2099 if(rpl->SkbStat == SKB_DATA_COPY)
2100 skb_copy_to_linear_data(skb, ReceiveDataPtr,
2101 Length);
2102
2103 /* Deliver frame to system */
2104 rpl->Skb = NULL;
2105 skb_trim(skb,Length);
2106 skb->protocol = tr_type_trans(skb,dev);
2107 netif_rx(skb);
2108 }
2109 }
2110 else /* Invalid frame */
2111 {
2112 if(rpl->Skb != NULL)
2113 dev_kfree_skb_irq(rpl->Skb);
2114
2115 /* Skip list. */
2116 if(rpl->Status & RX_START_FRAME)
2117 /* Frame start bit is set -> overflow. */
2118 tp->MacStat.rx_errors++;
2119 }
2120 if (rpl->DMABuff)
2121 dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE);
2122 rpl->DMABuff = 0;
2123
2124 /* Allocate new skb for rpl */
2125 rpl->Skb = dev_alloc_skb(tp->MaxPacketSize);
2126 /* skb == NULL ? then use local buffer */
2127 if(rpl->Skb == NULL)
2128 {
2129 rpl->SkbStat = SKB_UNAVAILABLE;
2130 rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
2131 rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
2132 }
2133 else /* skb != NULL */
2134 {
2135 rpl->Skb->dev = dev;
2136 skb_put(rpl->Skb, tp->MaxPacketSize);
2137
2138 /* Data unreachable for DMA ? then use local buffer */
2139 dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
2140 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
2141 {
2142 rpl->SkbStat = SKB_DATA_COPY;
2143 rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
2144 rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
2145 }
2146 else
2147 {
2148 /* DMA directly in skb->data */
2149 rpl->SkbStat = SKB_DMA_DIRECT;
2150 rpl->FragList[0].DataAddr = htonl(dmabuf);
2151 rpl->MData = rpl->Skb->data;
2152 rpl->DMABuff = dmabuf;
2153 }
2154 }
2155
2156 rpl->FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize);
2157 rpl->FrameSize = 0;
2158
2159 /* Pass the last RPL back to the adapter */
2160 tp->RplTail->FrameSize = 0;
2161
2162 /* Reset the CSTAT field in the list. */
2163 tms380tr_write_rpl_status(tp->RplTail, RX_VALID | RX_FRAME_IRQ);
2164
2165 /* Current RPL becomes last one in list. */
2166 tp->RplTail = tp->RplTail->NextRPLPtr;
2167
2168 /* Inform adapter about RPL valid. */
2169 tms380tr_exec_sifcmd(dev, CMD_RX_VALID);
2170 }
2171}
2172
/*
 * This function should be used whenever the status of any RPL must be
 * modified by the driver, because the compiler may otherwise change the
 * order of instructions such that writing the RPL status may be executed
 * at an undesirable time. When this function is used, the status is
 * always written when the function is called.
 *
 * NOTE(review): without a volatile qualifier or explicit barrier this
 * relies only on the out-of-line call acting as a compiler ordering
 * point — confirm this is sufficient on all supported platforms.
 */
static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
{
	rpl->Status = Status;
}
2184
2185/*
2186 * The function updates the statistic counters in mac->MacStat.
2187 * It differtiates between directed and broadcast/multicast ( ==functional)
2188 * frames.
2189 */
2190static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPtr[],
2191 unsigned int Length)
2192{
2193 tp->MacStat.rx_packets++;
2194 tp->MacStat.rx_bytes += Length;
2195
2196 /* Test functional bit */
2197 if(DataPtr[2] & GROUP_BIT)
2198 tp->MacStat.multicast++;
2199}
2200
2201static int tms380tr_set_mac_address(struct net_device *dev, void *addr)
2202{
2203 struct net_local *tp = netdev_priv(dev);
2204 struct sockaddr *saddr = addr;
2205
2206 if (tp->AdapterOpenFlag || tp->AdapterVirtOpenFlag) {
2207 printk(KERN_WARNING "%s: Cannot set MAC/LAA address while card is open\n", dev->name);
2208 return -EIO;
2209 }
2210 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
2211 return 0;
2212}
2213
2214#if TMS380TR_DEBUG > 0
2215/*
2216 * Dump Packet (data)
2217 */
2218static void tms380tr_dump(unsigned char *Data, int length)
2219{
2220 int i, j;
2221
2222 for (i = 0, j = 0; i < length / 8; i++, j += 8)
2223 {
2224 printk(KERN_DEBUG "%02x %02x %02x %02x %02x %02x %02x %02x\n",
2225 Data[j+0],Data[j+1],Data[j+2],Data[j+3],
2226 Data[j+4],Data[j+5],Data[j+6],Data[j+7]);
2227 }
2228}
2229#endif
2230
2231void tmsdev_term(struct net_device *dev)
2232{
2233 struct net_local *tp;
2234
2235 tp = netdev_priv(dev);
2236 dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
2237 DMA_BIDIRECTIONAL);
2238}
2239
/* net_device callbacks shared by all TMS380-based adapter drivers. */
const struct net_device_ops tms380tr_netdev_ops = {
	.ndo_open = tms380tr_open,
	.ndo_stop = tms380tr_close,
	.ndo_start_xmit = tms380tr_send_packet,
	.ndo_tx_timeout = tms380tr_timeout,
	.ndo_get_stats = tms380tr_get_stats,
	.ndo_set_rx_mode = tms380tr_set_multicast_list,
	.ndo_set_mac_address = tms380tr_set_mac_address,
};
EXPORT_SYMBOL(tms380tr_netdev_ops);
2250
2251int tmsdev_init(struct net_device *dev, struct device *pdev)
2252{
2253 struct net_local *tms_local;
2254
2255 memset(netdev_priv(dev), 0, sizeof(struct net_local));
2256 tms_local = netdev_priv(dev);
2257 init_waitqueue_head(&tms_local->wait_for_tok_int);
2258 if (pdev->dma_mask)
2259 tms_local->dmalimit = *pdev->dma_mask;
2260 else
2261 return -ENOMEM;
2262 tms_local->pdev = pdev;
2263 tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local,
2264 sizeof(struct net_local), DMA_BIDIRECTIONAL);
2265 if (tms_local->dmabuffer + sizeof(struct net_local) >
2266 tms_local->dmalimit)
2267 {
2268 printk(KERN_INFO "%s: Memory not accessible for DMA\n",
2269 dev->name);
2270 tmsdev_term(dev);
2271 return -ENOMEM;
2272 }
2273
2274 dev->netdev_ops = &tms380tr_netdev_ops;
2275 dev->watchdog_timeo = HZ;
2276
2277 return 0;
2278}
2279
2280EXPORT_SYMBOL(tms380tr_open);
2281EXPORT_SYMBOL(tms380tr_close);
2282EXPORT_SYMBOL(tms380tr_interrupt);
2283EXPORT_SYMBOL(tmsdev_init);
2284EXPORT_SYMBOL(tmsdev_term);
2285EXPORT_SYMBOL(tms380tr_wait);
2286
2287#ifdef MODULE
2288
2289static struct module *TMS380_module = NULL;
2290
/* Module load hook: log the driver version and record our module. */
int init_module(void)
{
	printk(KERN_DEBUG "%s", version);

	/* NOTE(review): within the visible code TMS380_module is only
	 * assigned here and cleared on unload — confirm whether it is
	 * still needed at all.
	 */
	TMS380_module = &__this_module;
	return 0;
}
2298
/* Module unload hook: clear the module pointer set in init_module(). */
void cleanup_module(void)
{
	TMS380_module = NULL;
}
2303#endif
2304
2305MODULE_LICENSE("GPL");
2306
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
deleted file mode 100644
index e5a617c586c2..000000000000
--- a/drivers/net/tokenring/tms380tr.h
+++ /dev/null
@@ -1,1141 +0,0 @@
1/*
2 * tms380tr.h: TI TMS380 Token Ring driver for Linux
3 *
4 * Authors:
5 * - Christoph Goos <cgoos@syskonnect.de>
6 * - Adam Fritzler
7 */
8
9#ifndef __LINUX_TMS380TR_H
10#define __LINUX_TMS380TR_H
11
12#ifdef __KERNEL__
13
14#include <linux/interrupt.h>
15
16/* module prototypes */
17extern const struct net_device_ops tms380tr_netdev_ops;
18int tms380tr_open(struct net_device *dev);
19int tms380tr_close(struct net_device *dev);
20irqreturn_t tms380tr_interrupt(int irq, void *dev_id);
21int tmsdev_init(struct net_device *dev, struct device *pdev);
22void tmsdev_term(struct net_device *dev);
23void tms380tr_wait(unsigned long time);
24
25#define TMS380TR_MAX_ADAPTERS 7
26
27#define SEND_TIMEOUT 10*HZ
28
29#define TR_RCF_LONGEST_FRAME_MASK 0x0070
30#define TR_RCF_FRAME4K 0x0030
31
32/*------------------------------------------------------------------*/
33/* Bit order for adapter communication with DMA */
34/* -------------------------------------------------------------- */
35/* Bit 8 | 9| 10| 11|| 12| 13| 14| 15|| 0| 1| 2| 3|| 4| 5| 6| 7| */
36/* -------------------------------------------------------------- */
37/* The bytes in a word must be byte swapped. Also, if a double */
38/* word is used for storage, then the words, as well as the bytes, */
39/* must be swapped. */
40/* Bit order for adapter communication with DIO */
41/* -------------------------------------------------------------- */
42/* Bit 0 | 1| 2| 3|| 4| 5| 6| 7|| 8| 9| 10| 11|| 12| 13| 14| 15| */
43/* -------------------------------------------------------------- */
44/*------------------------------------------------------------------*/
45
46/* Swap words of a long. */
47#define SWAPW(x) (((x) << 16) | ((x) >> 16))
48
49/* Get the low byte of a word. */
50#define LOBYTE(w) ((unsigned char)(w))
51
52/* Get the high byte of a word. */
53#define HIBYTE(w) ((unsigned char)((unsigned short)(w) >> 8))
54
55/* Get the low word of a long. */
56#define LOWORD(l) ((unsigned short)(l))
57
58/* Get the high word of a long. */
59#define HIWORD(l) ((unsigned short)((unsigned long)(l) >> 16))
60
61
62
63/* Token ring adapter I/O addresses for normal mode. */
64
65/*
66 * The SIF registers. Common to all adapters.
67 */
68/* Basic SIF (SRSX = 0) */
69#define SIFDAT 0x00 /* SIF/DMA data. */
70#define SIFINC 0x02 /* IO Word data with auto increment. */
71#define SIFINH 0x03 /* IO Byte data with auto increment. */
72#define SIFADR 0x04 /* SIF/DMA Address. */
73#define SIFCMD 0x06 /* SIF Command. */
74#define SIFSTS 0x06 /* SIF Status. */
75
76/* "Extended" SIF (SRSX = 1) */
77#define SIFACL 0x08 /* SIF Adapter Control Register. */
78#define SIFADD 0x0a /* SIF/DMA Address. -- 0x0a */
79#define SIFADX 0x0c /* 0x0c */
80#define DMALEN 0x0e /* SIF DMA length. -- 0x0e */
81
82/*
83 * POS Registers. Only for ISA Adapters.
84 */
85#define POSREG 0x10 /* Adapter Program Option Select (POS)
86 * Register: base IO address + 16 byte.
87 */
88#define POSREG_2 24L /* only for TR4/16+ adapter
89 * base IO address + 24 byte. -- 0x18
90 */
91
92/* SIFCMD command codes (high-low) */
93#define CMD_INTERRUPT_ADAPTER 0x8000 /* Cause internal adapter interrupt */
94#define CMD_ADAPTER_RESET 0x4000 /* Hardware reset of adapter */
95#define CMD_SSB_CLEAR 0x2000 /* Acknowledge to adapter to
96 * system interrupts.
97 */
98#define CMD_EXECUTE 0x1000 /* Execute SCB command */
99#define CMD_SCB_REQUEST 0x0800 /* Request adapter to interrupt
100 * system when SCB is available for
101 * another command.
102 */
103#define CMD_RX_CONTINUE 0x0400 /* Continue receive after odd pointer
104 * stop. (odd pointer receive method)
105 */
106#define CMD_RX_VALID 0x0200 /* Now actual RPL is valid. */
107#define CMD_TX_VALID 0x0100 /* Now actual TPL is valid. (valid
108 * bit receive/transmit method)
109 */
110#define CMD_SYSTEM_IRQ 0x0080 /* Adapter-to-attached-system
111 * interrupt is reset.
112 */
113#define CMD_CLEAR_SYSTEM_IRQ 0x0080 /* Clear SYSTEM_INTERRUPT bit.
114 * (write: 1=ignore, 0=reset)
115 */
116#define EXEC_SOFT_RESET 0xFF00 /* adapter soft reset. (restart
117 * adapter after hardware reset)
118 */
119
120
121/* ACL commands (high-low) */
122#define ACL_SWHLDA 0x0800 /* Software hold acknowledge. */
123#define ACL_SWDDIR 0x0400 /* Data transfer direction. */
124#define ACL_SWHRQ 0x0200 /* Pseudo DMA operation. */
125#define ACL_PSDMAEN 0x0100 /* Enable pseudo system DMA. */
126#define ACL_ARESET 0x0080 /* Adapter hardware reset command.
127 * (held in reset condition as
128 * long as bit is set)
129 */
130#define ACL_CPHALT 0x0040 /* Communication processor halt.
131 * (can only be set while ACL_ARESET
132 * bit is set; prevents adapter
133 * processor from executing code while
134 * downloading firmware)
135 */
136#define ACL_BOOT 0x0020
137#define ACL_SINTEN 0x0008 /* System interrupt enable/disable
138 * (1/0): can be written if ACL_ARESET
139 * is zero.
140 */
141#define ACL_PEN 0x0004
142
143#define ACL_NSELOUT0 0x0002
144#define ACL_NSELOUT1 0x0001 /* NSELOUTx have a card-specific
145 * meaning for setting ring speed.
146 */
147
148#define PS_DMA_MASK (ACL_SWHRQ | ACL_PSDMAEN)
149
150
151/* SIFSTS register return codes (high-low) */
152#define STS_SYSTEM_IRQ 0x0080 /* Adapter-to-attached-system
153 * interrupt is valid.
154 */
155#define STS_INITIALIZE 0x0040 /* INITIALIZE status. (ready to
156 * initialize)
157 */
158#define STS_TEST 0x0020 /* TEST status. (BUD not completed) */
159#define STS_ERROR 0x0010 /* ERROR status. (unrecoverable
160 * HW error occurred)
161 */
162#define STS_MASK 0x00F0 /* Mask interesting status bits. */
163#define STS_ERROR_MASK 0x000F /* Get Error Code by masking the
164 * interrupt code bits.
165 */
166#define ADAPTER_INT_PTRS 0x0A00 /* Address offset of adapter internal
167 * pointers 01:0a00 (high-low) have to
168 * be read after init and before open.
169 */
170
171
172/* Interrupt Codes (only MAC IRQs) */
173#define STS_IRQ_ADAPTER_CHECK 0x0000 /* unrecoverable hardware or
174 * software error.
175 */
176#define STS_IRQ_RING_STATUS 0x0004 /* SSB is updated with ring status. */
177#define STS_IRQ_LLC_STATUS 0x0005 /* Not used in MAC-only microcode */
178#define STS_IRQ_SCB_CLEAR 0x0006 /* SCB clear, following an
179 * SCB_REQUEST IRQ.
180 */
181#define STS_IRQ_TIMER 0x0007 /* Not normally used in MAC ucode */
182#define STS_IRQ_COMMAND_STATUS 0x0008 /* SSB is updated with command
183 * status.
184 */
185#define STS_IRQ_RECEIVE_STATUS 0x000A /* SSB is updated with receive
186 * status.
187 */
188#define STS_IRQ_TRANSMIT_STATUS 0x000C /* SSB is updated with transmit
189 * status
190 */
191#define STS_IRQ_RECEIVE_PENDING 0x000E /* Not used in MAC-only microcode */
192#define STS_IRQ_MASK 0x000F /* = STS_ERROR_MASK. */
193
194
195/* TRANSMIT_STATUS completion code: (SSB.Parm[0]) */
196#define COMMAND_COMPLETE 0x0080 /* TRANSMIT command completed
197 * (avoid this!) issue another transmit
198 * to send additional frames.
199 */
200#define FRAME_COMPLETE 0x0040 /* Frame has been transmitted;
201 * INTERRUPT_FRAME bit was set in the
202 * CSTAT request; indication of possibly
203 * more than one frame transmissions!
204 * SSB.Parm[0-1]: 32 bit pointer to
205 * TPL of last frame.
206 */
207#define LIST_ERROR 0x0020 /* Error in one of the TPLs that
208 * compose the frame; TRANSMIT
209 * terminated; Parm[1-2]: 32bit pointer
210 * to TPL which starts the error
211 * frame; error details in bits 8-13.
212 * (14?)
213 */
214#define FRAME_SIZE_ERROR 0x8000 /* FRAME_SIZE does not equal the sum of
215 * the valid DATA_COUNT fields;
216 * FRAME_SIZE less than header plus
217 * information field. (15 bytes +
218 * routing field) Or if FRAME_SIZE
219 * was specified as zero in one list.
220 */
221#define TX_THRESHOLD 0x4000 /* FRAME_SIZE greater than (BUFFER_SIZE
222 * - 9) * TX_BUF_MAX.
223 */
224#define ODD_ADDRESS 0x2000 /* Odd forward pointer value is
225 * read on a list without END_FRAME
226 * indication.
227 */
228#define FRAME_ERROR 0x1000 /* START_FRAME bit (not) anticipated,
229 * but (not) set.
230 */
231#define ACCESS_PRIORITY_ERROR 0x0800 /* Access priority requested has not
232 * been allowed.
233 */
234#define UNENABLED_MAC_FRAME 0x0400 /* MAC frame has source class of zero
235 * or MAC frame PCF ATTN field is
236 * greater than one.
237 */
238#define ILLEGAL_FRAME_FORMAT 0x0200 /* Bit 0 or FC field was set to one. */
239
240
241/*
242 * Since we need to support some functions even if the adapter is in a
243 * CLOSED state, we have a (pseudo-) command queue which holds commands
244 * that are outstandig to be executed.
245 *
246 * Each time a command completes, an interrupt occurs and the next
247 * command is executed. The command queue is actually a simple word with
248 * a bit for each outstandig command. Therefore the commands will not be
249 * executed in the order they have been queued.
250 *
251 * The following defines the command code bits and the command queue:
252 */
253#define OC_OPEN 0x0001 /* OPEN command */
254#define OC_TRANSMIT 0x0002 /* TRANSMIT command */
255#define OC_TRANSMIT_HALT 0x0004 /* TRANSMIT_HALT command */
256#define OC_RECEIVE 0x0008 /* RECEIVE command */
257#define OC_CLOSE 0x0010 /* CLOSE command */
258#define OC_SET_GROUP_ADDR 0x0020 /* SET_GROUP_ADDR command */
259#define OC_SET_FUNCT_ADDR 0x0040 /* SET_FUNCT_ADDR command */
260#define OC_READ_ERROR_LOG 0x0080 /* READ_ERROR_LOG command */
261#define OC_READ_ADAPTER 0x0100 /* READ_ADAPTER command */
262#define OC_MODIFY_OPEN_PARMS 0x0400 /* MODIFY_OPEN_PARMS command */
263#define OC_RESTORE_OPEN_PARMS 0x0800 /* RESTORE_OPEN_PARMS command */
264#define OC_SET_FIRST_16_GROUP 0x1000 /* SET_FIRST_16_GROUP command */
265#define OC_SET_BRIDGE_PARMS 0x2000 /* SET_BRIDGE_PARMS command */
266#define OC_CONFIG_BRIDGE_PARMS 0x4000 /* CONFIG_BRIDGE_PARMS command */
267
268#define OPEN 0x0300 /* C: open command. S: completion. */
269#define TRANSMIT 0x0400 /* C: transmit command. S: completion
270 * status. (reject: COMMAND_REJECT if
271 * adapter not opened, TRANSMIT already
272 * issued or address passed in the SCB
273 * not word aligned)
274 */
275#define TRANSMIT_HALT 0x0500 /* C: interrupt TX TPL chain; if no
276 * TRANSMIT command issued, the command
277 * is ignored (completion with TRANSMIT
278 * status (0x0400)!)
279 */
280#define RECEIVE 0x0600 /* C: receive command. S: completion
281 * status. (reject: COMMAND_REJECT if
282 * adapter not opened, RECEIVE already
283 * issued or address passed in the SCB
284 * not word aligned)
285 */
286#define CLOSE 0x0700 /* C: close adapter. S: completion.
287 * (COMMAND_REJECT if adapter not open)
288 */
289#define SET_GROUP_ADDR 0x0800 /* C: alter adapter group address after
290 * OPEN. S: completion. (COMMAND_REJECT
291 * if adapter not open)
292 */
293#define SET_FUNCT_ADDR 0x0900 /* C: alter adapter functional address
294 * after OPEN. S: completion.
295 * (COMMAND_REJECT if adapter not open)
296 */
297#define READ_ERROR_LOG 0x0A00 /* C: read adapter error counters.
298 * S: completion. (command ignored
299 * if adapter not open!)
300 */
301#define READ_ADAPTER 0x0B00 /* C: read data from adapter memory.
302 * (important: after init and before
303 * open!) S: completion. (ADAPTER_CHECK
304 * interrupt if undefined storage area
305 * read)
306 */
#define MODIFY_OPEN_PARMS	0x0D00	/* C: modify some adapter operational
					 * parameters. (bit corresponding to
					 * WRAP_INTERFACE is ignored)
					 * S: completion. (reject:
					 * COMMAND_REJECT)
					 */
#define RESTORE_OPEN_PARMS	0x0E00	/* C: modify some adapter operational
					 * parameters. (bit corresponding
					 * to WRAP_INTERFACE is ignored)
					 * S: completion. (reject:
					 * COMMAND_REJECT)
					 */
319#define SET_FIRST_16_GROUP 0x0F00 /* C: alter the first two bytes in
320 * adapter group address.
321 * S: completion. (reject:
322 * COMMAND_REJECT)
323 */
324#define SET_BRIDGE_PARMS 0x1000 /* C: values and conditions for the
325 * adapter hardware to use when frames
326 * are copied for forwarding.
327 * S: completion. (reject:
328 * COMMAND_REJECT)
329 */
330#define CONFIG_BRIDGE_PARMS 0x1100 /* C: ..
331 * S: completion. (reject:
332 * COMMAND_REJECT)
333 */
334
335#define SPEED_4 4
336#define SPEED_16 16 /* Default transmission speed */
337
338
339/* Initialization Parameter Block (IPB); word alignment necessary! */
340#define BURST_SIZE 0x0018 /* Default burst size */
341#define BURST_MODE 0x9F00 /* Burst mode enable */
342#define DMA_RETRIES 0x0505 /* Magic DMA retry number... */
343
344#define CYCLE_TIME 3 /* Default AT-bus cycle time: 500 ns
345 * (later adapter version: fix cycle time!)
346 */
347#define LINE_SPEED_BIT 0x80
348
349/* Macro definition for the wait function. */
350#define ONE_SECOND_TICKS 1000000
351#define HALF_SECOND (ONE_SECOND_TICKS / 2)
352#define ONE_SECOND (ONE_SECOND_TICKS)
353#define TWO_SECONDS (ONE_SECOND_TICKS * 2)
354#define THREE_SECONDS (ONE_SECOND_TICKS * 3)
355#define FOUR_SECONDS (ONE_SECOND_TICKS * 4)
356#define FIVE_SECONDS (ONE_SECOND_TICKS * 5)
357
358#define BUFFER_SIZE 2048 /* Buffers on Adapter */
359
#pragma pack(1)
/* Initialization Parameter Block (IPB): handed to the adapter during
 * initialization, before OPEN. Packed so the in-memory layout matches
 * the adapter's expectations exactly; multi-byte fields are high-low
 * (big endian) as seen by the adapter. Word alignment of the block
 * itself is required (see comment above the BURST_SIZE defines).
 */
typedef struct {
	unsigned short Init_Options;	/* Initialize with burst mode;
					 * LLC disabled. (MAC only)
					 */

	/* Interrupt vectors the adapter places on attached system bus. */
	u_int8_t CMD_Status_IV;		/* Interrupt vector: command status. */
	u_int8_t TX_IV;			/* Interrupt vector: transmit. */
	u_int8_t RX_IV;			/* Interrupt vector: receive. */
	u_int8_t Ring_Status_IV;	/* Interrupt vector: ring status. */
	u_int8_t SCB_Clear_IV;		/* Interrupt vector: SCB clear. */
	u_int8_t Adapter_CHK_IV;	/* Interrupt vector: adapter check. */

	u_int16_t RX_Burst_Size;	/* Max. number of transfer cycles. */
	u_int16_t TX_Burst_Size;	/* During DMA burst; even value! */
	u_int16_t DMA_Abort_Thrhld;	/* Number of DMA retries. */

	u_int32_t SCB_Addr;	/* SCB address: even, word aligned, high-low */
	u_int32_t SSB_Addr;	/* SSB address: even, word aligned, high-low */
} IPB, *IPB_Ptr;
#pragma pack()
382
383/*
384 * OPEN Command Parameter List (OCPL) (can be reused, if the adapter has to
385 * be reopened)
386 */
387#define BUFFER_SIZE 2048 /* Buffers on Adapter. */
388#define TPL_SIZE 8+6*TX_FRAG_NUM /* Depending on fragments per TPL. */
389#define RPL_SIZE 14 /* (with TI firmware v2.26 handling
390 * up to nine fragments possible)
391 */
392#define TX_BUF_MIN 20 /* ??? (Stephan: calculation with */
393#define TX_BUF_MAX 40 /* BUFFER_SIZE and MAX_FRAME_SIZE) ???
394 */
395#define DISABLE_EARLY_TOKEN_RELEASE 0x1000
396
397/* OPEN Options (high-low) */
398#define WRAP_INTERFACE 0x0080 /* Inserting omitted for test
399 * purposes; transmit data appears
400 * as receive data. (useful for
401 * testing; change: CLOSE necessary)
402 */
403#define DISABLE_HARD_ERROR 0x0040 /* On HARD_ERROR & TRANSMIT_BEACON
404 * no RING.STATUS interrupt.
405 */
406#define DISABLE_SOFT_ERROR 0x0020 /* On SOFT_ERROR, no RING.STATUS
407 * interrupt.
408 */
409#define PASS_ADAPTER_MAC_FRAMES 0x0010 /* Passing unsupported MAC frames
410 * to system.
411 */
412#define PASS_ATTENTION_FRAMES 0x0008 /* All changed attention MAC frames are
413 * passed to the system.
414 */
415#define PAD_ROUTING_FIELD 0x0004 /* Routing field is padded to 18
416 * bytes.
417 */
418#define FRAME_HOLD 0x0002 /*Adapter waits for entire frame before
419 * initiating DMA transfer; otherwise:
420 * DMA transfer initiation if internal
421 * buffer filled.
422 */
423#define CONTENDER 0x0001 /* Adapter participates in the monitor
424 * contention process.
425 */
426#define PASS_BEACON_MAC_FRAMES 0x8000 /* Adapter passes beacon MAC frames
427 * to the system.
428 */
429#define EARLY_TOKEN_RELEASE 0x1000 /* Only valid in 16 Mbps operation;
430 * 0 = ETR. (no effect in 4 Mbps
431 * operation)
432 */
433#define COPY_ALL_MAC_FRAMES 0x0400 /* All MAC frames are copied to
434 * the system. (after OPEN: duplicate
435 * address test (DAT) MAC frame is
436 * first received frame copied to the
437 * system)
438 */
439#define COPY_ALL_NON_MAC_FRAMES 0x0200 /* All non MAC frames are copied to
440 * the system.
441 */
442#define PASS_FIRST_BUF_ONLY 0x0100 /* Passes only first internal buffer
443 * of each received frame; FrameSize
444 * of RPLs must contain internal
445 * BUFFER_SIZE bits for promiscuous mode.
446 */
447#define ENABLE_FULL_DUPLEX_SELECTION 0x2000
448 /* Enable the use of full-duplex
449 * settings with bits in byte 22 in
450 * ocpl. (new feature in firmware
451 * version 3.09)
452 */
453
454/* Full-duplex settings */
455#define OPEN_FULL_DUPLEX_OFF 0x0000
456#define OPEN_FULL_DUPLEX_ON 0x00c0
457#define OPEN_FULL_DUPLEX_AUTO 0x0080
458
459#define PROD_ID_SIZE 18 /* Length of product ID. */
460
461#define TX_FRAG_NUM 3 /* Number of fragments used in one TPL. */
462#define TX_MORE_FRAGMENTS 0x8000 /* Bit set in DataCount to indicate more
463 * fragments following.
464 */
465
466/* XXX is there some better way to do this? */
467#define ISA_MAX_ADDRESS 0x00ffffff
468#define PCI_MAX_ADDRESS 0xffffffff
469
#pragma pack(1)
/* OPEN Command Parameter List (OCPL): parameter block handed to the
 * adapter with the OPEN command. Packed, adapter-visible layout; the
 * __be16 fields are explicitly big endian.
 */
typedef struct {
	u_int16_t OPENOptions;	/* OPEN option bits (see defines above). */
	u_int8_t NodeAddr[6];	/* Adapter node address; use ROM
				 * address
				 */
	u_int32_t GroupAddr;	/* Multicast: high order
				 * bytes = 0xC000
				 */
	u_int32_t FunctAddr;	/* High order bytes = 0xC000 */
	__be16 RxListSize;	/* RPL size: 0 (=26), 14, 20 or
				 * 26 bytes read by the adapter.
				 * (Depending on the number of
				 * fragments/list)
				 */
	__be16 TxListSize;	/* TPL size */
	__be16 BufSize;		/* Is automatically rounded up to the
				 * nearest nK boundary.
				 */
	u_int16_t FullDuplex;	/* Full-duplex setting; presumably one of
				 * the OPEN_FULL_DUPLEX_* values above.
				 */
	u_int16_t Reserved;
	u_int8_t TXBufMin;	/* Number of adapter buffers reserved
				 * for transmission a minimum of 2
				 * buffers must be allocated.
				 */
	u_int8_t TXBufMax;	/* Maximum number of adapter buffers
				 * for transmit; a minimum of 2 buffers
				 * must be available for receive.
				 * Default: 6
				 */
	u_int16_t ProdIDAddr[2];/* Pointer to product ID. */
} OPB, *OPB_Ptr;
#pragma pack()
503
504/*
505 * SCB: adapter commands enabled by the host system started by writing
506 * CMD_INTERRUPT_ADAPTER | CMD_EXECUTE (|SCB_REQUEST) to the SIFCMD IO
507 * register. (special case: | CMD_SYSTEM_IRQ for initialization)
508 */
#pragma pack(1)
typedef struct {
	u_int16_t CMD;		/* Command code */
	u_int16_t Parm[2];	/* Pointer to Command Parameter Block;
				 * Parm[0-1] together form the 32 bit
				 * physical address, high word first.
				 */
} SCB;	/* System Command Block (32 bit physical address; big endian)*/
#pragma pack()
515
516/*
517 * SSB: adapter command return status can be evaluated after COMMAND_STATUS
518 * adapter to system interrupt after reading SSB, the availability of the SSB
519 * has to be told the adapter by writing CMD_INTERRUPT_ADAPTER | CMD_SSB_CLEAR
520 * in the SIFCMD IO register.
521 */
#pragma pack(1)
typedef struct {
	u_int16_t STS;		/* Status code */
	u_int16_t Parm[3];	/* Parameter or pointer to Status Parameter
				 * Block. (32 bit values are stored high
				 * word first, per the big-endian note below)
				 */
} SSB;	/* System Status Block (big endian - physical address) */
#pragma pack()
530
/* Adapter internal pointers: read from offset ADAPTER_INT_PTRS after
 * initialization and before OPEN (see ADAPTER_INT_PTRS above). The
 * fields are presumably 16-bit adapter-memory offsets — TODO confirm.
 */
typedef struct {
	unsigned short BurnedInAddrPtr;	/* Pointer to adapter burned in
					 * address. (BIA)
					 */
	unsigned short SoftwareLevelPtr;/* Pointer to software level data. */
	unsigned short AdapterAddrPtr;	/* Pointer to adapter addresses. */
	unsigned short AdapterParmsPtr;	/* Pointer to adapter parameters. */
	unsigned short MACBufferPtr;	/* Pointer to MAC buffer. (internal) */
	unsigned short LLCCountersPtr;	/* Pointer to LLC counters. */
	unsigned short SpeedFlagPtr;	/* Pointer to data rate flag.
					 * (4/16 Mbps)
					 */
	unsigned short AdapterRAMPtr;	/* Pointer to adapter RAM found. (KB) */
} INTPTRS;	/* Adapter internal pointers */
545
#pragma pack(1)
/* Adapter error counters (cf. the READ_ERROR_LOG command above, which
 * reads the adapter error counters). Packed byte layout.
 */
typedef struct {
	u_int8_t Line_Error;		/* Line error: code violation in
					 * frame or in a token, or FCS error.
					 */
	u_int8_t Internal_Error;	/* IBM specific. (Reserved_1) */
	u_int8_t Burst_Error;
	u_int8_t ARI_FCI_Error;		/* ARI/FCI bit zero in AMP or
					 * SMP MAC frame.
					 */
	u_int8_t AbortDelimeters;	/* IBM specific. (Reserved_2) */
	u_int8_t Reserved_3;
	u_int8_t Lost_Frame_Error;	/* Receive of end of transmitted
					 * frame failed.
					 */
	u_int8_t Rx_Congest_Error;	/* Adapter in repeat mode has not
					 * enough buffer space to copy incoming
					 * frame.
					 */
	u_int8_t Frame_Copied_Error;	/* ARI bit not zero in frame
					 * addressed to adapter.
					 */
	u_int8_t Frequency_Error;	/* IBM specific. (Reserved_4) */
	u_int8_t Token_Error;		/* (active only in monitor station) */
	u_int8_t Reserved_5;
	u_int8_t DMA_Bus_Error;		/* DMA bus errors not exceeding the
					 * abort thresholds.
					 */
	u_int8_t DMA_Parity_Error;	/* DMA parity errors not exceeding
					 * the abort thresholds.
					 */
} ERRORTAB;	/* Adapter error counters */
#pragma pack()
579
580
581/*--------------------- Send and Receive definitions -------------------*/
#pragma pack(1)
/* One DMA fragment descriptor, as embedded in TPL/RPL fragment lists.
 * Both fields are big endian (__be16/__be32) as seen by the adapter.
 */
typedef struct {
	__be16 DataCount;	/* Value 0, even and odd values are
				 * permitted; value is unaltered most
				 * significant bit set: following
				 * fragments last fragment: most
				 * significant bit is not evaluated.
				 * (???)
				 */
	__be32 DataAddr;	/* Pointer to frame data fragment;
				 * even or odd.
				 */
} Fragment;
#pragma pack()
596
597#define MAX_FRAG_NUMBERS 9 /* Maximal number of fragments possible to use
598 * in one RPL/TPL. (depending on TI firmware
599 * version)
600 */
601
602/*
603 * AC (1), FC (1), Dst (6), Src (6), RIF (18), Data (4472) = 4504
604 * The packet size can be one of the follows: 548, 1502, 2084, 4504, 8176,
605 * 11439, 17832. Refer to TMS380 Second Generation Token Ring User's Guide
606 * Page 2-27.
607 */
608#define HEADER_SIZE (1 + 1 + 6 + 6)
609#define SRC_SIZE 18
610#define MIN_DATA_SIZE 516
611#define DEFAULT_DATA_SIZE 4472
612#define MAX_DATA_SIZE 17800
613
614#define DEFAULT_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + DEFAULT_DATA_SIZE)
615#define MIN_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + MIN_DATA_SIZE)
616#define MAX_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + MAX_DATA_SIZE)
617
618/*
619 * Macros to deal with the frame status field.
620 */
621#define AC_NOT_RECOGNIZED 0x00
622#define GROUP_BIT 0x80
623#define GET_TRANSMIT_STATUS_HIGH_BYTE(Ts) ((unsigned char)((Ts) >> 8))
624#define GET_FRAME_STATUS_HIGH_AC(Fs) ((unsigned char)(((Fs) & 0xC0) >> 6))
625#define GET_FRAME_STATUS_LOW_AC(Fs) ((unsigned char)(((Fs) & 0x0C) >> 2))
626#define DIRECTED_FRAME(Context) (!((Context)->MData[2] & GROUP_BIT))
627
628
629/*--------------------- Send Functions ---------------------------------*/
630/* define TX_CSTAT _REQUEST (R) and _COMPLETE (C) values (high-low) */
631
632#define TX_VALID 0x0080 /* R: set via TRANSMIT.VALID interrupt.
633 * C: always reset to zero!
634 */
635#define TX_FRAME_COMPLETE 0x0040 /* R: must be reset to zero.
636 * C: set to one.
637 */
638#define TX_START_FRAME 0x0020 /* R: start of a frame: 1
639 * C: unchanged.
640 */
641#define TX_END_FRAME 0x0010 /* R: end of a frame: 1
642 * C: unchanged.
643 */
644#define TX_FRAME_IRQ 0x0008 /* R: request interrupt generation
645 * after transmission.
646 * C: unchanged.
647 */
648#define TX_ERROR 0x0004 /* R: reserved.
649 * C: set to one if Error occurred.
650 */
651#define TX_INTERFRAME_WAIT 0x0004
652#define TX_PASS_CRC 0x0002 /* R: set if CRC value is already
653 * calculated. (valid only in
654 * FRAME_START TPL)
655 * C: unchanged.
656 */
657#define TX_PASS_SRC_ADDR 0x0001 /* R: adapter uses explicit frame
658 * source address and does not overwrite
659 * with the adapter node address.
660 * (valid only in FRAME_START TPL)
661 *
662 * C: unchanged.
663 */
664#define TX_STRIP_FS 0xFF00 /* R: reserved.
665 * C: if no Transmission Error,
666 * field contains copy of FS byte after
667 * stripping of frame.
668 */
669
670/*
671 * Structure of Transmit Parameter Lists (TPLs) (only one frame every TPL,
672 * but possibly multiple TPLs for one frame) the length of the TPLs has to be
673 * initialized in the OPL. (OPEN parameter list)
674 */
675#define TPL_NUM 3 /* Number of Transmit Parameter Lists.
676 * !! MUST BE >= 3 !!
677 */
678
#pragma pack(1)
typedef struct s_TPL TPL;

/* NOTE(review): #pragma pack() is issued in the MIDDLE of this struct,
 * before the driver-private trailing fields. Mid-struct pack pragmas are
 * compiler-dependent; presumably the intent is that only the adapter-
 * visible head of the struct is packed — TODO confirm layout on all
 * supported compilers.
 */
struct s_TPL {	/* Transmit Parameter List (align on even word boundaries) */
	__be32 NextTPLAddr;		/* Pointer to next TPL in chain; if
					 * pointer is odd: this is the last
					 * TPL. Pointing to itself can cause
					 * problems!
					 */
	volatile u_int16_t Status;	/* Initialized by the adapter:
					 * CSTAT_REQUEST important: update least
					 * significant bit first! Set by the
					 * adapter: CSTAT_COMPLETE status.
					 */
	__be16 FrameSize;		/* Number of bytes to be transmitted
					 * as a frame including AC/FC,
					 * Destination, Source, Routing field
					 * not including CRC, FS, End Delimiter
					 * (valid only if START_FRAME bit in
					 * CSTAT nonzero) must not be zero in
					 * any list; maximum value: (BUFFER_SIZE
					 * - 8) * TX_BUF_MAX sum of DataCount
					 * values in FragmentList must equal
					 * Frame_Size value in START_FRAME TPL!
					 * frame data fragment list.
					 */

	/* TPL/RPL size in OPEN parameter list depending on maximal
	 * numbers of fragments used in one parameter list.
	 */
	Fragment FragList[TX_FRAG_NUM];	/* Maximum: nine frame fragments in one
					 * TPL actual version of firmware: 9
					 * fragments possible.
					 */
#pragma pack()

	/* Special proprietary data and precalculations. These trailing
	 * fields are host-only bookkeeping, not read by the adapter.
	 */

	TPL *NextTPLPtr;		/* Pointer to next TPL in chain. */
	unsigned char *MData;		/* Host pointer to the frame data. */
	struct sk_buff *Skb;		/* sk_buff associated with this TPL. */
	unsigned char TPLIndex;		/* Index of this TPL (presumably into
					 * net_local.Tpl[] — TODO confirm). */
	volatile unsigned char BusyFlag;/* Flag: TPL busy? */
	dma_addr_t DMABuff;		/* DMA IO bus address from dma_map */
};
724
725/* ---------------------Receive Functions-------------------------------*
726 * define RECEIVE_CSTAT_REQUEST (R) and RECEIVE_CSTAT_COMPLETE (C) values.
727 * (high-low)
728 */
729#define RX_VALID 0x0080 /* R: set; tell adapter with
730 * RECEIVE.VALID interrupt.
731 * C: reset to zero.
732 */
733#define RX_FRAME_COMPLETE 0x0040 /* R: must be reset to zero,
734 * C: set to one.
735 */
736#define RX_START_FRAME 0x0020 /* R: must be reset to zero.
737 * C: set to one on the list.
738 */
739#define RX_END_FRAME 0x0010 /* R: must be reset to zero.
740 * C: set to one on the list
741 * that ends the frame.
742 */
743#define RX_FRAME_IRQ 0x0008 /* R: request interrupt generation
744 * after receive.
745 * C: unchanged.
746 */
747#define RX_INTERFRAME_WAIT 0x0004 /* R: after receiving a frame:
748 * interrupt and wait for a
749 * RECEIVE.CONTINUE.
750 * C: unchanged.
751 */
752#define RX_PASS_CRC 0x0002 /* R: if set, the adapter includes
753 * the CRC in data passed. (last four
754 * bytes; valid only if FRAME_START is
755 * set)
756 * C: set, if CRC is included in
757 * received data.
758 */
759#define RX_PASS_SRC_ADDR 0x0001 /* R: adapter uses explicit frame
760 * source address and does not
761 * overwrite with the adapter node
762 * address. (valid only if FRAME_START
763 * is set)
764 * C: unchanged.
765 */
766#define RX_RECEIVE_FS 0xFC00 /* R: reserved; must be reset to zero.
767 * C: on lists with START_FRAME, field
768 * contains frame status field from
769 * received frame; otherwise cleared.
770 */
771#define RX_ADDR_MATCH 0x0300 /* R: reserved; must be reset to zero.
772 * C: address match code mask.
773 */
774#define RX_STATUS_MASK 0x00FF /* Mask for receive status bits. */
775
776#define RX_INTERN_ADDR_MATCH 0x0100 /* C: internally address match. */
777#define RX_EXTERN_ADDR_MATCH 0x0200 /* C: externally matched via
778 * XMATCH/XFAIL interface.
779 */
780#define RX_INTEXT_ADDR_MATCH 0x0300 /* C: internally and externally
781 * matched.
782 */
783#define RX_READY (RX_VALID | RX_FRAME_IRQ) /* Ready for receive. */
784
785/* Constants for Command Status Interrupt.
786 * COMMAND_REJECT status field bit functions (SSB.Parm[0])
787 */
788#define ILLEGAL_COMMAND 0x0080 /* Set if an unknown command
789 * is issued to the adapter
790 */
791#define ADDRESS_ERROR 0x0040 /* Set if any address field in
792 * the SCB is odd. (not word aligned)
793 */
794#define ADAPTER_OPEN 0x0020 /* Command issued illegal with
795 * open adapter.
796 */
797#define ADAPTER_CLOSE 0x0010 /* Command issued illegal with
798 * closed adapter.
799 */
800#define SAME_COMMAND 0x0008 /* Command issued with same command
801 * already executing.
802 */
803
804/* OPEN_COMPLETION values (SSB.Parm[0], MSB) */
805#define NODE_ADDR_ERROR 0x0040 /* Wrong address or BIA read
806 * zero address.
807 */
808#define LIST_SIZE_ERROR 0x0020 /* If List_Size value not in 0,
809 * 14, 20, 26.
810 */
811#define BUF_SIZE_ERROR 0x0010 /* Not enough available memory for
812 * two buffers.
813 */
814#define TX_BUF_COUNT_ERROR 0x0004 /* Remaining receive buffers less than
815 * two.
816 */
817#define OPEN_ERROR 0x0002 /* Error during ring insertion; more
818 * information in bits 8-15.
819 */
820
/* Standard return codes */
#define GOOD_COMPLETION		0x0080	/* = OPEN_SUCCESSFUL */
#define INVALID_OPEN_OPTION	0x0001	/* OPEN options are not supported by
					 * the adapter.
					 */
826
827/* OPEN phases; details of OPEN_ERROR (SSB.Parm[0], LSB) */
828#define OPEN_PHASES_MASK 0xF000 /* Check only the bits 8-11. */
829#define LOBE_MEDIA_TEST 0x1000
830#define PHYSICAL_INSERTION 0x2000
831#define ADDRESS_VERIFICATION 0x3000
832#define PARTICIPATION_IN_RING_POLL 0x4000
833#define REQUEST_INITIALISATION 0x5000
834#define FULLDUPLEX_CHECK 0x6000
835
836/* OPEN error codes; details of OPEN_ERROR (SSB.Parm[0], LSB) */
837#define OPEN_ERROR_CODES_MASK 0x0F00 /* Check only the bits 12-15. */
838#define OPEN_FUNCTION_FAILURE 0x0100 /* Unable to transmit to itself or
839 * frames received before insertion.
840 */
841#define OPEN_SIGNAL_LOSS 0x0200 /* Signal loss condition detected at
842 * receiver.
843 */
844#define OPEN_TIMEOUT 0x0500 /* Insertion timer expired before
845 * logical insertion.
846 */
847#define OPEN_RING_FAILURE 0x0600 /* Unable to receive own ring purge
848 * MAC frames.
849 */
850#define OPEN_RING_BEACONING 0x0700 /* Beacon MAC frame received after
851 * ring insertion.
852 */
853#define OPEN_DUPLICATE_NODEADDR 0x0800 /* Other station in ring found
854 * with the same address.
855 */
856#define OPEN_REQUEST_INIT 0x0900 /* RPS present but does not respond. */
857#define OPEN_REMOVE_RECEIVED 0x0A00 /* Adapter received a remove adapter
858 * MAC frame.
859 */
860#define OPEN_FULLDUPLEX_SET 0x0D00 /* Got this with full duplex on when
861 * trying to connect to a normal ring.
862 */
863
864/* SET_BRIDGE_PARMS return codes: */
865#define BRIDGE_INVALID_MAX_LEN 0x4000 /* MAX_ROUTING_FIELD_LENGTH odd,
866 * less than 6 or > 30.
867 */
868#define BRIDGE_INVALID_SRC_RING 0x2000 /* SOURCE_RING number zero, too large
869 * or = TARGET_RING.
870 */
871#define BRIDGE_INVALID_TRG_RING 0x1000 /* TARGET_RING number zero, too large
872 * or = SOURCE_RING.
873 */
874#define BRIDGE_INVALID_BRDGE_NO 0x0800 /* BRIDGE_NUMBER too large. */
875#define BRIDGE_INVALID_OPTIONS 0x0400 /* Invalid bridge options. */
876#define BRIDGE_DIAGS_FAILED 0x0200 /* Diagnostics of TMS380SRA failed. */
877#define BRIDGE_NO_SRA 0x0100 /* The TMS380SRA does not exist in HW
878 * configuration.
879 */
880
881/*
882 * Bring Up Diagnostics error codes.
883 */
884#define BUD_INITIAL_ERROR 0x0
885#define BUD_CHECKSUM_ERROR 0x1
886#define BUD_ADAPTER_RAM_ERROR 0x2
887#define BUD_INSTRUCTION_ERROR 0x3
888#define BUD_CONTEXT_ERROR 0x4
889#define BUD_PROTOCOL_ERROR 0x5
890#define BUD_INTERFACE_ERROR 0x6
891
892/* BUD constants */
893#define BUD_MAX_RETRIES 3
894#define BUD_MAX_LOOPCNT 6
895#define BUD_TIMEOUT 3000
896
897/* Initialization constants */
898#define INIT_MAX_RETRIES 3 /* Maximum three retries. */
899#define INIT_MAX_LOOPCNT 22 /* Maximum loop counts. */
900
901/* RING STATUS field values (high/low) */
902#define SIGNAL_LOSS 0x0080 /* Loss of signal on the ring
903 * detected.
904 */
905#define HARD_ERROR 0x0040 /* Transmitting or receiving beacon
906 * frames.
907 */
908#define SOFT_ERROR 0x0020 /* Report error MAC frame
909 * transmitted.
910 */
911#define TRANSMIT_BEACON 0x0010 /* Transmitting beacon frames on the
912 * ring.
913 */
914#define LOBE_WIRE_FAULT 0x0008 /* Open or short circuit in the
915 * cable to concentrator; adapter
916 * closed.
917 */
918#define AUTO_REMOVAL_ERROR 0x0004 /* Lobe wrap test failed, deinserted;
919 * adapter closed.
920 */
921#define REMOVE_RECEIVED 0x0001 /* Received a remove ring station MAC
922 * MAC frame request; adapter closed.
923 */
924#define COUNTER_OVERFLOW 0x8000 /* Overflow of one of the adapters
925 * error counters; READ.ERROR.LOG.
926 */
927#define SINGLE_STATION 0x4000 /* Adapter is the only station on the
928 * ring.
929 */
930#define RING_RECOVERY 0x2000 /* Claim token MAC frames on the ring;
931 * reset after ring purge frame.
932 */
933
934#define ADAPTER_CLOSED (LOBE_WIRE_FAULT | AUTO_REMOVAL_ERROR |\
935 REMOVE_RECEIVED)
936
937/* Adapter_check_block.Status field bit assignments: */
938#define DIO_PARITY 0x8000 /* Adapter detects bad parity
939 * through direct I/O access.
940 */
941#define DMA_READ_ABORT 0x4000 /* Aborting DMA read operation
942 * from system Parm[0]: 0=timeout,
943 * 1=parity error, 2=bus error;
944 * Parm[1]: 32 bit pointer to host
945 * system address at failure.
946 */
947#define DMA_WRITE_ABORT 0x2000 /* Aborting DMA write operation
948 * to system. (parameters analogous to
949 * DMA_READ_ABORT)
950 */
951#define ILLEGAL_OP_CODE 0x1000 /* Illegal operation code in the
952 * the adapters firmware Parm[0]-2:
953 * communications processor registers
954 * R13-R15.
955 */
956#define PARITY_ERRORS 0x0800 /* Adapter detects internal bus
957 * parity error.
958 */
959#define RAM_DATA_ERROR 0x0080 /* Valid only during RAM testing;
960 * RAM data error Parm[0-1]: 32 bit
961 * pointer to RAM location.
962 */
963#define RAM_PARITY_ERROR 0x0040 /* Valid only during RAM testing;
964 * RAM parity error Parm[0-1]: 32 bit
965 * pointer to RAM location.
966 */
967#define RING_UNDERRUN 0x0020 /* Internal DMA underrun when
968 * transmitting onto ring.
969 */
970#define INVALID_IRQ 0x0008 /* Unrecognized interrupt generated
971 * internal to adapter Parm[0-2]:
972 * adapter register R13-R15.
973 */
974#define INVALID_ERROR_IRQ 0x0004 /* Unrecognized error interrupt
975 * generated Parm[0-2]: adapter register
976 * R13-R15.
977 */
978#define INVALID_XOP 0x0002 /* Unrecognized XOP request in
979 * communication processor Parm[0-2]:
980 * adapter register R13-R15.
981 */
982#define CHECKADDR 0x05E0 /* Adapter check status information
983 * address offset.
984 */
985#define ROM_PAGE_0 0x0000 /* Adapter ROM page 0. */
986
987/*
988 * RECEIVE.STATUS interrupt result SSB values: (high-low)
989 * (RECEIVE_COMPLETE field bit definitions in SSB.Parm[0])
990 */
991#define RX_COMPLETE 0x0080 /* SSB.Parm[0]; SSB.Parm[1]: 32
992 * bit pointer to last RPL.
993 */
994#define RX_SUSPENDED 0x0040 /* SSB.Parm[0]; SSB.Parm[1]: 32
995 * bit pointer to RPL with odd
996 * forward pointer.
997 */
998
999/* Valid receive CSTAT: */
1000#define RX_FRAME_CONTROL_BITS (RX_VALID | RX_START_FRAME | RX_END_FRAME | \
1001 RX_FRAME_COMPLETE)
1002#define VALID_SINGLE_BUFFER_FRAME (RX_START_FRAME | RX_END_FRAME | \
1003 RX_FRAME_COMPLETE)
1004
typedef enum SKB_STAT SKB_STAT;
/* Per-RPL sk_buff state (see struct s_RPL.SkbStat below). */
enum SKB_STAT {
	SKB_UNAVAILABLE,	/* No sk_buff attached to the RPL. */
	SKB_DMA_DIRECT,		/* Frame DMAed directly into the sk_buff. */
	SKB_DATA_COPY		/* Frame data copied into the sk_buff. */
};
1011
1012/* Receive Parameter List (RPL) The length of the RPLs has to be initialized
1013 * in the OPL. (OPEN parameter list)
1014 */
1015#define RPL_NUM 3
1016
1017#define RX_FRAG_NUM 1 /* Maximal number of used fragments in one RPL.
1018 * (up to firmware v2.24: 3, now: up to 9)
1019 */
1020
#pragma pack(1)
typedef struct s_RPL RPL;
/* NOTE(review): as with struct s_TPL above, #pragma pack() sits in the
 * middle of this struct so that only the adapter-visible head is packed;
 * mid-struct pack pragmas are compiler-dependent — TODO confirm.
 */
struct s_RPL {	/* Receive Parameter List */
	__be32 NextRPLAddr;		/* Pointer to next RPL in chain
					 * (normalized = physical 32 bit
					 * address) if pointer is odd: this
					 * is last RPL. Pointing to itself can
					 * cause problems!
					 */
	volatile u_int16_t Status;	/* Set by creation of Receive Parameter
					 * List RECEIVE_CSTAT_COMPLETE set by
					 * adapter in lists that start or end
					 * a frame.
					 */
	volatile __be16 FrameSize;	/* Number of bytes received as a
					 * frame including AC/FC, Destination,
					 * Source, Routing field not including
					 * CRC, FS (Frame Status), End Delimiter
					 * (valid only if START_FRAME bit in
					 * CSTAT nonzero) must not be zero in
					 * any list; maximum value: (BUFFER_SIZE
					 * - 8) * TX_BUF_MAX sum of DataCount
					 * values in FragmentList must equal
					 * Frame_Size value in START_FRAME TPL!
					 * frame data fragment list
					 */

	/* TPL/RPL size in OPEN parameter list depending on maximal numbers
	 * of fragments used in one parameter list.
	 */
	Fragment FragList[RX_FRAG_NUM];	/* Maximum: nine frame fragments in
					 * one TPL. Actual version of firmware:
					 * 9 fragments possible.
					 */
#pragma pack()

	/* Special proprietary data and precalculations. Host-only fields,
	 * not read by the adapter.
	 */
	RPL *NextRPLPtr;	/* Logical pointer to next RPL in chain. */
	unsigned char *MData;	/* Host pointer to the received frame data. */
	struct sk_buff *Skb;	/* sk_buff associated with this RPL. */
	SKB_STAT SkbStat;	/* How Skb relates to the data (see SKB_STAT). */
	int RPLIndex;		/* Index of this RPL (presumably into
				 * net_local.Rpl[] — TODO confirm). */
	dma_addr_t DMABuff;	/* DMA IO bus address from dma_map */
};
1065
/* Information that needs to be kept for each board (per-device private
 * driver state).
 */
typedef struct net_local {
#pragma pack(1)
	/* NOTE(review): only this leading, adapter-visible group is packed;
	 * the #pragma pack() below restores default alignment for the
	 * host-only remainder. Mid-struct pack pragmas are compiler-
	 * dependent — TODO confirm.
	 */
	IPB ipb;	/* Initialization Parameter Block. */
	SCB scb;	/* System Command Block: system to adapter
			 * communication.
			 */
	SSB ssb;	/* System Status Block: adapter to system
			 * communication.
			 */
	OPB ocpl;	/* Open Options Parameter Block. */

	ERRORTAB errorlogtable;	/* Adapter statistic error counters.
				 * (read from adapter memory)
				 */
	unsigned char ProductID[PROD_ID_SIZE + 1];	/* Product ID */
#pragma pack()

	TPL Tpl[TPL_NUM];	/* Transmit parameter lists. */
	TPL *TplFree;		/* Next free TPL. */
	TPL *TplBusy;		/* Busy (in-flight) TPL chain head. */
	unsigned char LocalTxBuffers[TPL_NUM][DEFAULT_PACKET_SIZE];
				/* Local transmit frame buffers. */

	RPL Rpl[RPL_NUM];	/* Receive parameter lists. */
	RPL *RplHead;		/* Head of the RPL ring. */
	RPL *RplTail;		/* Tail of the RPL ring. */
	unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE];
				/* Local receive frame buffers. */

	struct device *pdev;
	int DataRate;		/* Ring speed; presumably SPEED_4 or
				 * SPEED_16 (see defines above). */
	unsigned char ScbInUse;	/* Flag: SCB currently owned by adapter. */
	unsigned short CMDqueue;/* Pseudo command queue: one OC_* bit per
				 * outstanding command (see comment above
				 * the OC_* defines). */

	/* Driver state flags. */
	unsigned long AdapterOpenFlag:1;
	unsigned long AdapterVirtOpenFlag:1;
	unsigned long OpenCommandIssued:1;
	unsigned long TransmitCommandActive:1;
	unsigned long TransmitHaltScheduled:1;
	unsigned long HaltInProgress:1;
	unsigned long LobeWireFaultLogged:1;
	unsigned long ReOpenInProgress:1;
	unsigned long Sleeping:1;

	unsigned long LastOpenStatus;
	unsigned short CurrentRingStatus;
	unsigned long MaxPacketSize;

	unsigned long StartTime;
	unsigned long LastSendTime;

	struct tr_statistics MacStat;	/* MAC statistics structure */

	unsigned long dmalimit;	/* the max DMA address (ie, ISA) */
	dma_addr_t dmabuffer;	/* the DMA bus address corresponding to
				   priv. Might be different from virt_to_bus()
				   for architectures with IO MMU (Alpha) */

	struct timer_list timer;

	wait_queue_head_t wait_for_tok_int;

	INTPTRS intptrs;	/* Internal adapter pointer. Must be read
				 * before OPEN command.
				 */
	/* Card-specific register access hooks, supplied by the
	 * board/bus-specific glue driver.
	 */
	unsigned short (*setnselout)(struct net_device *);
	unsigned short (*sifreadb)(struct net_device *, unsigned short);
	void (*sifwriteb)(struct net_device *, unsigned short, unsigned short);
	unsigned short (*sifreadw)(struct net_device *, unsigned short);
	void (*sifwritew)(struct net_device *, unsigned short, unsigned short);

	spinlock_t lock;	/* SMP protection */
	void *tmspriv;		/* Glue-driver private data. */
} NET_LOCAL;
1139
1140#endif /* __KERNEL__ */
1141#endif /* __LINUX_TMS380TR_H */
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
deleted file mode 100644
index fb9918da5792..000000000000
--- a/drivers/net/tokenring/tmspci.c
+++ /dev/null
@@ -1,248 +0,0 @@
1/*
2 * tmspci.c: A generic network driver for TMS380-based PCI token ring cards.
3 *
4 * Written 1999 by Adam Fritzler
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 * This driver module supports the following cards:
10 * - SysKonnect TR4/16(+) PCI (SK-4590)
11 * - SysKonnect TR4/16 PCI (SK-4591)
12 * - Compaq TR 4/16 PCI
13 * - Thomas-Conrad TC4048 4/16 PCI
14 * - 3Com 3C339 Token Link Velocity
15 *
16 * Maintainer(s):
17 * AF Adam Fritzler
18 *
19 * Modification History:
20 * 30-Dec-99 AF Split off from the tms380tr driver.
21 * 22-Jan-00 AF Updated to use indirect read/writes
22 * 23-Nov-00 JG New PCI API, cleanups
23 *
24 * TODO:
25 * 1. See if we can use MMIO instead of port accesses
26 *
27 */
28
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/netdevice.h>
35#include <linux/trdevice.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39
40#include "tms380tr.h"
41
42static char version[] __devinitdata =
43"tmspci.c: v1.02 23/11/2000 by Adam Fritzler\n";
44
45#define TMS_PCI_IO_EXTENT 32
46
47struct card_info {
48 unsigned char nselout[2]; /* NSELOUT vals for 4mb([0]) and 16mb([1]) */
49 char *name;
50};
51
52static struct card_info card_info_table[] = {
53 { {0x03, 0x01}, "Compaq 4/16 TR PCI"},
54 { {0x03, 0x01}, "SK NET TR 4/16 PCI"},
55 { {0x03, 0x01}, "Thomas-Conrad TC4048 PCI 4/16"},
56 { {0x03, 0x01}, "3Com Token Link Velocity"},
57};
58
59static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
60 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
61 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
62 { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
63 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
64 { } /* Terminating entry */
65};
66MODULE_DEVICE_TABLE(pci, tmspci_pci_tbl);
67
68MODULE_LICENSE("GPL");
69
70static void tms_pci_read_eeprom(struct net_device *dev);
71static unsigned short tms_pci_setnselout_pins(struct net_device *dev);
72
73static unsigned short tms_pci_sifreadb(struct net_device *dev, unsigned short reg)
74{
75 return inb(dev->base_addr + reg);
76}
77
78static unsigned short tms_pci_sifreadw(struct net_device *dev, unsigned short reg)
79{
80 return inw(dev->base_addr + reg);
81}
82
83static void tms_pci_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
84{
85 outb(val, dev->base_addr + reg);
86}
87
88static void tms_pci_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
89{
90 outw(val, dev->base_addr + reg);
91}
92
93static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
94{
95 static int versionprinted;
96 struct net_device *dev;
97 struct net_local *tp;
98 int ret;
99 unsigned int pci_irq_line;
100 unsigned long pci_ioaddr;
101 struct card_info *cardinfo = &card_info_table[ent->driver_data];
102
103 if (versionprinted++ == 0)
104 printk("%s", version);
105
106 if (pci_enable_device(pdev))
107 return -EIO;
108
109 /* Remove I/O space marker in bit 0. */
110 pci_irq_line = pdev->irq;
111 pci_ioaddr = pci_resource_start (pdev, 0);
112
113 /* At this point we have found a valid card. */
114 dev = alloc_trdev(sizeof(struct net_local));
115 if (!dev)
116 return -ENOMEM;
117
118 if (!request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, dev->name)) {
119 ret = -EBUSY;
120 goto err_out_trdev;
121 }
122
123 dev->base_addr = pci_ioaddr;
124 dev->irq = pci_irq_line;
125 dev->dma = 0;
126
127 dev_info(&pdev->dev, "%s\n", cardinfo->name);
128 dev_info(&pdev->dev, " IO: %#4lx IRQ: %d\n", dev->base_addr, dev->irq);
129
130 tms_pci_read_eeprom(dev);
131
132 dev_info(&pdev->dev, " Ring Station Address: %pM\n", dev->dev_addr);
133
134 ret = tmsdev_init(dev, &pdev->dev);
135 if (ret) {
136 dev_info(&pdev->dev, "unable to get memory for dev->priv.\n");
137 goto err_out_region;
138 }
139
140 tp = netdev_priv(dev);
141 tp->setnselout = tms_pci_setnselout_pins;
142
143 tp->sifreadb = tms_pci_sifreadb;
144 tp->sifreadw = tms_pci_sifreadw;
145 tp->sifwriteb = tms_pci_sifwriteb;
146 tp->sifwritew = tms_pci_sifwritew;
147
148 memcpy(tp->ProductID, cardinfo->name, PROD_ID_SIZE + 1);
149
150 tp->tmspriv = cardinfo;
151
152 dev->netdev_ops = &tms380tr_netdev_ops;
153
154 ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
155 dev->name, dev);
156 if (ret)
157 goto err_out_tmsdev;
158
159 pci_set_drvdata(pdev, dev);
160 SET_NETDEV_DEV(dev, &pdev->dev);
161
162 ret = register_netdev(dev);
163 if (ret)
164 goto err_out_irq;
165
166 return 0;
167
168err_out_irq:
169 free_irq(pdev->irq, dev);
170err_out_tmsdev:
171 pci_set_drvdata(pdev, NULL);
172 tmsdev_term(dev);
173err_out_region:
174 release_region(pci_ioaddr, TMS_PCI_IO_EXTENT);
175err_out_trdev:
176 free_netdev(dev);
177 return ret;
178}
179
180/*
181 * Reads MAC address from adapter RAM, which should've read it from
182 * the onboard ROM.
183 *
184 * Calling this on a board that does not support it can be a very
185 * dangerous thing. The Madge board, for instance, will lock your
186 * machine hard when this is called. Luckily, its supported in a
187 * separate driver. --ASF
188 */
189static void tms_pci_read_eeprom(struct net_device *dev)
190{
191 int i;
192
193 /* Address: 0000:0000 */
194 tms_pci_sifwritew(dev, 0, SIFADX);
195 tms_pci_sifwritew(dev, 0, SIFADR);
196
197 /* Read six byte MAC address data */
198 dev->addr_len = 6;
199 for(i = 0; i < 6; i++)
200 dev->dev_addr[i] = tms_pci_sifreadw(dev, SIFINC) >> 8;
201}
202
203static unsigned short tms_pci_setnselout_pins(struct net_device *dev)
204{
205 unsigned short val = 0;
206 struct net_local *tp = netdev_priv(dev);
207 struct card_info *cardinfo = tp->tmspriv;
208
209 if(tp->DataRate == SPEED_4)
210 val |= cardinfo->nselout[0]; /* Set 4Mbps */
211 else
212 val |= cardinfo->nselout[1]; /* Set 16Mbps */
213 return val;
214}
215
216static void __devexit tms_pci_detach (struct pci_dev *pdev)
217{
218 struct net_device *dev = pci_get_drvdata(pdev);
219
220 BUG_ON(!dev);
221 unregister_netdev(dev);
222 release_region(dev->base_addr, TMS_PCI_IO_EXTENT);
223 free_irq(dev->irq, dev);
224 tmsdev_term(dev);
225 free_netdev(dev);
226 pci_set_drvdata(pdev, NULL);
227}
228
229static struct pci_driver tms_pci_driver = {
230 .name = "tmspci",
231 .id_table = tmspci_pci_tbl,
232 .probe = tms_pci_attach,
233 .remove = __devexit_p(tms_pci_detach),
234};
235
236static int __init tms_pci_init (void)
237{
238 return pci_register_driver(&tms_pci_driver);
239}
240
241static void __exit tms_pci_rmmod (void)
242{
243 pci_unregister_driver (&tms_pci_driver);
244}
245
246module_init(tms_pci_init);
247module_exit(tms_pci_rmmod);
248
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bb8c72c79c6f..987aeefbc774 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -313,7 +313,7 @@ static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
313 313
314 /* Exact match */ 314 /* Exact match */
315 for (i = 0; i < filter->count; i++) 315 for (i = 0; i < filter->count; i++)
316 if (!compare_ether_addr(eh->h_dest, filter->addr[i])) 316 if (ether_addr_equal(eh->h_dest, filter->addr[i]))
317 return 1; 317 return 1;
318 318
319 /* Inexact match (multicast only) */ 319 /* Inexact match (multicast only) */
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 5ee032cafade..42b5151aa78a 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -355,7 +355,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
355 u32 packet_len; 355 u32 packet_len;
356 u32 padbytes = 0xffff0000; 356 u32 padbytes = 0xffff0000;
357 357
358 padlen = ((skb->len + 4) % 512) ? 0 : 4; 358 padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
359 359
360 if ((!skb_cloned(skb)) && 360 if ((!skb_cloned(skb)) &&
361 ((headroom + tailroom) >= (4 + padlen))) { 361 ((headroom + tailroom) >= (4 + padlen))) {
@@ -377,7 +377,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
377 cpu_to_le32s(&packet_len); 377 cpu_to_le32s(&packet_len);
378 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); 378 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
379 379
380 if ((skb->len % 512) == 0) { 380 if (padlen) {
381 cpu_to_le32s(&padbytes); 381 cpu_to_le32s(&padbytes);
382 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); 382 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
383 skb_put(skb, sizeof(padbytes)); 383 skb_put(skb, sizeof(padbytes));
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 90a30026a931..fffee6aee8bb 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -83,6 +83,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
83 struct cdc_state *info = (void *) &dev->data; 83 struct cdc_state *info = (void *) &dev->data;
84 int status; 84 int status;
85 int rndis; 85 int rndis;
86 bool android_rndis_quirk = false;
86 struct usb_driver *driver = driver_of(intf); 87 struct usb_driver *driver = driver_of(intf);
87 struct usb_cdc_mdlm_desc *desc = NULL; 88 struct usb_cdc_mdlm_desc *desc = NULL;
88 struct usb_cdc_mdlm_detail_desc *detail = NULL; 89 struct usb_cdc_mdlm_detail_desc *detail = NULL;
@@ -195,6 +196,11 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
195 info->control, 196 info->control,
196 info->u->bSlaveInterface0, 197 info->u->bSlaveInterface0,
197 info->data); 198 info->data);
199 /* fall back to hard-wiring for RNDIS */
200 if (rndis) {
201 android_rndis_quirk = true;
202 goto next_desc;
203 }
198 goto bad_desc; 204 goto bad_desc;
199 } 205 }
200 if (info->control != intf) { 206 if (info->control != intf) {
@@ -271,11 +277,15 @@ next_desc:
271 /* Microsoft ActiveSync based and some regular RNDIS devices lack the 277 /* Microsoft ActiveSync based and some regular RNDIS devices lack the
272 * CDC descriptors, so we'll hard-wire the interfaces and not check 278 * CDC descriptors, so we'll hard-wire the interfaces and not check
273 * for descriptors. 279 * for descriptors.
280 *
281 * Some Android RNDIS devices have a CDC Union descriptor pointing
282 * to non-existing interfaces. Ignore that and attempt the same
283 * hard-wired 0 and 1 interfaces.
274 */ 284 */
275 if (rndis && !info->u) { 285 if (rndis && (!info->u || android_rndis_quirk)) {
276 info->control = usb_ifnum_to_if(dev->udev, 0); 286 info->control = usb_ifnum_to_if(dev->udev, 0);
277 info->data = usb_ifnum_to_if(dev->udev, 1); 287 info->data = usb_ifnum_to_if(dev->udev, 1);
278 if (!info->control || !info->data) { 288 if (!info->control || !info->data || info->control != intf) {
279 dev_dbg(&intf->dev, 289 dev_dbg(&intf->dev,
280 "rndis: master #0/%p slave #1/%p\n", 290 "rndis: master #0/%p slave #1/%p\n",
281 info->control, 291 info->control,
@@ -475,6 +485,8 @@ static const struct driver_info wwan_info = {
475/*-------------------------------------------------------------------------*/ 485/*-------------------------------------------------------------------------*/
476 486
477#define HUAWEI_VENDOR_ID 0x12D1 487#define HUAWEI_VENDOR_ID 0x12D1
488#define NOVATEL_VENDOR_ID 0x1410
489#define ZTE_VENDOR_ID 0x19D2
478 490
479static const struct usb_device_id products [] = { 491static const struct usb_device_id products [] = {
480/* 492/*
@@ -592,6 +604,76 @@ static const struct usb_device_id products [] = {
592 * because of bugs/quirks in a given product (like Zaurus, above). 604 * because of bugs/quirks in a given product (like Zaurus, above).
593 */ 605 */
594{ 606{
607 /* Novatel USB551L */
608 /* This match must come *before* the generic CDC-ETHER match so that
609 * we get FLAG_WWAN set on the device, since it's descriptors are
610 * generic CDC-ETHER.
611 */
612 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
613 | USB_DEVICE_ID_MATCH_PRODUCT
614 | USB_DEVICE_ID_MATCH_INT_INFO,
615 .idVendor = NOVATEL_VENDOR_ID,
616 .idProduct = 0xB001,
617 .bInterfaceClass = USB_CLASS_COMM,
618 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
619 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
620 .driver_info = (unsigned long)&wwan_info,
621}, {
622 /* ZTE (Vodafone) K3805-Z */
623 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
624 | USB_DEVICE_ID_MATCH_PRODUCT
625 | USB_DEVICE_ID_MATCH_INT_INFO,
626 .idVendor = ZTE_VENDOR_ID,
627 .idProduct = 0x1003,
628 .bInterfaceClass = USB_CLASS_COMM,
629 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
630 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
631 .driver_info = (unsigned long)&wwan_info,
632}, {
633 /* ZTE (Vodafone) K3806-Z */
634 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
635 | USB_DEVICE_ID_MATCH_PRODUCT
636 | USB_DEVICE_ID_MATCH_INT_INFO,
637 .idVendor = ZTE_VENDOR_ID,
638 .idProduct = 0x1015,
639 .bInterfaceClass = USB_CLASS_COMM,
640 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
641 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
642 .driver_info = (unsigned long)&wwan_info,
643}, {
644 /* ZTE (Vodafone) K4510-Z */
645 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
646 | USB_DEVICE_ID_MATCH_PRODUCT
647 | USB_DEVICE_ID_MATCH_INT_INFO,
648 .idVendor = ZTE_VENDOR_ID,
649 .idProduct = 0x1173,
650 .bInterfaceClass = USB_CLASS_COMM,
651 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
652 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
653 .driver_info = (unsigned long)&wwan_info,
654}, {
655 /* ZTE (Vodafone) K3770-Z */
656 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
657 | USB_DEVICE_ID_MATCH_PRODUCT
658 | USB_DEVICE_ID_MATCH_INT_INFO,
659 .idVendor = ZTE_VENDOR_ID,
660 .idProduct = 0x1177,
661 .bInterfaceClass = USB_CLASS_COMM,
662 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
663 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
664 .driver_info = (unsigned long)&wwan_info,
665}, {
666 /* ZTE (Vodafone) K3772-Z */
667 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
668 | USB_DEVICE_ID_MATCH_PRODUCT
669 | USB_DEVICE_ID_MATCH_INT_INFO,
670 .idVendor = ZTE_VENDOR_ID,
671 .idProduct = 0x1181,
672 .bInterfaceClass = USB_CLASS_COMM,
673 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
674 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
675 .driver_info = (unsigned long)&wwan_info,
676}, {
595 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 677 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
596 USB_CDC_PROTO_NONE), 678 USB_CDC_PROTO_NONE),
597 .driver_info = (unsigned long) &cdc_info, 679 .driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 552d24bf862e..63cfd0b2c31a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -356,15 +356,45 @@ static const struct driver_info qmi_wwan_gobi = {
356}; 356};
357 357
358/* ZTE suck at making USB descriptors */ 358/* ZTE suck at making USB descriptors */
359static const struct driver_info qmi_wwan_force_int1 = {
360 .description = "Qualcomm WWAN/QMI device",
361 .flags = FLAG_WWAN,
362 .bind = qmi_wwan_bind_shared,
363 .unbind = qmi_wwan_unbind_shared,
364 .manage_power = qmi_wwan_manage_power,
365 .data = BIT(1), /* interface whitelist bitmap */
366};
367
359static const struct driver_info qmi_wwan_force_int4 = { 368static const struct driver_info qmi_wwan_force_int4 = {
360 .description = "Qualcomm Gobi wwan/QMI device", 369 .description = "Qualcomm WWAN/QMI device",
361 .flags = FLAG_WWAN, 370 .flags = FLAG_WWAN,
362 .bind = qmi_wwan_bind_gobi, 371 .bind = qmi_wwan_bind_shared,
363 .unbind = qmi_wwan_unbind_shared, 372 .unbind = qmi_wwan_unbind_shared,
364 .manage_power = qmi_wwan_manage_power, 373 .manage_power = qmi_wwan_manage_power,
365 .data = BIT(4), /* interface whitelist bitmap */ 374 .data = BIT(4), /* interface whitelist bitmap */
366}; 375};
367 376
377/* Sierra Wireless provide equally useless interface descriptors
378 * Devices in QMI mode can be switched between two different
379 * configurations:
380 * a) USB interface #8 is QMI/wwan
381 * b) USB interfaces #8, #19 and #20 are QMI/wwan
382 *
383 * Both configurations provide a number of other interfaces (serial++),
384 * some of which have the same endpoint configuration as we expect, so
385 * a whitelist or blacklist is necessary.
386 *
387 * FIXME: The below whitelist should include BIT(20). It does not
388 * because I cannot get it to work...
389 */
390static const struct driver_info qmi_wwan_sierra = {
391 .description = "Sierra Wireless wwan/QMI device",
392 .flags = FLAG_WWAN,
393 .bind = qmi_wwan_bind_gobi,
394 .unbind = qmi_wwan_unbind_shared,
395 .manage_power = qmi_wwan_manage_power,
396 .data = BIT(8) | BIT(19), /* interface whitelist bitmap */
397};
368 398
369#define HUAWEI_VENDOR_ID 0x12D1 399#define HUAWEI_VENDOR_ID 0x12D1
370#define QMI_GOBI_DEVICE(vend, prod) \ 400#define QMI_GOBI_DEVICE(vend, prod) \
@@ -380,6 +410,14 @@ static const struct usb_device_id products[] = {
380 .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */ 410 .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */
381 .driver_info = (unsigned long)&qmi_wwan_info, 411 .driver_info = (unsigned long)&qmi_wwan_info,
382 }, 412 },
413 { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
414 .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
415 .idVendor = HUAWEI_VENDOR_ID,
416 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
417 .bInterfaceSubClass = 1,
418 .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */
419 .driver_info = (unsigned long)&qmi_wwan_info,
420 },
383 { /* Huawei E392, E398 and possibly others in "Windows mode" 421 { /* Huawei E392, E398 and possibly others in "Windows mode"
384 * using a combined control and data interface without any CDC 422 * using a combined control and data interface without any CDC
385 * functional descriptors 423 * functional descriptors
@@ -409,6 +447,15 @@ static const struct usb_device_id products[] = {
409 .bInterfaceProtocol = 0xff, 447 .bInterfaceProtocol = 0xff,
410 .driver_info = (unsigned long)&qmi_wwan_force_int4, 448 .driver_info = (unsigned long)&qmi_wwan_force_int4,
411 }, 449 },
450 { /* ZTE (Vodafone) K3520-Z */
451 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
452 .idVendor = 0x19d2,
453 .idProduct = 0x0055,
454 .bInterfaceClass = 0xff,
455 .bInterfaceSubClass = 0xff,
456 .bInterfaceProtocol = 0xff,
457 .driver_info = (unsigned long)&qmi_wwan_force_int1,
458 },
412 { /* ZTE (Vodafone) K3565-Z */ 459 { /* ZTE (Vodafone) K3565-Z */
413 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, 460 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
414 .idVendor = 0x19d2, 461 .idVendor = 0x19d2,
@@ -436,6 +483,15 @@ static const struct usb_device_id products[] = {
436 .bInterfaceProtocol = 0xff, 483 .bInterfaceProtocol = 0xff,
437 .driver_info = (unsigned long)&qmi_wwan_force_int4, 484 .driver_info = (unsigned long)&qmi_wwan_force_int4,
438 }, 485 },
486 { /* ZTE (Vodafone) K3765-Z */
487 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
488 .idVendor = 0x19d2,
489 .idProduct = 0x2002,
490 .bInterfaceClass = 0xff,
491 .bInterfaceSubClass = 0xff,
492 .bInterfaceProtocol = 0xff,
493 .driver_info = (unsigned long)&qmi_wwan_force_int4,
494 },
439 { /* ZTE (Vodafone) K4505-Z */ 495 { /* ZTE (Vodafone) K4505-Z */
440 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, 496 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
441 .idVendor = 0x19d2, 497 .idVendor = 0x19d2,
@@ -445,6 +501,15 @@ static const struct usb_device_id products[] = {
445 .bInterfaceProtocol = 0xff, 501 .bInterfaceProtocol = 0xff,
446 .driver_info = (unsigned long)&qmi_wwan_force_int4, 502 .driver_info = (unsigned long)&qmi_wwan_force_int4,
447 }, 503 },
504 { /* Sierra Wireless MC77xx in QMI mode */
505 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
506 .idVendor = 0x1199,
507 .idProduct = 0x68a2,
508 .bInterfaceClass = 0xff,
509 .bInterfaceSubClass = 0xff,
510 .bInterfaceProtocol = 0xff,
511 .driver_info = (unsigned long)&qmi_wwan_sierra,
512 },
448 {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 513 {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
449 {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ 514 {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
450 {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ 515 {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index c8f1b5b3aff3..0d746b3fdef1 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -77,7 +77,9 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
77 if (dev->driver_info->indication) { 77 if (dev->driver_info->indication) {
78 dev->driver_info->indication(dev, msg, buflen); 78 dev->driver_info->indication(dev, msg, buflen);
79 } else { 79 } else {
80 switch (msg->status) { 80 u32 status = le32_to_cpu(msg->status);
81
82 switch (status) {
81 case RNDIS_STATUS_MEDIA_CONNECT: 83 case RNDIS_STATUS_MEDIA_CONNECT:
82 dev_info(udev, "rndis media connect\n"); 84 dev_info(udev, "rndis media connect\n");
83 break; 85 break;
@@ -85,8 +87,7 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
85 dev_info(udev, "rndis media disconnect\n"); 87 dev_info(udev, "rndis media disconnect\n");
86 break; 88 break;
87 default: 89 default:
88 dev_info(udev, "rndis indication: 0x%08x\n", 90 dev_info(udev, "rndis indication: 0x%08x\n", status);
89 le32_to_cpu(msg->status));
90 } 91 }
91 } 92 }
92} 93}
@@ -109,16 +110,17 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
109 int retval; 110 int retval;
110 int partial; 111 int partial;
111 unsigned count; 112 unsigned count;
112 __le32 rsp; 113 u32 xid = 0, msg_len, request_id, msg_type, rsp,
113 u32 xid = 0, msg_len, request_id; 114 status;
114 115
115 /* REVISIT when this gets called from contexts other than probe() or 116 /* REVISIT when this gets called from contexts other than probe() or
116 * disconnect(): either serialize, or dispatch responses on xid 117 * disconnect(): either serialize, or dispatch responses on xid
117 */ 118 */
118 119
120 msg_type = le32_to_cpu(buf->msg_type);
121
119 /* Issue the request; xid is unique, don't bother byteswapping it */ 122 /* Issue the request; xid is unique, don't bother byteswapping it */
120 if (likely(buf->msg_type != RNDIS_MSG_HALT && 123 if (likely(msg_type != RNDIS_MSG_HALT && msg_type != RNDIS_MSG_RESET)) {
121 buf->msg_type != RNDIS_MSG_RESET)) {
122 xid = dev->xid++; 124 xid = dev->xid++;
123 if (!xid) 125 if (!xid)
124 xid = dev->xid++; 126 xid = dev->xid++;
@@ -149,7 +151,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
149 } 151 }
150 152
151 /* Poll the control channel; the request probably completed immediately */ 153 /* Poll the control channel; the request probably completed immediately */
152 rsp = buf->msg_type | RNDIS_MSG_COMPLETION; 154 rsp = le32_to_cpu(buf->msg_type) | RNDIS_MSG_COMPLETION;
153 for (count = 0; count < 10; count++) { 155 for (count = 0; count < 10; count++) {
154 memset(buf, 0, CONTROL_BUFFER_SIZE); 156 memset(buf, 0, CONTROL_BUFFER_SIZE);
155 retval = usb_control_msg(dev->udev, 157 retval = usb_control_msg(dev->udev,
@@ -160,35 +162,36 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
160 buf, buflen, 162 buf, buflen,
161 RNDIS_CONTROL_TIMEOUT_MS); 163 RNDIS_CONTROL_TIMEOUT_MS);
162 if (likely(retval >= 8)) { 164 if (likely(retval >= 8)) {
165 msg_type = le32_to_cpu(buf->msg_type);
163 msg_len = le32_to_cpu(buf->msg_len); 166 msg_len = le32_to_cpu(buf->msg_len);
167 status = le32_to_cpu(buf->status);
164 request_id = (__force u32) buf->request_id; 168 request_id = (__force u32) buf->request_id;
165 if (likely(buf->msg_type == rsp)) { 169 if (likely(msg_type == rsp)) {
166 if (likely(request_id == xid)) { 170 if (likely(request_id == xid)) {
167 if (unlikely(rsp == RNDIS_MSG_RESET_C)) 171 if (unlikely(rsp == RNDIS_MSG_RESET_C))
168 return 0; 172 return 0;
169 if (likely(RNDIS_STATUS_SUCCESS 173 if (likely(RNDIS_STATUS_SUCCESS ==
170 == buf->status)) 174 status))
171 return 0; 175 return 0;
172 dev_dbg(&info->control->dev, 176 dev_dbg(&info->control->dev,
173 "rndis reply status %08x\n", 177 "rndis reply status %08x\n",
174 le32_to_cpu(buf->status)); 178 status);
175 return -EL3RST; 179 return -EL3RST;
176 } 180 }
177 dev_dbg(&info->control->dev, 181 dev_dbg(&info->control->dev,
178 "rndis reply id %d expected %d\n", 182 "rndis reply id %d expected %d\n",
179 request_id, xid); 183 request_id, xid);
180 /* then likely retry */ 184 /* then likely retry */
181 } else switch (buf->msg_type) { 185 } else switch (msg_type) {
182 case RNDIS_MSG_INDICATE: /* fault/event */ 186 case RNDIS_MSG_INDICATE: /* fault/event */
183 rndis_msg_indicate(dev, (void *)buf, buflen); 187 rndis_msg_indicate(dev, (void *)buf, buflen);
184
185 break; 188 break;
186 case RNDIS_MSG_KEEPALIVE: { /* ping */ 189 case RNDIS_MSG_KEEPALIVE: { /* ping */
187 struct rndis_keepalive_c *msg = (void *)buf; 190 struct rndis_keepalive_c *msg = (void *)buf;
188 191
189 msg->msg_type = RNDIS_MSG_KEEPALIVE_C; 192 msg->msg_type = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C);
190 msg->msg_len = cpu_to_le32(sizeof *msg); 193 msg->msg_len = cpu_to_le32(sizeof *msg);
191 msg->status = RNDIS_STATUS_SUCCESS; 194 msg->status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
192 retval = usb_control_msg(dev->udev, 195 retval = usb_control_msg(dev->udev,
193 usb_sndctrlpipe(dev->udev, 0), 196 usb_sndctrlpipe(dev->udev, 0),
194 USB_CDC_SEND_ENCAPSULATED_COMMAND, 197 USB_CDC_SEND_ENCAPSULATED_COMMAND,
@@ -236,7 +239,7 @@ EXPORT_SYMBOL_GPL(rndis_command);
236 * ActiveSync 4.1 Windows driver. 239 * ActiveSync 4.1 Windows driver.
237 */ 240 */
238static int rndis_query(struct usbnet *dev, struct usb_interface *intf, 241static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
239 void *buf, __le32 oid, u32 in_len, 242 void *buf, u32 oid, u32 in_len,
240 void **reply, int *reply_len) 243 void **reply, int *reply_len)
241{ 244{
242 int retval; 245 int retval;
@@ -251,9 +254,9 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
251 u.buf = buf; 254 u.buf = buf;
252 255
253 memset(u.get, 0, sizeof *u.get + in_len); 256 memset(u.get, 0, sizeof *u.get + in_len);
254 u.get->msg_type = RNDIS_MSG_QUERY; 257 u.get->msg_type = cpu_to_le32(RNDIS_MSG_QUERY);
255 u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len); 258 u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len);
256 u.get->oid = oid; 259 u.get->oid = cpu_to_le32(oid);
257 u.get->len = cpu_to_le32(in_len); 260 u.get->len = cpu_to_le32(in_len);
258 u.get->offset = cpu_to_le32(20); 261 u.get->offset = cpu_to_le32(20);
259 262
@@ -324,7 +327,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
324 if (retval < 0) 327 if (retval < 0)
325 goto fail; 328 goto fail;
326 329
327 u.init->msg_type = RNDIS_MSG_INIT; 330 u.init->msg_type = cpu_to_le32(RNDIS_MSG_INIT);
328 u.init->msg_len = cpu_to_le32(sizeof *u.init); 331 u.init->msg_len = cpu_to_le32(sizeof *u.init);
329 u.init->major_version = cpu_to_le32(1); 332 u.init->major_version = cpu_to_le32(1);
330 u.init->minor_version = cpu_to_le32(0); 333 u.init->minor_version = cpu_to_le32(0);
@@ -395,22 +398,23 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
395 /* Check physical medium */ 398 /* Check physical medium */
396 phym = NULL; 399 phym = NULL;
397 reply_len = sizeof *phym; 400 reply_len = sizeof *phym;
398 retval = rndis_query(dev, intf, u.buf, OID_GEN_PHYSICAL_MEDIUM, 401 retval = rndis_query(dev, intf, u.buf,
399 0, (void **) &phym, &reply_len); 402 RNDIS_OID_GEN_PHYSICAL_MEDIUM,
403 0, (void **) &phym, &reply_len);
400 if (retval != 0 || !phym) { 404 if (retval != 0 || !phym) {
401 /* OID is optional so don't fail here. */ 405 /* OID is optional so don't fail here. */
402 phym_unspec = RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED; 406 phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED);
403 phym = &phym_unspec; 407 phym = &phym_unspec;
404 } 408 }
405 if ((flags & FLAG_RNDIS_PHYM_WIRELESS) && 409 if ((flags & FLAG_RNDIS_PHYM_WIRELESS) &&
406 *phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { 410 le32_to_cpup(phym) != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
407 netif_dbg(dev, probe, dev->net, 411 netif_dbg(dev, probe, dev->net,
408 "driver requires wireless physical medium, but device is not\n"); 412 "driver requires wireless physical medium, but device is not\n");
409 retval = -ENODEV; 413 retval = -ENODEV;
410 goto halt_fail_and_release; 414 goto halt_fail_and_release;
411 } 415 }
412 if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) && 416 if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) &&
413 *phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { 417 le32_to_cpup(phym) == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
414 netif_dbg(dev, probe, dev->net, 418 netif_dbg(dev, probe, dev->net,
415 "driver requires non-wireless physical medium, but device is wireless.\n"); 419 "driver requires non-wireless physical medium, but device is wireless.\n");
416 retval = -ENODEV; 420 retval = -ENODEV;
@@ -419,8 +423,9 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
419 423
420 /* Get designated host ethernet address */ 424 /* Get designated host ethernet address */
421 reply_len = ETH_ALEN; 425 reply_len = ETH_ALEN;
422 retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS, 426 retval = rndis_query(dev, intf, u.buf,
423 48, (void **) &bp, &reply_len); 427 RNDIS_OID_802_3_PERMANENT_ADDRESS,
428 48, (void **) &bp, &reply_len);
424 if (unlikely(retval< 0)) { 429 if (unlikely(retval< 0)) {
425 dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval); 430 dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval);
426 goto halt_fail_and_release; 431 goto halt_fail_and_release;
@@ -430,12 +435,12 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
430 435
431 /* set a nonzero filter to enable data transfers */ 436 /* set a nonzero filter to enable data transfers */
432 memset(u.set, 0, sizeof *u.set); 437 memset(u.set, 0, sizeof *u.set);
433 u.set->msg_type = RNDIS_MSG_SET; 438 u.set->msg_type = cpu_to_le32(RNDIS_MSG_SET);
434 u.set->msg_len = cpu_to_le32(4 + sizeof *u.set); 439 u.set->msg_len = cpu_to_le32(4 + sizeof *u.set);
435 u.set->oid = OID_GEN_CURRENT_PACKET_FILTER; 440 u.set->oid = cpu_to_le32(RNDIS_OID_GEN_CURRENT_PACKET_FILTER);
436 u.set->len = cpu_to_le32(4); 441 u.set->len = cpu_to_le32(4);
437 u.set->offset = cpu_to_le32((sizeof *u.set) - 8); 442 u.set->offset = cpu_to_le32((sizeof *u.set) - 8);
438 *(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER; 443 *(__le32 *)(u.buf + sizeof *u.set) = cpu_to_le32(RNDIS_DEFAULT_FILTER);
439 444
440 retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); 445 retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
441 if (unlikely(retval < 0)) { 446 if (unlikely(retval < 0)) {
@@ -450,7 +455,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
450 455
451halt_fail_and_release: 456halt_fail_and_release:
452 memset(u.halt, 0, sizeof *u.halt); 457 memset(u.halt, 0, sizeof *u.halt);
453 u.halt->msg_type = RNDIS_MSG_HALT; 458 u.halt->msg_type = cpu_to_le32(RNDIS_MSG_HALT);
454 u.halt->msg_len = cpu_to_le32(sizeof *u.halt); 459 u.halt->msg_len = cpu_to_le32(sizeof *u.halt);
455 (void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE); 460 (void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE);
456fail_and_release: 461fail_and_release:
@@ -475,7 +480,7 @@ void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
475 /* try to clear any rndis state/activity (no i/o from stack!) */ 480 /* try to clear any rndis state/activity (no i/o from stack!) */
476 halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); 481 halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
477 if (halt) { 482 if (halt) {
478 halt->msg_type = RNDIS_MSG_HALT; 483 halt->msg_type = cpu_to_le32(RNDIS_MSG_HALT);
479 halt->msg_len = cpu_to_le32(sizeof *halt); 484 halt->msg_len = cpu_to_le32(sizeof *halt);
480 (void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE); 485 (void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE);
481 kfree(halt); 486 kfree(halt);
@@ -494,16 +499,16 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
494 while (likely(skb->len)) { 499 while (likely(skb->len)) {
495 struct rndis_data_hdr *hdr = (void *)skb->data; 500 struct rndis_data_hdr *hdr = (void *)skb->data;
496 struct sk_buff *skb2; 501 struct sk_buff *skb2;
497 u32 msg_len, data_offset, data_len; 502 u32 msg_type, msg_len, data_offset, data_len;
498 503
504 msg_type = le32_to_cpu(hdr->msg_type);
499 msg_len = le32_to_cpu(hdr->msg_len); 505 msg_len = le32_to_cpu(hdr->msg_len);
500 data_offset = le32_to_cpu(hdr->data_offset); 506 data_offset = le32_to_cpu(hdr->data_offset);
501 data_len = le32_to_cpu(hdr->data_len); 507 data_len = le32_to_cpu(hdr->data_len);
502 508
503 /* don't choke if we see oob, per-packet data, etc */ 509 /* don't choke if we see oob, per-packet data, etc */
504 if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET || 510 if (unlikely(msg_type != RNDIS_MSG_PACKET || skb->len < msg_len
505 skb->len < msg_len || 511 || (data_offset + data_len + 8) > msg_len)) {
506 (data_offset + data_len + 8) > msg_len)) {
507 dev->net->stats.rx_frame_errors++; 512 dev->net->stats.rx_frame_errors++;
508 netdev_dbg(dev->net, "bad rndis message %d/%d/%d/%d, len %d\n", 513 netdev_dbg(dev->net, "bad rndis message %d/%d/%d/%d, len %d\n",
509 le32_to_cpu(hdr->msg_type), 514 le32_to_cpu(hdr->msg_type),
@@ -569,7 +574,7 @@ rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
569fill: 574fill:
570 hdr = (void *) __skb_push(skb, sizeof *hdr); 575 hdr = (void *) __skb_push(skb, sizeof *hdr);
571 memset(hdr, 0, sizeof *hdr); 576 memset(hdr, 0, sizeof *hdr);
572 hdr->msg_type = RNDIS_MSG_PACKET; 577 hdr->msg_type = cpu_to_le32(RNDIS_MSG_PACKET);
573 hdr->msg_len = cpu_to_le32(skb->len); 578 hdr->msg_len = cpu_to_le32(skb->len);
574 hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8); 579 hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8);
575 hdr->data_len = cpu_to_le32(len); 580 hdr->data_len = cpu_to_le32(len);
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 187d01ccb973..fb1a087b101d 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -98,7 +98,7 @@ static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
98 98
99 if (unlikely(ret < 0)) 99 if (unlikely(ret < 0))
100 netdev_warn(dev->net, 100 netdev_warn(dev->net,
101 "Failed to read register index 0x%08x", index); 101 "Failed to read reg index 0x%08x: %d", index, ret);
102 102
103 le32_to_cpus(buf); 103 le32_to_cpus(buf);
104 *data = *buf; 104 *data = *buf;
@@ -128,7 +128,7 @@ static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
128 128
129 if (unlikely(ret < 0)) 129 if (unlikely(ret < 0))
130 netdev_warn(dev->net, 130 netdev_warn(dev->net,
131 "Failed to write register index 0x%08x", index); 131 "Failed to write reg index 0x%08x: %d", index, ret);
132 132
133 kfree(buf); 133 kfree(buf);
134 134
@@ -171,7 +171,7 @@ static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
171 idx &= dev->mii.reg_num_mask; 171 idx &= dev->mii.reg_num_mask;
172 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) 172 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
173 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) 173 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
174 | MII_ACCESS_READ; 174 | MII_ACCESS_READ | MII_ACCESS_BUSY;
175 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); 175 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
176 check_warn_goto_done(ret, "Error writing MII_ACCESS"); 176 check_warn_goto_done(ret, "Error writing MII_ACCESS");
177 177
@@ -210,7 +210,7 @@ static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
210 idx &= dev->mii.reg_num_mask; 210 idx &= dev->mii.reg_num_mask;
211 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) 211 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
212 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) 212 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
213 | MII_ACCESS_WRITE; 213 | MII_ACCESS_WRITE | MII_ACCESS_BUSY;
214 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); 214 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
215 check_warn_goto_done(ret, "Error writing MII_ACCESS"); 215 check_warn_goto_done(ret, "Error writing MII_ACCESS");
216 216
@@ -508,9 +508,9 @@ static int smsc75xx_link_reset(struct usbnet *dev)
508 u16 lcladv, rmtadv; 508 u16 lcladv, rmtadv;
509 int ret; 509 int ret;
510 510
511 /* clear interrupt status */ 511 /* write to clear phy interrupt status */
512 ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); 512 smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC,
513 check_warn_return(ret, "Error reading PHY_INT_SRC"); 513 PHY_INT_SRC_CLEAR_ALL);
514 514
515 ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); 515 ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
516 check_warn_return(ret, "Error writing INT_STS"); 516 check_warn_return(ret, "Error writing INT_STS");
@@ -643,7 +643,7 @@ static int smsc75xx_set_mac_address(struct usbnet *dev)
643 643
644static int smsc75xx_phy_initialize(struct usbnet *dev) 644static int smsc75xx_phy_initialize(struct usbnet *dev)
645{ 645{
646 int bmcr, timeout = 0; 646 int bmcr, ret, timeout = 0;
647 647
648 /* Initialize MII structure */ 648 /* Initialize MII structure */
649 dev->mii.dev = dev->net; 649 dev->mii.dev = dev->net;
@@ -651,6 +651,7 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
651 dev->mii.mdio_write = smsc75xx_mdio_write; 651 dev->mii.mdio_write = smsc75xx_mdio_write;
652 dev->mii.phy_id_mask = 0x1f; 652 dev->mii.phy_id_mask = 0x1f;
653 dev->mii.reg_num_mask = 0x1f; 653 dev->mii.reg_num_mask = 0x1f;
654 dev->mii.supports_gmii = 1;
654 dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID; 655 dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID;
655 656
656 /* reset phy and wait for reset to complete */ 657 /* reset phy and wait for reset to complete */
@@ -661,7 +662,7 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
661 bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); 662 bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
662 check_warn_return(bmcr, "Error reading MII_BMCR"); 663 check_warn_return(bmcr, "Error reading MII_BMCR");
663 timeout++; 664 timeout++;
664 } while ((bmcr & MII_BMCR) && (timeout < 100)); 665 } while ((bmcr & BMCR_RESET) && (timeout < 100));
665 666
666 if (timeout >= 100) { 667 if (timeout >= 100) {
667 netdev_warn(dev->net, "timeout on PHY Reset"); 668 netdev_warn(dev->net, "timeout on PHY Reset");
@@ -671,10 +672,13 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
671 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, 672 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
672 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | 673 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
673 ADVERTISE_PAUSE_ASYM); 674 ADVERTISE_PAUSE_ASYM);
675 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
676 ADVERTISE_1000FULL);
674 677
675 /* read to clear */ 678 /* read and write to clear phy interrupt status */
676 smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); 679 ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
677 check_warn_return(bmcr, "Error reading PHY_INT_SRC"); 680 check_warn_return(ret, "Error reading PHY_INT_SRC");
681 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff);
678 682
679 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, 683 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
680 PHY_INT_MASK_DEFAULT); 684 PHY_INT_MASK_DEFAULT);
@@ -899,15 +903,20 @@ static int smsc75xx_reset(struct usbnet *dev)
899 903
900 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf); 904 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf);
901 905
902 /* Configure GPIO pins as LED outputs */ 906 ret = smsc75xx_read_reg(dev, E2P_CMD, &buf);
903 ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); 907 check_warn_return(ret, "Failed to read E2P_CMD: %d", ret);
904 check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret); 908
909 /* only set default GPIO/LED settings if no EEPROM is detected */
910 if (!(buf & E2P_CMD_LOADED)) {
911 ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf);
912 check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret);
905 913
906 buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); 914 buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL);
907 buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; 915 buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL;
908 916
909 ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); 917 ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf);
910 check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret); 918 check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret);
919 }
911 920
912 ret = smsc75xx_write_reg(dev, FLOW, 0); 921 ret = smsc75xx_write_reg(dev, FLOW, 0);
913 check_warn_return(ret, "Failed to write FLOW: %d", ret); 922 check_warn_return(ret, "Failed to write FLOW: %d", ret);
@@ -946,6 +955,14 @@ static int smsc75xx_reset(struct usbnet *dev)
946 ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); 955 ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf);
947 check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret); 956 check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret);
948 957
958 /* allow mac to detect speed and duplex from phy */
959 ret = smsc75xx_read_reg(dev, MAC_CR, &buf);
960 check_warn_return(ret, "Failed to read MAC_CR: %d", ret);
961
962 buf |= (MAC_CR_ADD | MAC_CR_ASD);
963 ret = smsc75xx_write_reg(dev, MAC_CR, buf);
964 check_warn_return(ret, "Failed to write MAC_CR: %d", ret);
965
949 ret = smsc75xx_read_reg(dev, MAC_TX, &buf); 966 ret = smsc75xx_read_reg(dev, MAC_TX, &buf);
950 check_warn_return(ret, "Failed to read MAC_TX: %d", ret); 967 check_warn_return(ret, "Failed to read MAC_TX: %d", ret);
951 968
@@ -1051,6 +1068,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
1051 dev->net->ethtool_ops = &smsc75xx_ethtool_ops; 1068 dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
1052 dev->net->flags |= IFF_MULTICAST; 1069 dev->net->flags |= IFF_MULTICAST;
1053 dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; 1070 dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
1071 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1054 return 0; 1072 return 0;
1055} 1073}
1056 1074
@@ -1211,7 +1229,7 @@ static const struct driver_info smsc75xx_info = {
1211 .rx_fixup = smsc75xx_rx_fixup, 1229 .rx_fixup = smsc75xx_rx_fixup,
1212 .tx_fixup = smsc75xx_tx_fixup, 1230 .tx_fixup = smsc75xx_tx_fixup,
1213 .status = smsc75xx_status, 1231 .status = smsc75xx_status,
1214 .flags = FLAG_ETHER | FLAG_SEND_ZLP, 1232 .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
1215}; 1233};
1216 1234
1217static const struct usb_device_id products[] = { 1235static const struct usb_device_id products[] = {
diff --git a/drivers/net/usb/smsc75xx.h b/drivers/net/usb/smsc75xx.h
index 16e98c778344..67eba39e6ee2 100644
--- a/drivers/net/usb/smsc75xx.h
+++ b/drivers/net/usb/smsc75xx.h
@@ -388,6 +388,7 @@
388#define PHY_INT_SRC_ANEG_COMP ((u16)0x0040) 388#define PHY_INT_SRC_ANEG_COMP ((u16)0x0040)
389#define PHY_INT_SRC_REMOTE_FAULT ((u16)0x0020) 389#define PHY_INT_SRC_REMOTE_FAULT ((u16)0x0020)
390#define PHY_INT_SRC_LINK_DOWN ((u16)0x0010) 390#define PHY_INT_SRC_LINK_DOWN ((u16)0x0010)
391#define PHY_INT_SRC_CLEAR_ALL ((u16)0xffff)
391 392
392#define PHY_INT_MASK (30) 393#define PHY_INT_MASK (30)
393#define PHY_INT_MASK_ENERGY_ON ((u16)0x0080) 394#define PHY_INT_MASK_ENERGY_ON ((u16)0x0080)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 5f19f84d3494..94ae66999f59 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1017,6 +1017,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1017 dev->net->ethtool_ops = &smsc95xx_ethtool_ops; 1017 dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
1018 dev->net->flags |= IFF_MULTICAST; 1018 dev->net->flags |= IFF_MULTICAST;
1019 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; 1019 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
1020 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1020 return 0; 1021 return 0;
1021} 1022}
1022 1023
@@ -1191,7 +1192,7 @@ static const struct driver_info smsc95xx_info = {
1191 .rx_fixup = smsc95xx_rx_fixup, 1192 .rx_fixup = smsc95xx_rx_fixup,
1192 .tx_fixup = smsc95xx_tx_fixup, 1193 .tx_fixup = smsc95xx_tx_fixup,
1193 .status = smsc95xx_status, 1194 .status = smsc95xx_status,
1194 .flags = FLAG_ETHER | FLAG_SEND_ZLP, 1195 .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
1195}; 1196};
1196 1197
1197static const struct usb_device_id products[] = { 1198static const struct usb_device_id products[] = {
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index b7b3f5b0d406..9f58330f1312 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -210,6 +210,7 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
210 } else { 210 } else {
211 usb_fill_int_urb(dev->interrupt, dev->udev, pipe, 211 usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
212 buf, maxp, intr_complete, dev, period); 212 buf, maxp, intr_complete, dev, period);
213 dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
213 dev_dbg(&intf->dev, 214 dev_dbg(&intf->dev,
214 "status ep%din, %d bytes period %d\n", 215 "status ep%din, %d bytes period %d\n",
215 usb_pipeendpoint(pipe), maxp, period); 216 usb_pipeendpoint(pipe), maxp, period);
@@ -281,17 +282,32 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
281} 282}
282EXPORT_SYMBOL_GPL(usbnet_change_mtu); 283EXPORT_SYMBOL_GPL(usbnet_change_mtu);
283 284
285/* The caller must hold list->lock */
286static void __usbnet_queue_skb(struct sk_buff_head *list,
287 struct sk_buff *newsk, enum skb_state state)
288{
289 struct skb_data *entry = (struct skb_data *) newsk->cb;
290
291 __skb_queue_tail(list, newsk);
292 entry->state = state;
293}
294
284/*-------------------------------------------------------------------------*/ 295/*-------------------------------------------------------------------------*/
285 296
286/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from 297/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
287 * completion callbacks. 2.5 should have fixed those bugs... 298 * completion callbacks. 2.5 should have fixed those bugs...
288 */ 299 */
289 300
290static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list) 301static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
302 struct sk_buff_head *list, enum skb_state state)
291{ 303{
292 unsigned long flags; 304 unsigned long flags;
305 enum skb_state old_state;
306 struct skb_data *entry = (struct skb_data *) skb->cb;
293 307
294 spin_lock_irqsave(&list->lock, flags); 308 spin_lock_irqsave(&list->lock, flags);
309 old_state = entry->state;
310 entry->state = state;
295 __skb_unlink(skb, list); 311 __skb_unlink(skb, list);
296 spin_unlock(&list->lock); 312 spin_unlock(&list->lock);
297 spin_lock(&dev->done.lock); 313 spin_lock(&dev->done.lock);
@@ -299,6 +315,7 @@ static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_hea
299 if (dev->done.qlen == 1) 315 if (dev->done.qlen == 1)
300 tasklet_schedule(&dev->bh); 316 tasklet_schedule(&dev->bh);
301 spin_unlock_irqrestore(&dev->done.lock, flags); 317 spin_unlock_irqrestore(&dev->done.lock, flags);
318 return old_state;
302} 319}
303 320
304/* some work can't be done in tasklets, so we use keventd 321/* some work can't be done in tasklets, so we use keventd
@@ -339,7 +356,6 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
339 entry = (struct skb_data *) skb->cb; 356 entry = (struct skb_data *) skb->cb;
340 entry->urb = urb; 357 entry->urb = urb;
341 entry->dev = dev; 358 entry->dev = dev;
342 entry->state = rx_start;
343 entry->length = 0; 359 entry->length = 0;
344 360
345 usb_fill_bulk_urb (urb, dev->udev, dev->in, 361 usb_fill_bulk_urb (urb, dev->udev, dev->in,
@@ -371,7 +387,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
371 tasklet_schedule (&dev->bh); 387 tasklet_schedule (&dev->bh);
372 break; 388 break;
373 case 0: 389 case 0:
374 __skb_queue_tail (&dev->rxq, skb); 390 __usbnet_queue_skb(&dev->rxq, skb, rx_start);
375 } 391 }
376 } else { 392 } else {
377 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); 393 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@ -422,16 +438,17 @@ static void rx_complete (struct urb *urb)
422 struct skb_data *entry = (struct skb_data *) skb->cb; 438 struct skb_data *entry = (struct skb_data *) skb->cb;
423 struct usbnet *dev = entry->dev; 439 struct usbnet *dev = entry->dev;
424 int urb_status = urb->status; 440 int urb_status = urb->status;
441 enum skb_state state;
425 442
426 skb_put (skb, urb->actual_length); 443 skb_put (skb, urb->actual_length);
427 entry->state = rx_done; 444 state = rx_done;
428 entry->urb = NULL; 445 entry->urb = NULL;
429 446
430 switch (urb_status) { 447 switch (urb_status) {
431 /* success */ 448 /* success */
432 case 0: 449 case 0:
433 if (skb->len < dev->net->hard_header_len) { 450 if (skb->len < dev->net->hard_header_len) {
434 entry->state = rx_cleanup; 451 state = rx_cleanup;
435 dev->net->stats.rx_errors++; 452 dev->net->stats.rx_errors++;
436 dev->net->stats.rx_length_errors++; 453 dev->net->stats.rx_length_errors++;
437 netif_dbg(dev, rx_err, dev->net, 454 netif_dbg(dev, rx_err, dev->net,
@@ -470,7 +487,7 @@ static void rx_complete (struct urb *urb)
470 "rx throttle %d\n", urb_status); 487 "rx throttle %d\n", urb_status);
471 } 488 }
472block: 489block:
473 entry->state = rx_cleanup; 490 state = rx_cleanup;
474 entry->urb = urb; 491 entry->urb = urb;
475 urb = NULL; 492 urb = NULL;
476 break; 493 break;
@@ -481,17 +498,18 @@ block:
481 // FALLTHROUGH 498 // FALLTHROUGH
482 499
483 default: 500 default:
484 entry->state = rx_cleanup; 501 state = rx_cleanup;
485 dev->net->stats.rx_errors++; 502 dev->net->stats.rx_errors++;
486 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); 503 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
487 break; 504 break;
488 } 505 }
489 506
490 defer_bh(dev, skb, &dev->rxq); 507 state = defer_bh(dev, skb, &dev->rxq, state);
491 508
492 if (urb) { 509 if (urb) {
493 if (netif_running (dev->net) && 510 if (netif_running (dev->net) &&
494 !test_bit (EVENT_RX_HALT, &dev->flags)) { 511 !test_bit (EVENT_RX_HALT, &dev->flags) &&
512 state != unlink_start) {
495 rx_submit (dev, urb, GFP_ATOMIC); 513 rx_submit (dev, urb, GFP_ATOMIC);
496 usb_mark_last_busy(dev->udev); 514 usb_mark_last_busy(dev->udev);
497 return; 515 return;
@@ -578,16 +596,23 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
578static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) 596static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
579{ 597{
580 unsigned long flags; 598 unsigned long flags;
581 struct sk_buff *skb, *skbnext; 599 struct sk_buff *skb;
582 int count = 0; 600 int count = 0;
583 601
584 spin_lock_irqsave (&q->lock, flags); 602 spin_lock_irqsave (&q->lock, flags);
585 skb_queue_walk_safe(q, skb, skbnext) { 603 while (!skb_queue_empty(q)) {
586 struct skb_data *entry; 604 struct skb_data *entry;
587 struct urb *urb; 605 struct urb *urb;
588 int retval; 606 int retval;
589 607
590 entry = (struct skb_data *) skb->cb; 608 skb_queue_walk(q, skb) {
609 entry = (struct skb_data *) skb->cb;
610 if (entry->state != unlink_start)
611 goto found;
612 }
613 break;
614found:
615 entry->state = unlink_start;
591 urb = entry->urb; 616 urb = entry->urb;
592 617
593 /* 618 /*
@@ -884,6 +909,7 @@ static const struct ethtool_ops usbnet_ethtool_ops = {
884 .get_drvinfo = usbnet_get_drvinfo, 909 .get_drvinfo = usbnet_get_drvinfo,
885 .get_msglevel = usbnet_get_msglevel, 910 .get_msglevel = usbnet_get_msglevel,
886 .set_msglevel = usbnet_set_msglevel, 911 .set_msglevel = usbnet_set_msglevel,
912 .get_ts_info = ethtool_op_get_ts_info,
887}; 913};
888 914
889/*-------------------------------------------------------------------------*/ 915/*-------------------------------------------------------------------------*/
@@ -1038,8 +1064,7 @@ static void tx_complete (struct urb *urb)
1038 } 1064 }
1039 1065
1040 usb_autopm_put_interface_async(dev->intf); 1066 usb_autopm_put_interface_async(dev->intf);
1041 entry->state = tx_done; 1067 (void) defer_bh(dev, skb, &dev->txq, tx_done);
1042 defer_bh(dev, skb, &dev->txq);
1043} 1068}
1044 1069
1045/*-------------------------------------------------------------------------*/ 1070/*-------------------------------------------------------------------------*/
@@ -1095,7 +1120,6 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1095 entry = (struct skb_data *) skb->cb; 1120 entry = (struct skb_data *) skb->cb;
1096 entry->urb = urb; 1121 entry->urb = urb;
1097 entry->dev = dev; 1122 entry->dev = dev;
1098 entry->state = tx_start;
1099 entry->length = length; 1123 entry->length = length;
1100 1124
1101 usb_fill_bulk_urb (urb, dev->udev, dev->out, 1125 usb_fill_bulk_urb (urb, dev->udev, dev->out,
@@ -1154,7 +1178,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1154 break; 1178 break;
1155 case 0: 1179 case 0:
1156 net->trans_start = jiffies; 1180 net->trans_start = jiffies;
1157 __skb_queue_tail (&dev->txq, skb); 1181 __usbnet_queue_skb(&dev->txq, skb, tx_start);
1158 if (dev->txq.qlen >= TX_QLEN (dev)) 1182 if (dev->txq.qlen >= TX_QLEN (dev))
1159 netif_stop_queue (net); 1183 netif_stop_queue (net);
1160 } 1184 }
@@ -1443,7 +1467,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1443 1467
1444 status = register_netdev (net); 1468 status = register_netdev (net);
1445 if (status) 1469 if (status)
1446 goto out3; 1470 goto out4;
1447 netif_info(dev, probe, dev->net, 1471 netif_info(dev, probe, dev->net,
1448 "register '%s' at usb-%s-%s, %s, %pM\n", 1472 "register '%s' at usb-%s-%s, %s, %pM\n",
1449 udev->dev.driver->name, 1473 udev->dev.driver->name,
@@ -1461,6 +1485,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1461 1485
1462 return 0; 1486 return 0;
1463 1487
1488out4:
1489 usb_free_urb(dev->interrupt);
1464out3: 1490out3:
1465 if (info->unbind) 1491 if (info->unbind)
1466 info->unbind (dev, udev); 1492 info->unbind (dev, udev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4de2760c5937..9ce6995e8d08 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -66,12 +66,21 @@ struct virtnet_info {
66 /* Host will merge rx buffers for big packets (shake it! shake it!) */ 66 /* Host will merge rx buffers for big packets (shake it! shake it!) */
67 bool mergeable_rx_bufs; 67 bool mergeable_rx_bufs;
68 68
69 /* enable config space updates */
70 bool config_enable;
71
69 /* Active statistics */ 72 /* Active statistics */
70 struct virtnet_stats __percpu *stats; 73 struct virtnet_stats __percpu *stats;
71 74
72 /* Work struct for refilling if we run low on memory. */ 75 /* Work struct for refilling if we run low on memory. */
73 struct delayed_work refill; 76 struct delayed_work refill;
74 77
78 /* Work struct for config space updates */
79 struct work_struct config_work;
80
81 /* Lock for config space updates */
82 struct mutex config_lock;
83
75 /* Chain pages by the private ptr. */ 84 /* Chain pages by the private ptr. */
76 struct page *pages; 85 struct page *pages;
77 86
@@ -492,7 +501,9 @@ static void virtnet_napi_enable(struct virtnet_info *vi)
492 * We synchronize against interrupts via NAPI_STATE_SCHED */ 501 * We synchronize against interrupts via NAPI_STATE_SCHED */
493 if (napi_schedule_prep(&vi->napi)) { 502 if (napi_schedule_prep(&vi->napi)) {
494 virtqueue_disable_cb(vi->rvq); 503 virtqueue_disable_cb(vi->rvq);
504 local_bh_disable();
495 __napi_schedule(&vi->napi); 505 __napi_schedule(&vi->napi);
506 local_bh_enable();
496 } 507 }
497} 508}
498 509
@@ -626,16 +637,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
626 /* This can happen with OOM and indirect buffers. */ 637 /* This can happen with OOM and indirect buffers. */
627 if (unlikely(capacity < 0)) { 638 if (unlikely(capacity < 0)) {
628 if (likely(capacity == -ENOMEM)) { 639 if (likely(capacity == -ENOMEM)) {
629 if (net_ratelimit()) { 640 if (net_ratelimit())
630 dev_warn(&dev->dev, 641 dev_warn(&dev->dev,
631 "TX queue failure: out of memory\n"); 642 "TX queue failure: out of memory\n");
632 } else { 643 } else {
633 dev->stats.tx_fifo_errors++; 644 dev->stats.tx_fifo_errors++;
634 if (net_ratelimit()) 645 if (net_ratelimit())
635 dev_warn(&dev->dev, 646 dev_warn(&dev->dev,
636 "Unexpected TX queue failure: %d\n", 647 "Unexpected TX queue failure: %d\n",
637 capacity); 648 capacity);
638 }
639 } 649 }
640 dev->stats.tx_dropped++; 650 dev->stats.tx_dropped++;
641 kfree_skb(skb); 651 kfree_skb(skb);
@@ -781,6 +791,16 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
781 return status == VIRTIO_NET_OK; 791 return status == VIRTIO_NET_OK;
782} 792}
783 793
794static void virtnet_ack_link_announce(struct virtnet_info *vi)
795{
796 rtnl_lock();
797 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
798 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
799 0, 0))
800 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
801 rtnl_unlock();
802}
803
784static int virtnet_close(struct net_device *dev) 804static int virtnet_close(struct net_device *dev)
785{ 805{
786 struct virtnet_info *vi = netdev_priv(dev); 806 struct virtnet_info *vi = netdev_priv(dev);
@@ -952,20 +972,31 @@ static const struct net_device_ops virtnet_netdev = {
952#endif 972#endif
953}; 973};
954 974
955static void virtnet_update_status(struct virtnet_info *vi) 975static void virtnet_config_changed_work(struct work_struct *work)
956{ 976{
977 struct virtnet_info *vi =
978 container_of(work, struct virtnet_info, config_work);
957 u16 v; 979 u16 v;
958 980
981 mutex_lock(&vi->config_lock);
982 if (!vi->config_enable)
983 goto done;
984
959 if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, 985 if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
960 offsetof(struct virtio_net_config, status), 986 offsetof(struct virtio_net_config, status),
961 &v) < 0) 987 &v) < 0)
962 return; 988 goto done;
989
990 if (v & VIRTIO_NET_S_ANNOUNCE) {
991 netif_notify_peers(vi->dev);
992 virtnet_ack_link_announce(vi);
993 }
963 994
964 /* Ignore unknown (future) status bits */ 995 /* Ignore unknown (future) status bits */
965 v &= VIRTIO_NET_S_LINK_UP; 996 v &= VIRTIO_NET_S_LINK_UP;
966 997
967 if (vi->status == v) 998 if (vi->status == v)
968 return; 999 goto done;
969 1000
970 vi->status = v; 1001 vi->status = v;
971 1002
@@ -976,13 +1007,15 @@ static void virtnet_update_status(struct virtnet_info *vi)
976 netif_carrier_off(vi->dev); 1007 netif_carrier_off(vi->dev);
977 netif_stop_queue(vi->dev); 1008 netif_stop_queue(vi->dev);
978 } 1009 }
1010done:
1011 mutex_unlock(&vi->config_lock);
979} 1012}
980 1013
981static void virtnet_config_changed(struct virtio_device *vdev) 1014static void virtnet_config_changed(struct virtio_device *vdev)
982{ 1015{
983 struct virtnet_info *vi = vdev->priv; 1016 struct virtnet_info *vi = vdev->priv;
984 1017
985 virtnet_update_status(vi); 1018 queue_work(system_nrt_wq, &vi->config_work);
986} 1019}
987 1020
988static int init_vqs(struct virtnet_info *vi) 1021static int init_vqs(struct virtnet_info *vi)
@@ -1076,6 +1109,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1076 goto free; 1109 goto free;
1077 1110
1078 INIT_DELAYED_WORK(&vi->refill, refill_work); 1111 INIT_DELAYED_WORK(&vi->refill, refill_work);
1112 mutex_init(&vi->config_lock);
1113 vi->config_enable = true;
1114 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
1079 sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg)); 1115 sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
1080 sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg)); 1116 sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
1081 1117
@@ -1111,7 +1147,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1111 otherwise get link status from config. */ 1147 otherwise get link status from config. */
1112 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 1148 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1113 netif_carrier_off(dev); 1149 netif_carrier_off(dev);
1114 virtnet_update_status(vi); 1150 queue_work(system_nrt_wq, &vi->config_work);
1115 } else { 1151 } else {
1116 vi->status = VIRTIO_NET_S_LINK_UP; 1152 vi->status = VIRTIO_NET_S_LINK_UP;
1117 netif_carrier_on(dev); 1153 netif_carrier_on(dev);
@@ -1170,10 +1206,17 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
1170{ 1206{
1171 struct virtnet_info *vi = vdev->priv; 1207 struct virtnet_info *vi = vdev->priv;
1172 1208
1209 /* Prevent config work handler from accessing the device. */
1210 mutex_lock(&vi->config_lock);
1211 vi->config_enable = false;
1212 mutex_unlock(&vi->config_lock);
1213
1173 unregister_netdev(vi->dev); 1214 unregister_netdev(vi->dev);
1174 1215
1175 remove_vq_common(vi); 1216 remove_vq_common(vi);
1176 1217
1218 flush_work(&vi->config_work);
1219
1177 free_percpu(vi->stats); 1220 free_percpu(vi->stats);
1178 free_netdev(vi->dev); 1221 free_netdev(vi->dev);
1179} 1222}
@@ -1183,6 +1226,11 @@ static int virtnet_freeze(struct virtio_device *vdev)
1183{ 1226{
1184 struct virtnet_info *vi = vdev->priv; 1227 struct virtnet_info *vi = vdev->priv;
1185 1228
1229 /* Prevent config work handler from accessing the device */
1230 mutex_lock(&vi->config_lock);
1231 vi->config_enable = false;
1232 mutex_unlock(&vi->config_lock);
1233
1186 virtqueue_disable_cb(vi->rvq); 1234 virtqueue_disable_cb(vi->rvq);
1187 virtqueue_disable_cb(vi->svq); 1235 virtqueue_disable_cb(vi->svq);
1188 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) 1236 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
@@ -1196,6 +1244,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
1196 1244
1197 remove_vq_common(vi); 1245 remove_vq_common(vi);
1198 1246
1247 flush_work(&vi->config_work);
1248
1199 return 0; 1249 return 0;
1200} 1250}
1201 1251
@@ -1216,6 +1266,10 @@ static int virtnet_restore(struct virtio_device *vdev)
1216 if (!try_fill_recv(vi, GFP_KERNEL)) 1266 if (!try_fill_recv(vi, GFP_KERNEL))
1217 queue_delayed_work(system_nrt_wq, &vi->refill, 0); 1267 queue_delayed_work(system_nrt_wq, &vi->refill, 0);
1218 1268
1269 mutex_lock(&vi->config_lock);
1270 vi->config_enable = true;
1271 mutex_unlock(&vi->config_lock);
1272
1219 return 0; 1273 return 0;
1220} 1274}
1221#endif 1275#endif
@@ -1233,6 +1287,7 @@ static unsigned int features[] = {
1233 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 1287 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
1234 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 1288 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1235 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 1289 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1290 VIRTIO_NET_F_GUEST_ANNOUNCE,
1236}; 1291};
1237 1292
1238static struct virtio_driver virtio_net_driver = { 1293static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index c676de7de024..9eb6479306d6 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2055,15 +2055,4 @@ static struct pci_driver dscc4_driver = {
2055 .remove = __devexit_p(dscc4_remove_one), 2055 .remove = __devexit_p(dscc4_remove_one),
2056}; 2056};
2057 2057
2058static int __init dscc4_init_module(void) 2058module_pci_driver(dscc4_driver);
2059{
2060 return pci_register_driver(&dscc4_driver);
2061}
2062
2063static void __exit dscc4_cleanup_module(void)
2064{
2065 pci_unregister_driver(&dscc4_driver);
2066}
2067
2068module_init(dscc4_init_module);
2069module_exit(dscc4_cleanup_module);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index ebb9f24eefb5..1a623183cbe5 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2483,6 +2483,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2483 pr_err("Control memory remap failed\n"); 2483 pr_err("Control memory remap failed\n");
2484 pci_release_regions(pdev); 2484 pci_release_regions(pdev);
2485 pci_disable_device(pdev); 2485 pci_disable_device(pdev);
2486 iounmap(card->mem);
2486 kfree(card); 2487 kfree(card);
2487 return -ENODEV; 2488 return -ENODEV;
2488 } 2489 }
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 76a8a4a522e9..f5d533a706ea 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1120,7 +1120,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1120{ 1120{
1121 lmc_softc_t *sc = dev_to_sc(dev); 1121 lmc_softc_t *sc = dev_to_sc(dev);
1122 1122
1123 lmc_trace(dev, "lmc_runnig_reset in"); 1123 lmc_trace(dev, "lmc_running_reset in");
1124 1124
1125 /* stop interrupts */ 1125 /* stop interrupts */
1126 /* Clear the interrupt mask */ 1126 /* Clear the interrupt mask */
@@ -1736,18 +1736,7 @@ static struct pci_driver lmc_driver = {
1736 .remove = __devexit_p(lmc_remove_one), 1736 .remove = __devexit_p(lmc_remove_one),
1737}; 1737};
1738 1738
1739static int __init init_lmc(void) 1739module_pci_driver(lmc_driver);
1740{
1741 return pci_register_driver(&lmc_driver);
1742}
1743
1744static void __exit exit_lmc(void)
1745{
1746 pci_unregister_driver(&lmc_driver);
1747}
1748
1749module_init(init_lmc);
1750module_exit(exit_lmc);
1751 1740
1752unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/ 1741unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
1753{ 1742{
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index 3f703384295e..672de18a776c 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -32,8 +32,9 @@ config WIMAX_I2400M_SDIO
32 If unsure, it is safe to select M (module). 32 If unsure, it is safe to select M (module).
33 33
34config WIMAX_IWMC3200_SDIO 34config WIMAX_IWMC3200_SDIO
35 bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO" 35 bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO (EXPERIMENTAL)"
36 depends on WIMAX_I2400M_SDIO 36 depends on WIMAX_I2400M_SDIO
37 depends on EXPERIMENTAL
37 select IWMC3200TOP 38 select IWMC3200TOP
38 help 39 help
39 Select if you have a device based on the Intel Multicom WiMAX 40 Select if you have a device based on the Intel Multicom WiMAX
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index e3257681e360..b78ee676e102 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -277,7 +277,7 @@ retry:
277 d_printf(1, dev, "RX: size changed to %d, received %d, " 277 d_printf(1, dev, "RX: size changed to %d, received %d, "
278 "copied %d, capacity %ld\n", 278 "copied %d, capacity %ld\n",
279 rx_size, read_size, rx_skb->len, 279 rx_size, read_size, rx_skb->len,
280 (long) (skb_end_pointer(new_skb) - new_skb->head)); 280 (long) skb_end_offset(new_skb));
281 goto retry; 281 goto retry;
282 } 282 }
283 /* In most cases, it happens due to the hardware scheduling a 283 /* In most cases, it happens due to the hardware scheduling a
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 29b1e033a10b..713d033891e6 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -695,7 +695,7 @@ int i2400mu_resume(struct usb_interface *iface)
695 d_fnstart(3, dev, "(iface %p)\n", iface); 695 d_fnstart(3, dev, "(iface %p)\n", iface);
696 rmb(); /* see i2400m->updown's documentation */ 696 rmb(); /* see i2400m->updown's documentation */
697 if (i2400m->updown == 0) { 697 if (i2400m->updown == 0) {
698 d_printf(1, dev, "fw was down, no resume neeed\n"); 698 d_printf(1, dev, "fw was down, no resume needed\n");
699 goto out; 699 goto out;
700 } 700 }
701 d_printf(1, dev, "fw was up, resuming\n"); 701 d_printf(1, dev, "fw was up, resuming\n");
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index abd3b71cd4ab..5f58fa53238c 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -282,8 +282,7 @@ source "drivers/net/wireless/orinoco/Kconfig"
282source "drivers/net/wireless/p54/Kconfig" 282source "drivers/net/wireless/p54/Kconfig"
283source "drivers/net/wireless/rt2x00/Kconfig" 283source "drivers/net/wireless/rt2x00/Kconfig"
284source "drivers/net/wireless/rtlwifi/Kconfig" 284source "drivers/net/wireless/rtlwifi/Kconfig"
285source "drivers/net/wireless/wl1251/Kconfig" 285source "drivers/net/wireless/ti/Kconfig"
286source "drivers/net/wireless/wl12xx/Kconfig"
287source "drivers/net/wireless/zd1211rw/Kconfig" 286source "drivers/net/wireless/zd1211rw/Kconfig"
288source "drivers/net/wireless/mwifiex/Kconfig" 287source "drivers/net/wireless/mwifiex/Kconfig"
289 288
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 98db76196b59..0ce218b931d4 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -51,9 +51,7 @@ obj-$(CONFIG_ATH_COMMON) += ath/
51 51
52obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o 52obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
53 53
54obj-$(CONFIG_WL1251) += wl1251/ 54obj-$(CONFIG_WL_TI) += ti/
55obj-$(CONFIG_WL12XX) += wl12xx/
56obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/
57 55
58obj-$(CONFIG_IWM) += iwmc3200wifi/ 56obj-$(CONFIG_IWM) += iwmc3200wifi/
59 57
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f5ce5623da99..0ac09a2bd144 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1991,19 +1991,4 @@ static struct pci_driver adm8211_driver = {
1991#endif /* CONFIG_PM */ 1991#endif /* CONFIG_PM */
1992}; 1992};
1993 1993
1994 1994module_pci_driver(adm8211_driver);
1995
1996static int __init adm8211_init(void)
1997{
1998 return pci_register_driver(&adm8211_driver);
1999}
2000
2001
2002static void __exit adm8211_exit(void)
2003{
2004 pci_unregister_driver(&adm8211_driver);
2005}
2006
2007
2008module_init(adm8211_init);
2009module_exit(adm8211_exit);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 4045e5ab0555..3df0146b797e 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1122,12 +1122,12 @@ exit:
1122static void at76_dump_mib_local(struct at76_priv *priv) 1122static void at76_dump_mib_local(struct at76_priv *priv)
1123{ 1123{
1124 int ret; 1124 int ret;
1125 struct mib_local *m = kmalloc(sizeof(struct mib_phy), GFP_KERNEL); 1125 struct mib_local *m = kmalloc(sizeof(*m), GFP_KERNEL);
1126 1126
1127 if (!m) 1127 if (!m)
1128 return; 1128 return;
1129 1129
1130 ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local)); 1130 ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(*m));
1131 if (ret < 0) { 1131 if (ret < 0) {
1132 wiphy_err(priv->hw->wiphy, 1132 wiphy_err(priv->hw->wiphy,
1133 "at76_get_mib (LOCAL) failed: %d\n", ret); 1133 "at76_get_mib (LOCAL) failed: %d\n", ret);
@@ -1751,7 +1751,7 @@ static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1751 * following workaround is necessary. If the TX frame is an 1751 * following workaround is necessary. If the TX frame is an
1752 * authentication frame extract the bssid and send the CMD_JOIN. */ 1752 * authentication frame extract the bssid and send the CMD_JOIN. */
1753 if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) { 1753 if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) {
1754 if (compare_ether_addr(priv->bssid, mgmt->bssid)) { 1754 if (!ether_addr_equal(priv->bssid, mgmt->bssid)) {
1755 memcpy(priv->bssid, mgmt->bssid, ETH_ALEN); 1755 memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
1756 ieee80211_queue_work(hw, &priv->work_join_bssid); 1756 ieee80211_queue_work(hw, &priv->work_join_bssid);
1757 dev_kfree_skb_any(skb); 1757 dev_kfree_skb_any(skb);
@@ -2512,10 +2512,8 @@ static void __exit at76_mod_exit(void)
2512 2512
2513 printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " unloading\n"); 2513 printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " unloading\n");
2514 usb_deregister(&at76_driver); 2514 usb_deregister(&at76_driver);
2515 for (i = 0; i < ARRAY_SIZE(firmwares); i++) { 2515 for (i = 0; i < ARRAY_SIZE(firmwares); i++)
2516 if (firmwares[i].fw) 2516 release_firmware(firmwares[i].fw);
2517 release_firmware(firmwares[i].fw);
2518 }
2519 led_trigger_unregister_simple(ledtrig_tx); 2517 led_trigger_unregister_simple(ledtrig_tx);
2520} 2518}
2521 2519
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 8faa129da5a0..aec33cc207fd 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -19,6 +19,7 @@
19#include <linux/nl80211.h> 19#include <linux/nl80211.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
22#include <linux/export.h>
22#include <ar231x_platform.h> 23#include <ar231x_platform.h>
23#include "ath5k.h" 24#include "ath5k.h"
24#include "debug.h" 25#include "debug.h"
@@ -119,7 +120,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
119 if (res == NULL) { 120 if (res == NULL) {
120 dev_err(&pdev->dev, "no IRQ resource found\n"); 121 dev_err(&pdev->dev, "no IRQ resource found\n");
121 ret = -ENXIO; 122 ret = -ENXIO;
122 goto err_out; 123 goto err_iounmap;
123 } 124 }
124 125
125 irq = res->start; 126 irq = res->start;
@@ -128,7 +129,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
128 if (hw == NULL) { 129 if (hw == NULL) {
129 dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); 130 dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
130 ret = -ENOMEM; 131 ret = -ENOMEM;
131 goto err_out; 132 goto err_iounmap;
132 } 133 }
133 134
134 ah = hw->priv; 135 ah = hw->priv;
@@ -185,6 +186,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
185 err_free_hw: 186 err_free_hw:
186 ieee80211_free_hw(hw); 187 ieee80211_free_hw(hw);
187 platform_set_drvdata(pdev, NULL); 188 platform_set_drvdata(pdev, NULL);
189 err_iounmap:
190 iounmap(mem);
188 err_out: 191 err_out:
189 return ret; 192 return ret;
190} 193}
@@ -217,6 +220,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
217 } 220 }
218 221
219 ath5k_deinit_ah(ah); 222 ath5k_deinit_ah(ah);
223 iounmap(ah->iobase);
220 platform_set_drvdata(pdev, NULL); 224 platform_set_drvdata(pdev, NULL);
221 ieee80211_free_hw(hw); 225 ieee80211_free_hw(hw);
222 226
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 35e93704c4ef..5c008757662b 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include "ath5k.h" 19#include "ath5k.h"
18#include "reg.h" 20#include "reg.h"
19#include "debug.h" 21#include "debug.h"
@@ -728,33 +730,25 @@ void
728ath5k_ani_print_counters(struct ath5k_hw *ah) 730ath5k_ani_print_counters(struct ath5k_hw *ah)
729{ 731{
730 /* clears too */ 732 /* clears too */
731 printk(KERN_NOTICE "ACK fail\t%d\n", 733 pr_notice("ACK fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
732 ath5k_hw_reg_read(ah, AR5K_ACK_FAIL)); 734 pr_notice("RTS fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
733 printk(KERN_NOTICE "RTS fail\t%d\n", 735 pr_notice("RTS success\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_OK));
734 ath5k_hw_reg_read(ah, AR5K_RTS_FAIL)); 736 pr_notice("FCS error\t%d\n", ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
735 printk(KERN_NOTICE "RTS success\t%d\n",
736 ath5k_hw_reg_read(ah, AR5K_RTS_OK));
737 printk(KERN_NOTICE "FCS error\t%d\n",
738 ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
739 737
740 /* no clear */ 738 /* no clear */
741 printk(KERN_NOTICE "tx\t%d\n", 739 pr_notice("tx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
742 ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX)); 740 pr_notice("rx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
743 printk(KERN_NOTICE "rx\t%d\n", 741 pr_notice("busy\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
744 ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX)); 742 pr_notice("cycles\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
745 printk(KERN_NOTICE "busy\t%d\n", 743
746 ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR)); 744 pr_notice("AR5K_PHYERR_CNT1\t%d\n",
747 printk(KERN_NOTICE "cycles\t%d\n", 745 ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
748 ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE)); 746 pr_notice("AR5K_PHYERR_CNT2\t%d\n",
749 747 ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
750 printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n", 748 pr_notice("AR5K_OFDM_FIL_CNT\t%d\n",
751 ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)); 749 ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
752 printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n", 750 pr_notice("AR5K_CCK_FIL_CNT\t%d\n",
753 ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)); 751 ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
754 printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
755 ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
756 printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
757 ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
758} 752}
759 753
760#endif 754#endif
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 8d434b8f5855..64a453a6dfe4 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -76,26 +76,29 @@
76 GENERIC DRIVER DEFINITIONS 76 GENERIC DRIVER DEFINITIONS
77\****************************/ 77\****************************/
78 78
79#define ATH5K_PRINTF(fmt, ...) \ 79#define ATH5K_PRINTF(fmt, ...) \
80 printk(KERN_WARNING "%s: " fmt, __func__, ##__VA_ARGS__) 80 pr_warn("%s: " fmt, __func__, ##__VA_ARGS__)
81 81
82#define ATH5K_PRINTK(_sc, _level, _fmt, ...) \ 82void __printf(3, 4)
83 printk(_level "ath5k %s: " _fmt, \ 83_ath5k_printk(const struct ath5k_hw *ah, const char *level,
84 ((_sc) && (_sc)->hw) ? wiphy_name((_sc)->hw->wiphy) : "", \ 84 const char *fmt, ...);
85 ##__VA_ARGS__)
86 85
87#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) do { \ 86#define ATH5K_PRINTK(_sc, _level, _fmt, ...) \
88 if (net_ratelimit()) \ 87 _ath5k_printk(_sc, _level, _fmt, ##__VA_ARGS__)
89 ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); \
90 } while (0)
91 88
92#define ATH5K_INFO(_sc, _fmt, ...) \ 89#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) \
90do { \
91 if (net_ratelimit()) \
92 ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); \
93} while (0)
94
95#define ATH5K_INFO(_sc, _fmt, ...) \
93 ATH5K_PRINTK(_sc, KERN_INFO, _fmt, ##__VA_ARGS__) 96 ATH5K_PRINTK(_sc, KERN_INFO, _fmt, ##__VA_ARGS__)
94 97
95#define ATH5K_WARN(_sc, _fmt, ...) \ 98#define ATH5K_WARN(_sc, _fmt, ...) \
96 ATH5K_PRINTK_LIMIT(_sc, KERN_WARNING, _fmt, ##__VA_ARGS__) 99 ATH5K_PRINTK_LIMIT(_sc, KERN_WARNING, _fmt, ##__VA_ARGS__)
97 100
98#define ATH5K_ERR(_sc, _fmt, ...) \ 101#define ATH5K_ERR(_sc, _fmt, ...) \
99 ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__) 102 ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__)
100 103
101/* 104/*
@@ -1524,7 +1527,7 @@ void ath5k_eeprom_detach(struct ath5k_hw *ah);
1524 1527
1525/* Protocol Control Unit Functions */ 1528/* Protocol Control Unit Functions */
1526/* Helpers */ 1529/* Helpers */
1527int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, 1530int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
1528 int len, struct ieee80211_rate *rate, bool shortpre); 1531 int len, struct ieee80211_rate *rate, bool shortpre);
1529unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah); 1532unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
1530unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah); 1533unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index d7114c75fe9b..7106547a14dd 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -20,6 +20,8 @@
20* Attach/Detach Functions and helpers * 20* Attach/Detach Functions and helpers *
21\*************************************/ 21\*************************************/
22 22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
23#include <linux/pci.h> 25#include <linux/pci.h>
24#include <linux/slab.h> 26#include <linux/slab.h>
25#include "ath5k.h" 27#include "ath5k.h"
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0e643b016b32..0ba81a66061f 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -40,6 +40,8 @@
40 * 40 *
41 */ 41 */
42 42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
43#include <linux/module.h> 45#include <linux/module.h>
44#include <linux/delay.h> 46#include <linux/delay.h>
45#include <linux/dma-mapping.h> 47#include <linux/dma-mapping.h>
@@ -460,7 +462,7 @@ void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
460 } 462 }
461 463
462 if (iter_data->need_set_hw_addr && iter_data->hw_macaddr) 464 if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
463 if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0) 465 if (ether_addr_equal(iter_data->hw_macaddr, mac))
464 iter_data->need_set_hw_addr = false; 466 iter_data->need_set_hw_addr = false;
465 467
466 if (!iter_data->any_assoc) { 468 if (!iter_data->any_assoc) {
@@ -1168,7 +1170,7 @@ ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
1168 1170
1169 if (ieee80211_is_beacon(mgmt->frame_control) && 1171 if (ieee80211_is_beacon(mgmt->frame_control) &&
1170 le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS && 1172 le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
1171 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) { 1173 ether_addr_equal(mgmt->bssid, common->curbssid)) {
1172 /* 1174 /*
1173 * Received an IBSS beacon with the same BSSID. Hardware *must* 1175 * Received an IBSS beacon with the same BSSID. Hardware *must*
1174 * have updated the local TSF. We have to work around various 1176 * have updated the local TSF. We have to work around various
@@ -1232,7 +1234,7 @@ ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
1232 1234
1233 /* only beacons from our BSSID */ 1235 /* only beacons from our BSSID */
1234 if (!ieee80211_is_beacon(mgmt->frame_control) || 1236 if (!ieee80211_is_beacon(mgmt->frame_control) ||
1235 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0) 1237 !ether_addr_equal(mgmt->bssid, common->curbssid))
1236 return; 1238 return;
1237 1239
1238 ewma_add(&ah->ah_beacon_rssi_avg, rssi); 1240 ewma_add(&ah->ah_beacon_rssi_avg, rssi);
@@ -3038,3 +3040,23 @@ ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3038 ath5k_hw_set_rx_filter(ah, rfilt); 3040 ath5k_hw_set_rx_filter(ah, rfilt);
3039 ah->filter_flags = rfilt; 3041 ah->filter_flags = rfilt;
3040} 3042}
3043
3044void _ath5k_printk(const struct ath5k_hw *ah, const char *level,
3045 const char *fmt, ...)
3046{
3047 struct va_format vaf;
3048 va_list args;
3049
3050 va_start(args, fmt);
3051
3052 vaf.fmt = fmt;
3053 vaf.va = &args;
3054
3055 if (ah && ah->hw)
3056 printk("%s" pr_fmt("%s: %pV"),
3057 level, wiphy_name(ah->hw->wiphy), &vaf);
3058 else
3059 printk("%s" pr_fmt("%pV"), level, &vaf);
3060
3061 va_end(args);
3062}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index e5e8f45d86ac..9d00dab666a8 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -57,6 +57,9 @@
57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
58 * THE POSSIBILITY OF SUCH DAMAGES. 58 * THE POSSIBILITY OF SUCH DAMAGES.
59 */ 59 */
60
61#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
62
60#include <linux/export.h> 63#include <linux/export.h>
61#include <linux/moduleparam.h> 64#include <linux/moduleparam.h>
62 65
@@ -247,10 +250,10 @@ static ssize_t write_file_beacon(struct file *file,
247 250
248 if (strncmp(buf, "disable", 7) == 0) { 251 if (strncmp(buf, "disable", 7) == 0) {
249 AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); 252 AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
250 printk(KERN_INFO "debugfs disable beacons\n"); 253 pr_info("debugfs disable beacons\n");
251 } else if (strncmp(buf, "enable", 6) == 0) { 254 } else if (strncmp(buf, "enable", 6) == 0) {
252 AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); 255 AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
253 printk(KERN_INFO "debugfs enable beacons\n"); 256 pr_info("debugfs enable beacons\n");
254 } 257 }
255 return count; 258 return count;
256} 259}
@@ -450,19 +453,19 @@ static ssize_t write_file_antenna(struct file *file,
450 453
451 if (strncmp(buf, "diversity", 9) == 0) { 454 if (strncmp(buf, "diversity", 9) == 0) {
452 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); 455 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
453 printk(KERN_INFO "ath5k debug: enable diversity\n"); 456 pr_info("debug: enable diversity\n");
454 } else if (strncmp(buf, "fixed-a", 7) == 0) { 457 } else if (strncmp(buf, "fixed-a", 7) == 0) {
455 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A); 458 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
456 printk(KERN_INFO "ath5k debugfs: fixed antenna A\n"); 459 pr_info("debug: fixed antenna A\n");
457 } else if (strncmp(buf, "fixed-b", 7) == 0) { 460 } else if (strncmp(buf, "fixed-b", 7) == 0) {
458 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B); 461 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
459 printk(KERN_INFO "ath5k debug: fixed antenna B\n"); 462 pr_info("debug: fixed antenna B\n");
460 } else if (strncmp(buf, "clear", 5) == 0) { 463 } else if (strncmp(buf, "clear", 5) == 0) {
461 for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) { 464 for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
462 ah->stats.antenna_rx[i] = 0; 465 ah->stats.antenna_rx[i] = 0;
463 ah->stats.antenna_tx[i] = 0; 466 ah->stats.antenna_tx[i] = 0;
464 } 467 }
465 printk(KERN_INFO "ath5k debug: cleared antenna stats\n"); 468 pr_info("debug: cleared antenna stats\n");
466 } 469 }
467 return count; 470 return count;
468} 471}
@@ -632,7 +635,7 @@ static ssize_t write_file_frameerrors(struct file *file,
632 st->txerr_fifo = 0; 635 st->txerr_fifo = 0;
633 st->txerr_filt = 0; 636 st->txerr_filt = 0;
634 st->tx_all_count = 0; 637 st->tx_all_count = 0;
635 printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n"); 638 pr_info("debug: cleared frameerrors stats\n");
636 } 639 }
637 return count; 640 return count;
638} 641}
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index f8bfa3ac2af0..bd8d4392d68b 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -21,6 +21,8 @@
21 Hardware Descriptor Functions 21 Hardware Descriptor Functions
22\******************************/ 22\******************************/
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#include "ath5k.h" 26#include "ath5k.h"
25#include "reg.h" 27#include "reg.h"
26#include "debug.h" 28#include "debug.h"
@@ -441,10 +443,8 @@ ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
441 struct ath5k_desc *desc, 443 struct ath5k_desc *desc,
442 struct ath5k_tx_status *ts) 444 struct ath5k_tx_status *ts)
443{ 445{
444 struct ath5k_hw_2w_tx_ctl *tx_ctl;
445 struct ath5k_hw_tx_status *tx_status; 446 struct ath5k_hw_tx_status *tx_status;
446 447
447 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
448 tx_status = &desc->ud.ds_tx5210.tx_stat; 448 tx_status = &desc->ud.ds_tx5210.tx_stat;
449 449
450 /* No frame has been send or error */ 450 /* No frame has been send or error */
@@ -495,11 +495,9 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
495 struct ath5k_desc *desc, 495 struct ath5k_desc *desc,
496 struct ath5k_tx_status *ts) 496 struct ath5k_tx_status *ts)
497{ 497{
498 struct ath5k_hw_4w_tx_ctl *tx_ctl;
499 struct ath5k_hw_tx_status *tx_status; 498 struct ath5k_hw_tx_status *tx_status;
500 u32 txstat0, txstat1; 499 u32 txstat0, txstat1;
501 500
502 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
503 tx_status = &desc->ud.ds_tx5212.tx_stat; 501 tx_status = &desc->ud.ds_tx5212.tx_stat;
504 502
505 txstat1 = ACCESS_ONCE(tx_status->tx_status_1); 503 txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 5cc9aa814697..ce86f158423b 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -29,6 +29,8 @@
29 * status registers (ISR). 29 * status registers (ISR).
30 */ 30 */
31 31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
32#include "ath5k.h" 34#include "ath5k.h"
33#include "reg.h" 35#include "reg.h"
34#include "debug.h" 36#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index cd708c15b774..4026c906cc7b 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -21,6 +21,8 @@
21* EEPROM access functions and helpers * 21* EEPROM access functions and helpers *
22\*************************************/ 22\*************************************/
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#include <linux/slab.h> 26#include <linux/slab.h>
25 27
26#include "ath5k.h" 28#include "ath5k.h"
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index a1ea78e05b47..ee1c2fa8b591 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -19,6 +19,8 @@
19 * 19 *
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#include "ath5k.h" 24#include "ath5k.h"
23#include "reg.h" 25#include "reg.h"
24#include "debug.h" 26#include "debug.h"
@@ -1574,8 +1576,7 @@ ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
1574 1576
1575 /* AR5K_MODE_11B */ 1577 /* AR5K_MODE_11B */
1576 if (mode > 2) { 1578 if (mode > 2) {
1577 ATH5K_ERR(ah, 1579 ATH5K_ERR(ah, "unsupported channel mode: %d\n", mode);
1578 "unsupported channel mode: %d\n", mode);
1579 return -EINVAL; 1580 return -EINVAL;
1580 } 1581 }
1581 1582
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index c1151c723711..b9f708a45f4e 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -39,6 +39,8 @@
39 * 39 *
40 */ 40 */
41 41
42#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
43
42#include <linux/pci.h> 44#include <linux/pci.h>
43#include "ath5k.h" 45#include "ath5k.h"
44 46
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 5c5329955414..22b80af0f47c 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -41,6 +41,8 @@
41 * 41 *
42 */ 42 */
43 43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
44#include <net/mac80211.h> 46#include <net/mac80211.h>
45#include <asm/unaligned.h> 47#include <asm/unaligned.h>
46 48
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 849fa060ebc4..dff48fbc63bf 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/nl80211.h> 19#include <linux/nl80211.h>
18#include <linux/pci.h> 20#include <linux/pci.h>
19#include <linux/pci-aspm.h> 21#include <linux/pci-aspm.h>
@@ -45,6 +47,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
45 { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */ 47 { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
46 { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */ 48 { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
47 { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */ 49 { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
50 { PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */
48 { 0 } 51 { 0 }
49}; 52};
50MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); 53MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
@@ -337,28 +340,4 @@ static struct pci_driver ath5k_pci_driver = {
337 .driver.pm = ATH5K_PM_OPS, 340 .driver.pm = ATH5K_PM_OPS,
338}; 341};
339 342
340/* 343module_pci_driver(ath5k_pci_driver);
341 * Module init/exit functions
342 */
343static int __init
344init_ath5k_pci(void)
345{
346 int ret;
347
348 ret = pci_register_driver(&ath5k_pci_driver);
349 if (ret) {
350 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
351 return ret;
352 }
353
354 return 0;
355}
356
357static void __exit
358exit_ath5k_pci(void)
359{
360 pci_unregister_driver(&ath5k_pci_driver);
361}
362
363module_init(init_ath5k_pci);
364module_exit(exit_ath5k_pci);
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index cebfd6fd31d3..1f16b4227d8f 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -110,7 +110,7 @@ static const unsigned int ack_rates_high[] =
110 * bwmodes. 110 * bwmodes.
111 */ 111 */
112int 112int
113ath5k_hw_get_frame_duration(struct ath5k_hw *ah, 113ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
114 int len, struct ieee80211_rate *rate, bool shortpre) 114 int len, struct ieee80211_rate *rate, bool shortpre)
115{ 115{
116 int sifs, preamble, plcp_bits, sym_time; 116 int sifs, preamble, plcp_bits, sym_time;
@@ -120,7 +120,7 @@ ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
120 /* Fallback */ 120 /* Fallback */
121 if (!ah->ah_bwmode) { 121 if (!ah->ah_bwmode) {
122 __le16 raw_dur = ieee80211_generic_frame_duration(ah->hw, 122 __le16 raw_dur = ieee80211_generic_frame_duration(ah->hw,
123 NULL, len, rate); 123 NULL, band, len, rate);
124 124
125 /* subtract difference between long and short preamble */ 125 /* subtract difference between long and short preamble */
126 dur = le16_to_cpu(raw_dur); 126 dur = le16_to_cpu(raw_dur);
@@ -302,14 +302,15 @@ ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
302 * actual rate for this rate. See mac80211 tx.c 302 * actual rate for this rate. See mac80211 tx.c
303 * ieee80211_duration() for a brief description of 303 * ieee80211_duration() for a brief description of
304 * what rate we should choose to TX ACKs. */ 304 * what rate we should choose to TX ACKs. */
305 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false); 305 tx_time = ath5k_hw_get_frame_duration(ah, band, 10,
306 rate, false);
306 307
307 ath5k_hw_reg_write(ah, tx_time, reg); 308 ath5k_hw_reg_write(ah, tx_time, reg);
308 309
309 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) 310 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
310 continue; 311 continue;
311 312
312 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, true); 313 tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, true);
313 ath5k_hw_reg_write(ah, tx_time, 314 ath5k_hw_reg_write(ah, tx_time,
314 reg + (AR5K_SET_SHORT_PREAMBLE << 2)); 315 reg + (AR5K_SET_SHORT_PREAMBLE << 2));
315 } 316 }
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 3a2845489a1b..8b71a2d947e0 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -22,6 +22,8 @@
22* PHY related functions * 22* PHY related functions *
23\***********************/ 23\***********************/
24 24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
25#include <linux/delay.h> 27#include <linux/delay.h>
26#include <linux/slab.h> 28#include <linux/slab.h>
27#include <asm/unaligned.h> 29#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 30b50f934172..65fe929529a8 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -20,6 +20,8 @@
20Queue Control Unit, DCF Control Unit Functions 20Queue Control Unit, DCF Control Unit Functions
21\********************************************/ 21\********************************************/
22 22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
23#include "ath5k.h" 25#include "ath5k.h"
24#include "reg.h" 26#include "reg.h"
25#include "debug.h" 27#include "debug.h"
@@ -563,6 +565,7 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
563int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) 565int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
564{ 566{
565 struct ieee80211_channel *channel = ah->ah_current_channel; 567 struct ieee80211_channel *channel = ah->ah_current_channel;
568 enum ieee80211_band band;
566 struct ieee80211_rate *rate; 569 struct ieee80211_rate *rate;
567 u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock; 570 u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
568 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); 571 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
@@ -598,11 +601,12 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
598 * Also we have different lowest rate for 802.11a 601 * Also we have different lowest rate for 802.11a
599 */ 602 */
600 if (channel->band == IEEE80211_BAND_5GHZ) 603 if (channel->band == IEEE80211_BAND_5GHZ)
601 rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0]; 604 band = IEEE80211_BAND_5GHZ;
602 else 605 else
603 rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0]; 606 band = IEEE80211_BAND_2GHZ;
604 607
605 ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false); 608 rate = &ah->sbands[band].bitrates[0];
609 ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
606 610
607 /* ack_tx_time includes an SIFS already */ 611 /* ack_tx_time includes an SIFS already */
608 eifs = ack_tx_time + sifs + 2 * slot_time; 612 eifs = ack_tx_time + sifs + 2 * slot_time;
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 200f165c0c6d..0c2dd4771c36 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -23,6 +23,8 @@
23 Reset function and helpers 23 Reset function and helpers
24\****************************/ 24\****************************/
25 25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
26#include <asm/unaligned.h> 28#include <asm/unaligned.h>
27 29
28#include <linux/pci.h> /* To determine if a card is pci-e */ 30#include <linux/pci.h> /* To determine if a card is pci-e */
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 9364da7bd131..04cf0ca72610 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
1#include <linux/device.h> 3#include <linux/device.h>
2#include <linux/pci.h> 4#include <linux/pci.h>
3 5
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 85746c3eb027..8cae8886f17d 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -25,7 +25,8 @@
25obj-$(CONFIG_ATH6KL) += ath6kl_core.o 25obj-$(CONFIG_ATH6KL) += ath6kl_core.o
26ath6kl_core-y += debug.o 26ath6kl_core-y += debug.o
27ath6kl_core-y += hif.o 27ath6kl_core-y += hif.o
28ath6kl_core-y += htc.o 28ath6kl_core-y += htc_mbox.o
29ath6kl_core-y += htc_pipe.o
29ath6kl_core-y += bmi.o 30ath6kl_core-y += bmi.o
30ath6kl_core-y += cfg80211.o 31ath6kl_core-y += cfg80211.o
31ath6kl_core-y += init.o 32ath6kl_core-y += init.o
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 00d38952b5fb..28a65d3a03d0 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -15,6 +15,8 @@
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
19#include <linux/inetdevice.h> 21#include <linux/inetdevice.h>
20#include <linux/export.h> 22#include <linux/export.h>
@@ -49,6 +51,8 @@
49 .max_power = 30, \ 51 .max_power = 30, \
50} 52}
51 53
54#define DEFAULT_BG_SCAN_PERIOD 60
55
52static struct ieee80211_rate ath6kl_rates[] = { 56static struct ieee80211_rate ath6kl_rates[] = {
53 RATETAB_ENT(10, 0x1, 0), 57 RATETAB_ENT(10, 0x1, 0),
54 RATETAB_ENT(20, 0x2, 0), 58 RATETAB_ENT(20, 0x2, 0),
@@ -69,7 +73,8 @@ static struct ieee80211_rate ath6kl_rates[] = {
69#define ath6kl_g_rates (ath6kl_rates + 0) 73#define ath6kl_g_rates (ath6kl_rates + 0)
70#define ath6kl_g_rates_size 12 74#define ath6kl_g_rates_size 12
71 75
72#define ath6kl_g_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \ 76#define ath6kl_g_htcap IEEE80211_HT_CAP_SGI_20
77#define ath6kl_a_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
73 IEEE80211_HT_CAP_SGI_20 | \ 78 IEEE80211_HT_CAP_SGI_20 | \
74 IEEE80211_HT_CAP_SGI_40) 79 IEEE80211_HT_CAP_SGI_40)
75 80
@@ -126,7 +131,7 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = {
126 .channels = ath6kl_5ghz_a_channels, 131 .channels = ath6kl_5ghz_a_channels,
127 .n_bitrates = ath6kl_a_rates_size, 132 .n_bitrates = ath6kl_a_rates_size,
128 .bitrates = ath6kl_a_rates, 133 .bitrates = ath6kl_a_rates,
129 .ht_cap.cap = ath6kl_g_htcap, 134 .ht_cap.cap = ath6kl_a_htcap,
130 .ht_cap.ht_supported = true, 135 .ht_cap.ht_supported = true,
131}; 136};
132 137
@@ -607,6 +612,17 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
607 vif->req_bssid, vif->ch_hint, 612 vif->req_bssid, vif->ch_hint,
608 ar->connect_ctrl_flags, nw_subtype); 613 ar->connect_ctrl_flags, nw_subtype);
609 614
615 /* disable background scan if period is 0 */
616 if (sme->bg_scan_period == 0)
617 sme->bg_scan_period = 0xffff;
618
619 /* configure default value if not specified */
620 if (sme->bg_scan_period == -1)
621 sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD;
622
623 ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0,
624 sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
625
610 up(&ar->sem); 626 up(&ar->sem);
611 627
612 if (status == -EINVAL) { 628 if (status == -EINVAL) {
@@ -941,6 +957,8 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
941 if (test_bit(CONNECTED, &vif->flags)) 957 if (test_bit(CONNECTED, &vif->flags))
942 force_fg_scan = 1; 958 force_fg_scan = 1;
943 959
960 vif->scan_req = request;
961
944 if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, 962 if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
945 ar->fw_capabilities)) { 963 ar->fw_capabilities)) {
946 /* 964 /*
@@ -963,10 +981,10 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
963 ATH6KL_FG_SCAN_INTERVAL, 981 ATH6KL_FG_SCAN_INTERVAL,
964 n_channels, channels); 982 n_channels, channels);
965 } 983 }
966 if (ret) 984 if (ret) {
967 ath6kl_err("wmi_startscan_cmd failed\n"); 985 ath6kl_err("wmi_startscan_cmd failed\n");
968 else 986 vif->scan_req = NULL;
969 vif->scan_req = request; 987 }
970 988
971 kfree(channels); 989 kfree(channels);
972 990
@@ -1436,9 +1454,38 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
1436 struct vif_params *params) 1454 struct vif_params *params)
1437{ 1455{
1438 struct ath6kl_vif *vif = netdev_priv(ndev); 1456 struct ath6kl_vif *vif = netdev_priv(ndev);
1457 int i;
1439 1458
1440 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type); 1459 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
1441 1460
1461 /*
1462 * Don't bring up p2p on an interface which is not initialized
1463 * for p2p operation where fw does not have capability to switch
1464 * dynamically between non-p2p and p2p type interface.
1465 */
1466 if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
1467 vif->ar->fw_capabilities) &&
1468 (type == NL80211_IFTYPE_P2P_CLIENT ||
1469 type == NL80211_IFTYPE_P2P_GO)) {
1470 if (vif->ar->vif_max == 1) {
1471 if (vif->fw_vif_idx != 0)
1472 return -EINVAL;
1473 else
1474 goto set_iface_type;
1475 }
1476
1477 for (i = vif->ar->max_norm_iface; i < vif->ar->vif_max; i++) {
1478 if (i == vif->fw_vif_idx)
1479 break;
1480 }
1481
1482 if (i == vif->ar->vif_max) {
1483 ath6kl_err("Invalid interface to bring up P2P\n");
1484 return -EINVAL;
1485 }
1486 }
1487
1488set_iface_type:
1442 switch (type) { 1489 switch (type) {
1443 case NL80211_IFTYPE_STATION: 1490 case NL80211_IFTYPE_STATION:
1444 vif->next_mode = INFRA_NETWORK; 1491 vif->next_mode = INFRA_NETWORK;
@@ -1924,12 +1971,61 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
1924 return 0; 1971 return 0;
1925} 1972}
1926 1973
1974static int is_hsleep_mode_procsed(struct ath6kl_vif *vif)
1975{
1976 return test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
1977}
1978
1979static bool is_ctrl_ep_empty(struct ath6kl *ar)
1980{
1981 return !ar->tx_pending[ar->ctrl_ep];
1982}
1983
1984static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif)
1985{
1986 int ret, left;
1987
1988 clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
1989
1990 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
1991 ATH6KL_HOST_MODE_ASLEEP);
1992 if (ret)
1993 return ret;
1994
1995 left = wait_event_interruptible_timeout(ar->event_wq,
1996 is_hsleep_mode_procsed(vif),
1997 WMI_TIMEOUT);
1998 if (left == 0) {
1999 ath6kl_warn("timeout, didn't get host sleep cmd processed event\n");
2000 ret = -ETIMEDOUT;
2001 } else if (left < 0) {
2002 ath6kl_warn("error while waiting for host sleep cmd processed event %d\n",
2003 left);
2004 ret = left;
2005 }
2006
2007 if (ar->tx_pending[ar->ctrl_ep]) {
2008 left = wait_event_interruptible_timeout(ar->event_wq,
2009 is_ctrl_ep_empty(ar),
2010 WMI_TIMEOUT);
2011 if (left == 0) {
2012 ath6kl_warn("clear wmi ctrl data timeout\n");
2013 ret = -ETIMEDOUT;
2014 } else if (left < 0) {
2015 ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
2016 ret = left;
2017 }
2018 }
2019
2020 return ret;
2021}
2022
1927static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) 2023static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
1928{ 2024{
1929 struct in_device *in_dev; 2025 struct in_device *in_dev;
1930 struct in_ifaddr *ifa; 2026 struct in_ifaddr *ifa;
1931 struct ath6kl_vif *vif; 2027 struct ath6kl_vif *vif;
1932 int ret, left; 2028 int ret;
1933 u32 filter = 0; 2029 u32 filter = 0;
1934 u16 i, bmiss_time; 2030 u16 i, bmiss_time;
1935 u8 index = 0; 2031 u8 index = 0;
@@ -2030,39 +2126,11 @@ skip_arp:
2030 if (ret) 2126 if (ret)
2031 return ret; 2127 return ret;
2032 2128
2033 clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags); 2129 ret = ath6kl_cfg80211_host_sleep(ar, vif);
2034
2035 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
2036 ATH6KL_HOST_MODE_ASLEEP);
2037 if (ret) 2130 if (ret)
2038 return ret; 2131 return ret;
2039 2132
2040 left = wait_event_interruptible_timeout(ar->event_wq, 2133 return 0;
2041 test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags),
2042 WMI_TIMEOUT);
2043 if (left == 0) {
2044 ath6kl_warn("timeout, didn't get host sleep cmd "
2045 "processed event\n");
2046 ret = -ETIMEDOUT;
2047 } else if (left < 0) {
2048 ath6kl_warn("error while waiting for host sleep cmd "
2049 "processed event %d\n", left);
2050 ret = left;
2051 }
2052
2053 if (ar->tx_pending[ar->ctrl_ep]) {
2054 left = wait_event_interruptible_timeout(ar->event_wq,
2055 ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
2056 if (left == 0) {
2057 ath6kl_warn("clear wmi ctrl data timeout\n");
2058 ret = -ETIMEDOUT;
2059 } else if (left < 0) {
2060 ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
2061 ret = left;
2062 }
2063 }
2064
2065 return ret;
2066} 2134}
2067 2135
2068static int ath6kl_wow_resume(struct ath6kl *ar) 2136static int ath6kl_wow_resume(struct ath6kl *ar)
@@ -2109,10 +2177,82 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
2109 return 0; 2177 return 0;
2110} 2178}
2111 2179
2180static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
2181{
2182 struct ath6kl_vif *vif;
2183 int ret;
2184
2185 vif = ath6kl_vif_first(ar);
2186 if (!vif)
2187 return -EIO;
2188
2189 if (!ath6kl_cfg80211_ready(vif))
2190 return -EIO;
2191
2192 ath6kl_cfg80211_stop_all(ar);
2193
2194 /* Save the current power mode before enabling power save */
2195 ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
2196
2197 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
2198 if (ret)
2199 return ret;
2200
2201 /* Disable WOW mode */
2202 ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
2203 ATH6KL_WOW_MODE_DISABLE,
2204 0, 0);
2205 if (ret)
2206 return ret;
2207
2208 /* Flush all non control pkts in TX path */
2209 ath6kl_tx_data_cleanup(ar);
2210
2211 ret = ath6kl_cfg80211_host_sleep(ar, vif);
2212 if (ret)
2213 return ret;
2214
2215 return 0;
2216}
2217
2218static int ath6kl_cfg80211_deepsleep_resume(struct ath6kl *ar)
2219{
2220 struct ath6kl_vif *vif;
2221 int ret;
2222
2223 vif = ath6kl_vif_first(ar);
2224
2225 if (!vif)
2226 return -EIO;
2227
2228 if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
2229 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
2230 ar->wmi->saved_pwr_mode);
2231 if (ret)
2232 return ret;
2233 }
2234
2235 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
2236 ATH6KL_HOST_MODE_AWAKE);
2237 if (ret)
2238 return ret;
2239
2240 ar->state = ATH6KL_STATE_ON;
2241
2242 /* Reset scan parameter to default values */
2243 ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
2244 0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
2245 if (ret)
2246 return ret;
2247
2248 return 0;
2249}
2250
2112int ath6kl_cfg80211_suspend(struct ath6kl *ar, 2251int ath6kl_cfg80211_suspend(struct ath6kl *ar,
2113 enum ath6kl_cfg_suspend_mode mode, 2252 enum ath6kl_cfg_suspend_mode mode,
2114 struct cfg80211_wowlan *wow) 2253 struct cfg80211_wowlan *wow)
2115{ 2254{
2255 struct ath6kl_vif *vif;
2116 enum ath6kl_state prev_state; 2256 enum ath6kl_state prev_state;
2117 int ret; 2257 int ret;
2118 2258
@@ -2137,15 +2277,12 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
2137 2277
2138 case ATH6KL_CFG_SUSPEND_DEEPSLEEP: 2278 case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
2139 2279
2140 ath6kl_cfg80211_stop_all(ar); 2280 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep suspend\n");
2141
2142 /* save the current power mode before enabling power save */
2143 ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
2144 2281
2145 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER); 2282 ret = ath6kl_cfg80211_deepsleep_suspend(ar);
2146 if (ret) { 2283 if (ret) {
2147 ath6kl_warn("wmi powermode command failed during suspend: %d\n", 2284 ath6kl_err("deepsleep suspend failed: %d\n", ret);
2148 ret); 2285 return ret;
2149 } 2286 }
2150 2287
2151 ar->state = ATH6KL_STATE_DEEPSLEEP; 2288 ar->state = ATH6KL_STATE_DEEPSLEEP;
@@ -2185,6 +2322,9 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
2185 break; 2322 break;
2186 } 2323 }
2187 2324
2325 list_for_each_entry(vif, &ar->vif_list, list)
2326 ath6kl_cfg80211_scan_complete_event(vif, true);
2327
2188 return 0; 2328 return 0;
2189} 2329}
2190EXPORT_SYMBOL(ath6kl_cfg80211_suspend); 2330EXPORT_SYMBOL(ath6kl_cfg80211_suspend);
@@ -2206,17 +2346,13 @@ int ath6kl_cfg80211_resume(struct ath6kl *ar)
2206 break; 2346 break;
2207 2347
2208 case ATH6KL_STATE_DEEPSLEEP: 2348 case ATH6KL_STATE_DEEPSLEEP:
2209 if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) { 2349 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep resume\n");
2210 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
2211 ar->wmi->saved_pwr_mode);
2212 if (ret) {
2213 ath6kl_warn("wmi powermode command failed during resume: %d\n",
2214 ret);
2215 }
2216 }
2217
2218 ar->state = ATH6KL_STATE_ON;
2219 2350
2351 ret = ath6kl_cfg80211_deepsleep_resume(ar);
2352 if (ret) {
2353 ath6kl_warn("deep sleep resume failed: %d\n", ret);
2354 return ret;
2355 }
2220 break; 2356 break;
2221 2357
2222 case ATH6KL_STATE_CUTPOWER: 2358 case ATH6KL_STATE_CUTPOWER:
@@ -2290,31 +2426,25 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
2290} 2426}
2291#endif 2427#endif
2292 2428
2293static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev, 2429static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
2294 struct ieee80211_channel *chan, 2430 bool ht_enable)
2295 enum nl80211_channel_type channel_type)
2296{ 2431{
2297 struct ath6kl_vif *vif; 2432 struct ath6kl_htcap *htcap = &vif->htcap;
2298
2299 /*
2300 * 'dev' could be NULL if a channel change is required for the hardware
2301 * device itself, instead of a particular VIF.
2302 *
2303 * FIXME: To be handled properly when monitor mode is supported.
2304 */
2305 if (!dev)
2306 return -EBUSY;
2307
2308 vif = netdev_priv(dev);
2309 2433
2310 if (!ath6kl_cfg80211_ready(vif)) 2434 if (htcap->ht_enable == ht_enable)
2311 return -EIO; 2435 return 0;
2312 2436
2313 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n", 2437 if (ht_enable) {
2314 __func__, chan->center_freq, chan->hw_value); 2438 /* Set default ht capabilities */
2315 vif->next_chan = chan->center_freq; 2439 htcap->ht_enable = true;
2440 htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ?
2441 ath6kl_g_htcap : ath6kl_a_htcap;
2442 htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
2443 } else /* Disable ht */
2444 memset(htcap, 0, sizeof(*htcap));
2316 2445
2317 return 0; 2446 return ath6kl_wmi_set_htcap_cmd(vif->ar->wmi, vif->fw_vif_idx,
2447 band, htcap);
2318} 2448}
2319 2449
2320static bool ath6kl_is_p2p_ie(const u8 *pos) 2450static bool ath6kl_is_p2p_ie(const u8 *pos)
@@ -2391,6 +2521,81 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
2391 return 0; 2521 return 0;
2392} 2522}
2393 2523
2524static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
2525 struct ieee80211_channel *chan,
2526 enum nl80211_channel_type channel_type)
2527{
2528 struct ath6kl_vif *vif;
2529
2530 /*
2531 * 'dev' could be NULL if a channel change is required for the hardware
2532 * device itself, instead of a particular VIF.
2533 *
2534 * FIXME: To be handled properly when monitor mode is supported.
2535 */
2536 if (!dev)
2537 return -EBUSY;
2538
2539 vif = netdev_priv(dev);
2540
2541 if (!ath6kl_cfg80211_ready(vif))
2542 return -EIO;
2543
2544 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
2545 __func__, chan->center_freq, chan->hw_value);
2546 vif->next_chan = chan->center_freq;
2547 vif->next_ch_type = channel_type;
2548 vif->next_ch_band = chan->band;
2549
2550 return 0;
2551}
2552
2553static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
2554 u8 *rsn_capab)
2555{
2556 const u8 *rsn_ie;
2557 size_t rsn_ie_len;
2558 u16 cnt;
2559
2560 if (!beacon->tail)
2561 return -EINVAL;
2562
2563 rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, beacon->tail, beacon->tail_len);
2564 if (!rsn_ie)
2565 return -EINVAL;
2566
2567 rsn_ie_len = *(rsn_ie + 1);
2568 /* skip element id and length */
2569 rsn_ie += 2;
2570
2571 /* skip version, group cipher */
2572 if (rsn_ie_len < 6)
2573 return -EINVAL;
2574 rsn_ie += 6;
2575 rsn_ie_len -= 6;
2576
2577 /* skip pairwise cipher suite */
2578 if (rsn_ie_len < 2)
2579 return -EINVAL;
2580 cnt = *((u16 *) rsn_ie);
2581 rsn_ie += (2 + cnt * 4);
2582 rsn_ie_len -= (2 + cnt * 4);
2583
2584 /* skip akm suite */
2585 if (rsn_ie_len < 2)
2586 return -EINVAL;
2587 cnt = *((u16 *) rsn_ie);
2588 rsn_ie += (2 + cnt * 4);
2589 rsn_ie_len -= (2 + cnt * 4);
2590
2591 if (rsn_ie_len < 2)
2592 return -EINVAL;
2593
2594 memcpy(rsn_capab, rsn_ie, 2);
2595
2596 return 0;
2597}
2598
2394static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, 2599static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2395 struct cfg80211_ap_settings *info) 2600 struct cfg80211_ap_settings *info)
2396{ 2601{
@@ -2403,6 +2608,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2403 struct wmi_connect_cmd p; 2608 struct wmi_connect_cmd p;
2404 int res; 2609 int res;
2405 int i, ret; 2610 int i, ret;
2611 u16 rsn_capab = 0;
2406 2612
2407 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__); 2613 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__);
2408 2614
@@ -2532,6 +2738,34 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2532 p.nw_subtype = SUBTYPE_NONE; 2738 p.nw_subtype = SUBTYPE_NONE;
2533 } 2739 }
2534 2740
2741 if (info->inactivity_timeout) {
2742 res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx,
2743 info->inactivity_timeout);
2744 if (res < 0)
2745 return res;
2746 }
2747
2748 if (ath6kl_set_htcap(vif, vif->next_ch_band,
2749 vif->next_ch_type != NL80211_CHAN_NO_HT))
2750 return -EIO;
2751
2752 /*
2753 * Get the PTKSA replay counter in the RSN IE. Supplicant
2754 * will use the RSN IE in M3 message and firmware has to
2755 * advertise the same in beacon/probe response. Send
2756 * the complete RSN IE capability field to firmware
2757 */
2758 if (!ath6kl_get_rsn_capab(&info->beacon, (u8 *) &rsn_capab) &&
2759 test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
2760 ar->fw_capabilities)) {
2761 res = ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx,
2762 WLAN_EID_RSN, WMI_RSN_IE_CAPB,
2763 (const u8 *) &rsn_capab,
2764 sizeof(rsn_capab));
2765 if (res < 0)
2766 return res;
2767 }
2768
2535 res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p); 2769 res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
2536 if (res < 0) 2770 if (res < 0)
2537 return res; 2771 return res;
@@ -2566,6 +2800,13 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
2566 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); 2800 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
2567 clear_bit(CONNECTED, &vif->flags); 2801 clear_bit(CONNECTED, &vif->flags);
2568 2802
2803 /* Restore ht setting in firmware */
2804 if (ath6kl_set_htcap(vif, IEEE80211_BAND_2GHZ, true))
2805 return -EIO;
2806
2807 if (ath6kl_set_htcap(vif, IEEE80211_BAND_5GHZ, true))
2808 return -EIO;
2809
2569 return 0; 2810 return 0;
2570} 2811}
2571 2812
@@ -2747,6 +2988,21 @@ static bool ath6kl_mgmt_powersave_ap(struct ath6kl_vif *vif,
2747 return false; 2988 return false;
2748} 2989}
2749 2990
2991/* Check if SSID length is greater than DIRECT- */
2992static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
2993{
2994 const struct ieee80211_mgmt *mgmt;
2995 mgmt = (const struct ieee80211_mgmt *) buf;
2996
2997 /* variable[1] contains the SSID tag length */
2998 if (buf + len >= &mgmt->u.probe_resp.variable[1] &&
2999 (mgmt->u.probe_resp.variable[1] > P2P_WILDCARD_SSID_LEN)) {
3000 return true;
3001 }
3002
3003 return false;
3004}
3005
2750static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, 3006static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2751 struct ieee80211_channel *chan, bool offchan, 3007 struct ieee80211_channel *chan, bool offchan,
2752 enum nl80211_channel_type channel_type, 3008 enum nl80211_channel_type channel_type,
@@ -2761,11 +3017,11 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2761 bool more_data, queued; 3017 bool more_data, queued;
2762 3018
2763 mgmt = (const struct ieee80211_mgmt *) buf; 3019 mgmt = (const struct ieee80211_mgmt *) buf;
2764 if (buf + len >= mgmt->u.probe_resp.variable && 3020 if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
2765 vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && 3021 ieee80211_is_probe_resp(mgmt->frame_control) &&
2766 ieee80211_is_probe_resp(mgmt->frame_control)) { 3022 ath6kl_is_p2p_go_ssid(buf, len)) {
2767 /* 3023 /*
2768 * Send Probe Response frame in AP mode using a separate WMI 3024 * Send Probe Response frame in GO mode using a separate WMI
2769 * command to allow the target to fill in the generic IEs. 3025 * command to allow the target to fill in the generic IEs.
2770 */ 3026 */
2771 *cookie = 0; /* TX status not supported */ 3027 *cookie = 0; /* TX status not supported */
@@ -2833,6 +3089,8 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
2833 if (vif->sme_state != SME_DISCONNECTED) 3089 if (vif->sme_state != SME_DISCONNECTED)
2834 return -EBUSY; 3090 return -EBUSY;
2835 3091
3092 ath6kl_cfg80211_scan_complete_event(vif, true);
3093
2836 for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) { 3094 for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) {
2837 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, 3095 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
2838 i, DISABLE_SSID_FLAG, 3096 i, DISABLE_SSID_FLAG,
@@ -3094,6 +3352,7 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
3094 vif->next_mode = nw_type; 3352 vif->next_mode = nw_type;
3095 vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL; 3353 vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
3096 vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME; 3354 vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
3355 vif->htcap.ht_enable = true;
3097 3356
3098 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); 3357 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
3099 if (fw_vif_idx != 0) 3358 if (fw_vif_idx != 0)
@@ -3181,6 +3440,10 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3181 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities)) 3440 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
3182 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 3441 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
3183 3442
3443 if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
3444 ar->fw_capabilities))
3445 ar->wiphy->features = NL80211_FEATURE_INACTIVITY_TIMER;
3446
3184 ar->wiphy->probe_resp_offload = 3447 ar->wiphy->probe_resp_offload =
3185 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 3448 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
3186 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 3449 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index a60e78c0472f..98a886154d9c 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -22,7 +22,8 @@
22 22
23#define ATH6KL_MAX_IE 256 23#define ATH6KL_MAX_IE 256
24 24
25extern int ath6kl_printk(const char *level, const char *fmt, ...); 25extern __printf(2, 3)
26int ath6kl_printk(const char *level, const char *fmt, ...);
26 27
27/* 28/*
28 * Reflects the version of binary interface exposed by ATH6KL target 29 * Reflects the version of binary interface exposed by ATH6KL target
@@ -77,6 +78,7 @@ enum crypto_type {
77 78
78struct htc_endpoint_credit_dist; 79struct htc_endpoint_credit_dist;
79struct ath6kl; 80struct ath6kl;
81struct ath6kl_htcap;
80enum htc_credit_dist_reason; 82enum htc_credit_dist_reason;
81struct ath6kl_htc_credit_info; 83struct ath6kl_htc_credit_info;
82 84
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 45e641f3a41b..fdb3b1decc76 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -20,9 +20,11 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
22#include <linux/export.h> 22#include <linux/export.h>
23#include <linux/vmalloc.h>
23 24
24#include "debug.h" 25#include "debug.h"
25#include "hif-ops.h" 26#include "hif-ops.h"
27#include "htc-ops.h"
26#include "cfg80211.h" 28#include "cfg80211.h"
27 29
28unsigned int debug_mask; 30unsigned int debug_mask;
@@ -39,12 +41,36 @@ module_param(uart_debug, uint, 0644);
39module_param(ath6kl_p2p, uint, 0644); 41module_param(ath6kl_p2p, uint, 0644);
40module_param(testmode, uint, 0644); 42module_param(testmode, uint, 0644);
41 43
42int ath6kl_core_init(struct ath6kl *ar) 44void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
45{
46 ath6kl_htc_tx_complete(ar, skb);
47}
48EXPORT_SYMBOL(ath6kl_core_tx_complete);
49
50void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe)
51{
52 ath6kl_htc_rx_complete(ar, skb, pipe);
53}
54EXPORT_SYMBOL(ath6kl_core_rx_complete);
55
56int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
43{ 57{
44 struct ath6kl_bmi_target_info targ_info; 58 struct ath6kl_bmi_target_info targ_info;
45 struct net_device *ndev; 59 struct net_device *ndev;
46 int ret = 0, i; 60 int ret = 0, i;
47 61
62 switch (htc_type) {
63 case ATH6KL_HTC_TYPE_MBOX:
64 ath6kl_htc_mbox_attach(ar);
65 break;
66 case ATH6KL_HTC_TYPE_PIPE:
67 ath6kl_htc_pipe_attach(ar);
68 break;
69 default:
70 WARN_ON(1);
71 return -ENOMEM;
72 }
73
48 ar->ath6kl_wq = create_singlethread_workqueue("ath6kl"); 74 ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
49 if (!ar->ath6kl_wq) 75 if (!ar->ath6kl_wq)
50 return -ENOMEM; 76 return -ENOMEM;
@@ -280,7 +306,7 @@ void ath6kl_core_cleanup(struct ath6kl *ar)
280 306
281 kfree(ar->fw_board); 307 kfree(ar->fw_board);
282 kfree(ar->fw_otp); 308 kfree(ar->fw_otp);
283 kfree(ar->fw); 309 vfree(ar->fw);
284 kfree(ar->fw_patch); 310 kfree(ar->fw_patch);
285 kfree(ar->fw_testscript); 311 kfree(ar->fw_testscript);
286 312
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index f1dd8906be45..9d67964a51dd 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -91,6 +91,15 @@ enum ath6kl_fw_capability {
91 */ 91 */
92 ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, 92 ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
93 93
94 /*
95 * Firmware has support to cleanup inactive stations
96 * in AP mode.
97 */
98 ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
99
100 /* Firmware has support to override rsn cap of rsn ie */
101 ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
102
94 /* this needs to be last */ 103 /* this needs to be last */
95 ATH6KL_FW_CAPABILITY_MAX, 104 ATH6KL_FW_CAPABILITY_MAX,
96}; 105};
@@ -205,6 +214,8 @@ struct ath6kl_fw_ie {
205#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3) 214#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
206#define ATH6KL_CONF_UART_DEBUG BIT(4) 215#define ATH6KL_CONF_UART_DEBUG BIT(4)
207 216
217#define P2P_WILDCARD_SSID_LEN 7 /* DIRECT- */
218
208enum wlan_low_pwr_state { 219enum wlan_low_pwr_state {
209 WLAN_POWER_STATE_ON, 220 WLAN_POWER_STATE_ON,
210 WLAN_POWER_STATE_CUT_PWR, 221 WLAN_POWER_STATE_CUT_PWR,
@@ -454,6 +465,11 @@ enum ath6kl_hif_type {
454 ATH6KL_HIF_TYPE_USB, 465 ATH6KL_HIF_TYPE_USB,
455}; 466};
456 467
468enum ath6kl_htc_type {
469 ATH6KL_HTC_TYPE_MBOX,
470 ATH6KL_HTC_TYPE_PIPE,
471};
472
457/* Max number of filters that hw supports */ 473/* Max number of filters that hw supports */
458#define ATH6K_MAX_MC_FILTERS_PER_LIST 7 474#define ATH6K_MAX_MC_FILTERS_PER_LIST 7
459struct ath6kl_mc_filter { 475struct ath6kl_mc_filter {
@@ -461,6 +477,12 @@ struct ath6kl_mc_filter {
461 char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE]; 477 char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
462}; 478};
463 479
480struct ath6kl_htcap {
481 bool ht_enable;
482 u8 ampdu_factor;
483 unsigned short cap_info;
484};
485
464/* 486/*
465 * Driver's maximum limit, note that some firmwares support only one vif 487 * Driver's maximum limit, note that some firmwares support only one vif
466 * and the runtime (current) limit must be checked from ar->vif_max. 488 * and the runtime (current) limit must be checked from ar->vif_max.
@@ -509,6 +531,7 @@ struct ath6kl_vif {
509 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; 531 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
510 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; 532 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
511 struct aggr_info *aggr_cntxt; 533 struct aggr_info *aggr_cntxt;
534 struct ath6kl_htcap htcap;
512 535
513 struct timer_list disconnect_timer; 536 struct timer_list disconnect_timer;
514 struct timer_list sched_scan_timer; 537 struct timer_list sched_scan_timer;
@@ -521,6 +544,8 @@ struct ath6kl_vif {
521 u32 send_action_id; 544 u32 send_action_id;
522 bool probe_req_report; 545 bool probe_req_report;
523 u16 next_chan; 546 u16 next_chan;
547 enum nl80211_channel_type next_ch_type;
548 enum ieee80211_band next_ch_band;
524 u16 assoc_bss_beacon_int; 549 u16 assoc_bss_beacon_int;
525 u16 listen_intvl_t; 550 u16 listen_intvl_t;
526 u16 bmiss_time_t; 551 u16 bmiss_time_t;
@@ -568,6 +593,7 @@ struct ath6kl {
568 593
569 struct ath6kl_bmi bmi; 594 struct ath6kl_bmi bmi;
570 const struct ath6kl_hif_ops *hif_ops; 595 const struct ath6kl_hif_ops *hif_ops;
596 const struct ath6kl_htc_ops *htc_ops;
571 struct wmi *wmi; 597 struct wmi *wmi;
572 int tx_pending[ENDPOINT_MAX]; 598 int tx_pending[ENDPOINT_MAX];
573 int total_tx_data_pend; 599 int total_tx_data_pend;
@@ -746,7 +772,8 @@ void init_netdev(struct net_device *dev);
746void ath6kl_cookie_init(struct ath6kl *ar); 772void ath6kl_cookie_init(struct ath6kl *ar);
747void ath6kl_cookie_cleanup(struct ath6kl *ar); 773void ath6kl_cookie_cleanup(struct ath6kl *ar);
748void ath6kl_rx(struct htc_target *target, struct htc_packet *packet); 774void ath6kl_rx(struct htc_target *target, struct htc_packet *packet);
749void ath6kl_tx_complete(void *context, struct list_head *packet_queue); 775void ath6kl_tx_complete(struct htc_target *context,
776 struct list_head *packet_queue);
750enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, 777enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
751 struct htc_packet *packet); 778 struct htc_packet *packet);
752void ath6kl_stop_txrx(struct ath6kl *ar); 779void ath6kl_stop_txrx(struct ath6kl *ar);
@@ -821,8 +848,11 @@ int ath6kl_init_hw_params(struct ath6kl *ar);
821 848
822void ath6kl_check_wow_status(struct ath6kl *ar); 849void ath6kl_check_wow_status(struct ath6kl *ar);
823 850
851void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb);
852void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
853
824struct ath6kl *ath6kl_core_create(struct device *dev); 854struct ath6kl *ath6kl_core_create(struct device *dev);
825int ath6kl_core_init(struct ath6kl *ar); 855int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type);
826void ath6kl_core_cleanup(struct ath6kl *ar); 856void ath6kl_core_cleanup(struct ath6kl *ar);
827void ath6kl_core_destroy(struct ath6kl *ar); 857void ath6kl_core_destroy(struct ath6kl *ar);
828 858
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index d01403a263ff..1b76aff78508 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -616,6 +616,12 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
616 "Num disconnects", tgt_stats->cs_discon_cnt); 616 "Num disconnects", tgt_stats->cs_discon_cnt);
617 len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", 617 len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
618 "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi); 618 "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi);
619 len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
620 "ARP pkt received", tgt_stats->arp_received);
621 len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
622 "ARP pkt matched", tgt_stats->arp_matched);
623 len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
624 "ARP pkt replied", tgt_stats->arp_replied);
619 625
620 if (len > buf_len) 626 if (len > buf_len)
621 len = buf_len; 627 len = buf_len;
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 1803a0baae82..49639d8266c2 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -43,6 +43,7 @@ enum ATH6K_DEBUG_MASK {
43 ATH6KL_DBG_WMI_DUMP = BIT(19), 43 ATH6KL_DBG_WMI_DUMP = BIT(19),
44 ATH6KL_DBG_SUSPEND = BIT(20), 44 ATH6KL_DBG_SUSPEND = BIT(20),
45 ATH6KL_DBG_USB = BIT(21), 45 ATH6KL_DBG_USB = BIT(21),
46 ATH6KL_DBG_USB_BULK = BIT(22),
46 ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */ 47 ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
47}; 48};
48 49
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
index fd84086638e3..8c9e72d5250d 100644
--- a/drivers/net/wireless/ath/ath6kl/hif-ops.h
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -150,4 +150,38 @@ static inline void ath6kl_hif_stop(struct ath6kl *ar)
150 ar->hif_ops->stop(ar); 150 ar->hif_ops->stop(ar);
151} 151}
152 152
153static inline int ath6kl_hif_pipe_send(struct ath6kl *ar,
154 u8 pipe, struct sk_buff *hdr_buf,
155 struct sk_buff *buf)
156{
157 ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe send\n");
158
159 return ar->hif_ops->pipe_send(ar, pipe, hdr_buf, buf);
160}
161
162static inline void ath6kl_hif_pipe_get_default(struct ath6kl *ar,
163 u8 *ul_pipe, u8 *dl_pipe)
164{
165 ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n");
166
167 ar->hif_ops->pipe_get_default(ar, ul_pipe, dl_pipe);
168}
169
170static inline int ath6kl_hif_pipe_map_service(struct ath6kl *ar,
171 u16 service_id, u8 *ul_pipe,
172 u8 *dl_pipe)
173{
174 ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n");
175
176 return ar->hif_ops->pipe_map_service(ar, service_id, ul_pipe, dl_pipe);
177}
178
179static inline u16 ath6kl_hif_pipe_get_free_queue_number(struct ath6kl *ar,
180 u8 pipe)
181{
182 ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get free queue number\n");
183
184 return ar->hif_ops->pipe_get_free_queue_number(ar, pipe);
185}
186
153#endif 187#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 20ed6b73517b..61f6b21fb0ae 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -256,6 +256,12 @@ struct ath6kl_hif_ops {
256 int (*power_on)(struct ath6kl *ar); 256 int (*power_on)(struct ath6kl *ar);
257 int (*power_off)(struct ath6kl *ar); 257 int (*power_off)(struct ath6kl *ar);
258 void (*stop)(struct ath6kl *ar); 258 void (*stop)(struct ath6kl *ar);
259 int (*pipe_send)(struct ath6kl *ar, u8 pipe, struct sk_buff *hdr_buf,
260 struct sk_buff *buf);
261 void (*pipe_get_default)(struct ath6kl *ar, u8 *pipe_ul, u8 *pipe_dl);
262 int (*pipe_map_service)(struct ath6kl *ar, u16 service_id, u8 *pipe_ul,
263 u8 *pipe_dl);
264 u16 (*pipe_get_free_queue_number)(struct ath6kl *ar, u8 pipe);
259}; 265};
260 266
261int ath6kl_hif_setup(struct ath6kl_device *dev); 267int ath6kl_hif_setup(struct ath6kl_device *dev);
diff --git a/drivers/net/wireless/ath/ath6kl/htc-ops.h b/drivers/net/wireless/ath/ath6kl/htc-ops.h
new file mode 100644
index 000000000000..2d4eed55cfd1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc-ops.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_OPS_H
18#define HTC_OPS_H
19
20#include "htc.h"
21#include "debug.h"
22
23static inline void *ath6kl_htc_create(struct ath6kl *ar)
24{
25 return ar->htc_ops->create(ar);
26}
27
28static inline int ath6kl_htc_wait_target(struct htc_target *target)
29{
30 return target->dev->ar->htc_ops->wait_target(target);
31}
32
33static inline int ath6kl_htc_start(struct htc_target *target)
34{
35 return target->dev->ar->htc_ops->start(target);
36}
37
38static inline int ath6kl_htc_conn_service(struct htc_target *target,
39 struct htc_service_connect_req *req,
40 struct htc_service_connect_resp *resp)
41{
42 return target->dev->ar->htc_ops->conn_service(target, req, resp);
43}
44
45static inline int ath6kl_htc_tx(struct htc_target *target,
46 struct htc_packet *packet)
47{
48 return target->dev->ar->htc_ops->tx(target, packet);
49}
50
51static inline void ath6kl_htc_stop(struct htc_target *target)
52{
53 return target->dev->ar->htc_ops->stop(target);
54}
55
56static inline void ath6kl_htc_cleanup(struct htc_target *target)
57{
58 return target->dev->ar->htc_ops->cleanup(target);
59}
60
61static inline void ath6kl_htc_flush_txep(struct htc_target *target,
62 enum htc_endpoint_id endpoint,
63 u16 tag)
64{
65 return target->dev->ar->htc_ops->flush_txep(target, endpoint, tag);
66}
67
68static inline void ath6kl_htc_flush_rx_buf(struct htc_target *target)
69{
70 return target->dev->ar->htc_ops->flush_rx_buf(target);
71}
72
73static inline void ath6kl_htc_activity_changed(struct htc_target *target,
74 enum htc_endpoint_id endpoint,
75 bool active)
76{
77 return target->dev->ar->htc_ops->activity_changed(target, endpoint,
78 active);
79}
80
81static inline int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
82 enum htc_endpoint_id endpoint)
83{
84 return target->dev->ar->htc_ops->get_rxbuf_num(target, endpoint);
85}
86
87static inline int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
88 struct list_head *pktq)
89{
90 return target->dev->ar->htc_ops->add_rxbuf_multiple(target, pktq);
91}
92
93static inline int ath6kl_htc_credit_setup(struct htc_target *target,
94 struct ath6kl_htc_credit_info *info)
95{
96 return target->dev->ar->htc_ops->credit_setup(target, info);
97}
98
99static inline void ath6kl_htc_tx_complete(struct ath6kl *ar,
100 struct sk_buff *skb)
101{
102 ar->htc_ops->tx_complete(ar, skb);
103}
104
105
106static inline void ath6kl_htc_rx_complete(struct ath6kl *ar,
107 struct sk_buff *skb, u8 pipe)
108{
109 ar->htc_ops->rx_complete(ar, skb, pipe);
110}
111
112
113#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 5027ccc36b62..a2c8ff809793 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -25,6 +25,7 @@
25/* send direction */ 25/* send direction */
26#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0) 26#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
27#define HTC_FLAGS_SEND_BUNDLE (1 << 1) 27#define HTC_FLAGS_SEND_BUNDLE (1 << 1)
28#define HTC_FLAGS_TX_FIXUP_NETBUF (1 << 2)
28 29
29/* receive direction */ 30/* receive direction */
30#define HTC_FLG_RX_UNUSED (1 << 0) 31#define HTC_FLG_RX_UNUSED (1 << 0)
@@ -56,6 +57,10 @@
56#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2 57#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2
57#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4 58#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4
58#define HTC_CONN_FLGS_THRESH_MASK 0x3 59#define HTC_CONN_FLGS_THRESH_MASK 0x3
60/* disable credit flow control on a specific service */
61#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3)
62#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8
63#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00
59 64
60/* connect response status codes */ 65/* connect response status codes */
61#define HTC_SERVICE_SUCCESS 0 66#define HTC_SERVICE_SUCCESS 0
@@ -75,6 +80,7 @@
75#define HTC_RECORD_LOOKAHEAD_BUNDLE 3 80#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
76 81
77#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0) 82#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0)
83#define HTC_SETUP_COMP_FLG_DISABLE_TX_CREDIT_FLOW (1 << 1)
78 84
79#define MAKE_SERVICE_ID(group, index) \ 85#define MAKE_SERVICE_ID(group, index) \
80 (int)(((int)group << 8) | (int)(index)) 86 (int)(((int)group << 8) | (int)(index))
@@ -109,6 +115,8 @@
109 115
110/* HTC operational parameters */ 116/* HTC operational parameters */
111#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */ 117#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
118#define HTC_TARGET_RESPONSE_POLL_WAIT 10
119#define HTC_TARGET_RESPONSE_POLL_COUNT 200
112#define HTC_TARGET_DEBUG_INTR_MASK 0x01 120#define HTC_TARGET_DEBUG_INTR_MASK 0x01
113#define HTC_TARGET_CREDIT_INTR_MASK 0xF0 121#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
114 122
@@ -128,6 +136,7 @@
128 136
129#define HTC_RECV_WAIT_BUFFERS (1 << 0) 137#define HTC_RECV_WAIT_BUFFERS (1 << 0)
130#define HTC_OP_STATE_STOPPING (1 << 0) 138#define HTC_OP_STATE_STOPPING (1 << 0)
139#define HTC_OP_STATE_SETUP_COMPLETE (1 << 1)
131 140
132/* 141/*
133 * The frame header length and message formats defined herein were selected 142 * The frame header length and message formats defined herein were selected
@@ -311,6 +320,14 @@ struct htc_packet {
311 320
312 void (*completion) (struct htc_target *, struct htc_packet *); 321 void (*completion) (struct htc_target *, struct htc_packet *);
313 struct htc_target *context; 322 struct htc_target *context;
323
324 /*
325 * optimization for network-oriented data, the HTC packet
326 * can pass the network buffer corresponding to the HTC packet
327 * lower layers may optimized the transfer knowing this is
328 * a network buffer
329 */
330 struct sk_buff *skb;
314}; 331};
315 332
316enum htc_send_full_action { 333enum htc_send_full_action {
@@ -319,12 +336,14 @@ enum htc_send_full_action {
319}; 336};
320 337
321struct htc_ep_callbacks { 338struct htc_ep_callbacks {
339 void (*tx_complete) (struct htc_target *, struct htc_packet *);
322 void (*rx) (struct htc_target *, struct htc_packet *); 340 void (*rx) (struct htc_target *, struct htc_packet *);
323 void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint); 341 void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
324 enum htc_send_full_action (*tx_full) (struct htc_target *, 342 enum htc_send_full_action (*tx_full) (struct htc_target *,
325 struct htc_packet *); 343 struct htc_packet *);
326 struct htc_packet *(*rx_allocthresh) (struct htc_target *, 344 struct htc_packet *(*rx_allocthresh) (struct htc_target *,
327 enum htc_endpoint_id, int); 345 enum htc_endpoint_id, int);
346 void (*tx_comp_multi) (struct htc_target *, struct list_head *);
328 int rx_alloc_thresh; 347 int rx_alloc_thresh;
329 int rx_refill_thresh; 348 int rx_refill_thresh;
330}; 349};
@@ -502,6 +521,13 @@ struct htc_endpoint {
502 u32 conn_flags; 521 u32 conn_flags;
503 struct htc_endpoint_stats ep_st; 522 struct htc_endpoint_stats ep_st;
504 u16 tx_drop_packet_threshold; 523 u16 tx_drop_packet_threshold;
524
525 struct {
526 u8 pipeid_ul;
527 u8 pipeid_dl;
528 struct list_head tx_lookup_queue;
529 bool tx_credit_flow_enabled;
530 } pipe;
505}; 531};
506 532
507struct htc_control_buffer { 533struct htc_control_buffer {
@@ -509,6 +535,42 @@ struct htc_control_buffer {
509 u8 *buf; 535 u8 *buf;
510}; 536};
511 537
538struct htc_pipe_txcredit_alloc {
539 u16 service_id;
540 u8 credit_alloc;
541};
542
543enum htc_send_queue_result {
544 HTC_SEND_QUEUE_OK = 0, /* packet was queued */
545 HTC_SEND_QUEUE_DROP = 1, /* this packet should be dropped */
546};
547
548struct ath6kl_htc_ops {
549 void* (*create)(struct ath6kl *ar);
550 int (*wait_target)(struct htc_target *target);
551 int (*start)(struct htc_target *target);
552 int (*conn_service)(struct htc_target *target,
553 struct htc_service_connect_req *req,
554 struct htc_service_connect_resp *resp);
555 int (*tx)(struct htc_target *target, struct htc_packet *packet);
556 void (*stop)(struct htc_target *target);
557 void (*cleanup)(struct htc_target *target);
558 void (*flush_txep)(struct htc_target *target,
559 enum htc_endpoint_id endpoint, u16 tag);
560 void (*flush_rx_buf)(struct htc_target *target);
561 void (*activity_changed)(struct htc_target *target,
562 enum htc_endpoint_id endpoint,
563 bool active);
564 int (*get_rxbuf_num)(struct htc_target *target,
565 enum htc_endpoint_id endpoint);
566 int (*add_rxbuf_multiple)(struct htc_target *target,
567 struct list_head *pktq);
568 int (*credit_setup)(struct htc_target *target,
569 struct ath6kl_htc_credit_info *cred_info);
570 int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb);
571 int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
572};
573
512struct ath6kl_device; 574struct ath6kl_device;
513 575
514/* our HTC target state */ 576/* our HTC target state */
@@ -557,36 +619,19 @@ struct htc_target {
557 619
558 /* counts the number of Tx without bundling continously per AC */ 620 /* counts the number of Tx without bundling continously per AC */
559 u32 ac_tx_count[WMM_NUM_AC]; 621 u32 ac_tx_count[WMM_NUM_AC];
622
623 struct {
624 struct htc_packet *htc_packet_pool;
625 u8 ctrl_response_buf[HTC_MAX_CTRL_MSG_LEN];
626 int ctrl_response_len;
627 bool ctrl_response_valid;
628 struct htc_pipe_txcredit_alloc txcredit_alloc[ENDPOINT_MAX];
629 } pipe;
560}; 630};
561 631
562void *ath6kl_htc_create(struct ath6kl *ar);
563void ath6kl_htc_set_credit_dist(struct htc_target *target,
564 struct ath6kl_htc_credit_info *cred_info,
565 u16 svc_pri_order[], int len);
566int ath6kl_htc_wait_target(struct htc_target *target);
567int ath6kl_htc_start(struct htc_target *target);
568int ath6kl_htc_conn_service(struct htc_target *target,
569 struct htc_service_connect_req *req,
570 struct htc_service_connect_resp *resp);
571int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet);
572void ath6kl_htc_stop(struct htc_target *target);
573void ath6kl_htc_cleanup(struct htc_target *target);
574void ath6kl_htc_flush_txep(struct htc_target *target,
575 enum htc_endpoint_id endpoint, u16 tag);
576void ath6kl_htc_flush_rx_buf(struct htc_target *target);
577void ath6kl_htc_indicate_activity_change(struct htc_target *target,
578 enum htc_endpoint_id endpoint,
579 bool active);
580int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
581 enum htc_endpoint_id endpoint);
582int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
583 struct list_head *pktq);
584int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, 632int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
585 u32 msg_look_ahead, int *n_pkts); 633 u32 msg_look_ahead, int *n_pkts);
586 634
587int ath6kl_credit_setup(void *htc_handle,
588 struct ath6kl_htc_credit_info *cred_info);
589
590static inline void set_htc_pkt_info(struct htc_packet *packet, void *context, 635static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
591 u8 *buf, unsigned int len, 636 u8 *buf, unsigned int len,
592 enum htc_endpoint_id eid, u16 tag) 637 enum htc_endpoint_id eid, u16 tag)
@@ -626,4 +671,7 @@ static inline int get_queue_depth(struct list_head *queue)
626 return depth; 671 return depth;
627} 672}
628 673
674void ath6kl_htc_pipe_attach(struct ath6kl *ar);
675void ath6kl_htc_mbox_attach(struct ath6kl *ar);
676
629#endif 677#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 4849d99cce77..065e61516d7a 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -23,6 +23,14 @@
23 23
24#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) 24#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
25 25
26static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
27static void ath6kl_htc_mbox_stop(struct htc_target *target);
28static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
29 struct list_head *pkt_queue);
30static void ath6kl_htc_set_credit_dist(struct htc_target *target,
31 struct ath6kl_htc_credit_info *cred_info,
32 u16 svc_pri_order[], int len);
33
26/* threshold to re-enable Tx bundling for an AC*/ 34/* threshold to re-enable Tx bundling for an AC*/
27#define TX_RESUME_BUNDLE_THRESHOLD 1500 35#define TX_RESUME_BUNDLE_THRESHOLD 1500
28 36
@@ -130,8 +138,8 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
130} 138}
131 139
132/* initialize and setup credit distribution */ 140/* initialize and setup credit distribution */
133int ath6kl_credit_setup(void *htc_handle, 141static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
134 struct ath6kl_htc_credit_info *cred_info) 142 struct ath6kl_htc_credit_info *cred_info)
135{ 143{
136 u16 servicepriority[5]; 144 u16 servicepriority[5];
137 145
@@ -144,7 +152,7 @@ int ath6kl_credit_setup(void *htc_handle,
144 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */ 152 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
145 153
146 /* set priority list */ 154 /* set priority list */
147 ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5); 155 ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);
148 156
149 return 0; 157 return 0;
150} 158}
@@ -432,7 +440,7 @@ static void htc_tx_complete(struct htc_endpoint *endpoint,
432 "htc tx complete ep %d pkts %d\n", 440 "htc tx complete ep %d pkts %d\n",
433 endpoint->eid, get_queue_depth(txq)); 441 endpoint->eid, get_queue_depth(txq));
434 442
435 ath6kl_tx_complete(endpoint->target->dev->ar, txq); 443 ath6kl_tx_complete(endpoint->target, txq);
436} 444}
437 445
438static void htc_tx_comp_handler(struct htc_target *target, 446static void htc_tx_comp_handler(struct htc_target *target,
@@ -1065,7 +1073,7 @@ static int htc_setup_tx_complete(struct htc_target *target)
1065 return status; 1073 return status;
1066} 1074}
1067 1075
1068void ath6kl_htc_set_credit_dist(struct htc_target *target, 1076static void ath6kl_htc_set_credit_dist(struct htc_target *target,
1069 struct ath6kl_htc_credit_info *credit_info, 1077 struct ath6kl_htc_credit_info *credit_info,
1070 u16 srvc_pri_order[], int list_len) 1078 u16 srvc_pri_order[], int list_len)
1071{ 1079{
@@ -1093,7 +1101,8 @@ void ath6kl_htc_set_credit_dist(struct htc_target *target,
1093 } 1101 }
1094} 1102}
1095 1103
1096int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet) 1104static int ath6kl_htc_mbox_tx(struct htc_target *target,
1105 struct htc_packet *packet)
1097{ 1106{
1098 struct htc_endpoint *endpoint; 1107 struct htc_endpoint *endpoint;
1099 struct list_head queue; 1108 struct list_head queue;
@@ -1121,7 +1130,7 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
1121} 1130}
1122 1131
1123/* flush endpoint TX queue */ 1132/* flush endpoint TX queue */
1124void ath6kl_htc_flush_txep(struct htc_target *target, 1133static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
1125 enum htc_endpoint_id eid, u16 tag) 1134 enum htc_endpoint_id eid, u16 tag)
1126{ 1135{
1127 struct htc_packet *packet, *tmp_pkt; 1136 struct htc_packet *packet, *tmp_pkt;
@@ -1173,12 +1182,13 @@ static void ath6kl_htc_flush_txep_all(struct htc_target *target)
1173 if (endpoint->svc_id == 0) 1182 if (endpoint->svc_id == 0)
1174 /* not in use.. */ 1183 /* not in use.. */
1175 continue; 1184 continue;
1176 ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL); 1185 ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
1177 } 1186 }
1178} 1187}
1179 1188
1180void ath6kl_htc_indicate_activity_change(struct htc_target *target, 1189static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
1181 enum htc_endpoint_id eid, bool active) 1190 enum htc_endpoint_id eid,
1191 bool active)
1182{ 1192{
1183 struct htc_endpoint *endpoint = &target->endpoint[eid]; 1193 struct htc_endpoint *endpoint = &target->endpoint[eid];
1184 bool dist = false; 1194 bool dist = false;
@@ -1246,7 +1256,7 @@ static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
1246 1256
1247 INIT_LIST_HEAD(&queue); 1257 INIT_LIST_HEAD(&queue);
1248 list_add_tail(&packet->list, &queue); 1258 list_add_tail(&packet->list, &queue);
1249 return ath6kl_htc_add_rxbuf_multiple(target, &queue); 1259 return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
1250} 1260}
1251 1261
1252static void htc_reclaim_rxbuf(struct htc_target *target, 1262static void htc_reclaim_rxbuf(struct htc_target *target,
@@ -1353,7 +1363,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
1353 sizeof(*htc_hdr)); 1363 sizeof(*htc_hdr));
1354 1364
1355 if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) { 1365 if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
1356 ath6kl_warn("Rx buffer requested with invalid length\n"); 1366 ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
1367 htc_hdr->eid, htc_hdr->flags,
1368 le16_to_cpu(htc_hdr->payld_len));
1357 return -EINVAL; 1369 return -EINVAL;
1358 } 1370 }
1359 1371
@@ -2288,7 +2300,7 @@ fail_ctrl_rx:
2288 return NULL; 2300 return NULL;
2289} 2301}
2290 2302
2291int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, 2303static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
2292 struct list_head *pkt_queue) 2304 struct list_head *pkt_queue)
2293{ 2305{
2294 struct htc_endpoint *endpoint; 2306 struct htc_endpoint *endpoint;
@@ -2350,7 +2362,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
2350 return status; 2362 return status;
2351} 2363}
2352 2364
2353void ath6kl_htc_flush_rx_buf(struct htc_target *target) 2365static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
2354{ 2366{
2355 struct htc_endpoint *endpoint; 2367 struct htc_endpoint *endpoint;
2356 struct htc_packet *packet, *tmp_pkt; 2368 struct htc_packet *packet, *tmp_pkt;
@@ -2392,7 +2404,7 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
2392 } 2404 }
2393} 2405}
2394 2406
2395int ath6kl_htc_conn_service(struct htc_target *target, 2407static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
2396 struct htc_service_connect_req *conn_req, 2408 struct htc_service_connect_req *conn_req,
2397 struct htc_service_connect_resp *conn_resp) 2409 struct htc_service_connect_resp *conn_resp)
2398{ 2410{
@@ -2564,7 +2576,7 @@ static void reset_ep_state(struct htc_target *target)
2564 INIT_LIST_HEAD(&target->cred_dist_list); 2576 INIT_LIST_HEAD(&target->cred_dist_list);
2565} 2577}
2566 2578
2567int ath6kl_htc_get_rxbuf_num(struct htc_target *target, 2579static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
2568 enum htc_endpoint_id endpoint) 2580 enum htc_endpoint_id endpoint)
2569{ 2581{
2570 int num; 2582 int num;
@@ -2624,7 +2636,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
2624 } 2636 }
2625} 2637}
2626 2638
2627int ath6kl_htc_wait_target(struct htc_target *target) 2639static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
2628{ 2640{
2629 struct htc_packet *packet = NULL; 2641 struct htc_packet *packet = NULL;
2630 struct htc_ready_ext_msg *rdy_msg; 2642 struct htc_ready_ext_msg *rdy_msg;
@@ -2693,12 +2705,12 @@ int ath6kl_htc_wait_target(struct htc_target *target)
2693 connect.svc_id = HTC_CTRL_RSVD_SVC; 2705 connect.svc_id = HTC_CTRL_RSVD_SVC;
2694 2706
2695 /* connect fake service */ 2707 /* connect fake service */
2696 status = ath6kl_htc_conn_service((void *)target, &connect, &resp); 2708 status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);
2697 2709
2698 if (status) 2710 if (status)
2699 /* 2711 /*
2700 * FIXME: this call doesn't make sense, the caller should 2712 * FIXME: this call doesn't make sense, the caller should
2701 * call ath6kl_htc_cleanup() when it wants remove htc 2713 * call ath6kl_htc_mbox_cleanup() when it wants remove htc
2702 */ 2714 */
2703 ath6kl_hif_cleanup_scatter(target->dev->ar); 2715 ath6kl_hif_cleanup_scatter(target->dev->ar);
2704 2716
@@ -2715,7 +2727,7 @@ fail_wait_target:
2715 * Start HTC, enable interrupts and let the target know 2727 * Start HTC, enable interrupts and let the target know
2716 * host has finished setup. 2728 * host has finished setup.
2717 */ 2729 */
2718int ath6kl_htc_start(struct htc_target *target) 2730static int ath6kl_htc_mbox_start(struct htc_target *target)
2719{ 2731{
2720 struct htc_packet *packet; 2732 struct htc_packet *packet;
2721 int status; 2733 int status;
@@ -2752,7 +2764,7 @@ int ath6kl_htc_start(struct htc_target *target)
2752 status = ath6kl_hif_unmask_intrs(target->dev); 2764 status = ath6kl_hif_unmask_intrs(target->dev);
2753 2765
2754 if (status) 2766 if (status)
2755 ath6kl_htc_stop(target); 2767 ath6kl_htc_mbox_stop(target);
2756 2768
2757 return status; 2769 return status;
2758} 2770}
@@ -2796,7 +2808,7 @@ static int ath6kl_htc_reset(struct htc_target *target)
2796} 2808}
2797 2809
2798/* htc_stop: stop interrupt reception, and flush all queued buffers */ 2810/* htc_stop: stop interrupt reception, and flush all queued buffers */
2799void ath6kl_htc_stop(struct htc_target *target) 2811static void ath6kl_htc_mbox_stop(struct htc_target *target)
2800{ 2812{
2801 spin_lock_bh(&target->htc_lock); 2813 spin_lock_bh(&target->htc_lock);
2802 target->htc_flags |= HTC_OP_STATE_STOPPING; 2814 target->htc_flags |= HTC_OP_STATE_STOPPING;
@@ -2811,12 +2823,12 @@ void ath6kl_htc_stop(struct htc_target *target)
2811 2823
2812 ath6kl_htc_flush_txep_all(target); 2824 ath6kl_htc_flush_txep_all(target);
2813 2825
2814 ath6kl_htc_flush_rx_buf(target); 2826 ath6kl_htc_mbox_flush_rx_buf(target);
2815 2827
2816 ath6kl_htc_reset(target); 2828 ath6kl_htc_reset(target);
2817} 2829}
2818 2830
2819void *ath6kl_htc_create(struct ath6kl *ar) 2831static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
2820{ 2832{
2821 struct htc_target *target = NULL; 2833 struct htc_target *target = NULL;
2822 int status = 0; 2834 int status = 0;
@@ -2857,13 +2869,13 @@ void *ath6kl_htc_create(struct ath6kl *ar)
2857 return target; 2869 return target;
2858 2870
2859err_htc_cleanup: 2871err_htc_cleanup:
2860 ath6kl_htc_cleanup(target); 2872 ath6kl_htc_mbox_cleanup(target);
2861 2873
2862 return NULL; 2874 return NULL;
2863} 2875}
2864 2876
2865/* cleanup the HTC instance */ 2877/* cleanup the HTC instance */
2866void ath6kl_htc_cleanup(struct htc_target *target) 2878static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
2867{ 2879{
2868 struct htc_packet *packet, *tmp_packet; 2880 struct htc_packet *packet, *tmp_packet;
2869 2881
@@ -2888,3 +2900,24 @@ void ath6kl_htc_cleanup(struct htc_target *target)
2888 kfree(target->dev); 2900 kfree(target->dev);
2889 kfree(target); 2901 kfree(target);
2890} 2902}
2903
2904static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
2905 .create = ath6kl_htc_mbox_create,
2906 .wait_target = ath6kl_htc_mbox_wait_target,
2907 .start = ath6kl_htc_mbox_start,
2908 .conn_service = ath6kl_htc_mbox_conn_service,
2909 .tx = ath6kl_htc_mbox_tx,
2910 .stop = ath6kl_htc_mbox_stop,
2911 .cleanup = ath6kl_htc_mbox_cleanup,
2912 .flush_txep = ath6kl_htc_mbox_flush_txep,
2913 .flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
2914 .activity_changed = ath6kl_htc_mbox_activity_changed,
2915 .get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
2916 .add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
2917 .credit_setup = ath6kl_htc_mbox_credit_setup,
2918};
2919
2920void ath6kl_htc_mbox_attach(struct ath6kl *ar)
2921{
2922 ar->htc_ops = &ath6kl_htc_mbox_ops;
2923}
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
new file mode 100644
index 000000000000..b277b3446882
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -0,0 +1,1713 @@
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "debug.h"
19#include "hif-ops.h"
20
21#define HTC_PACKET_CONTAINER_ALLOCATION 32
22#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
23
24static int ath6kl_htc_pipe_tx(struct htc_target *handle,
25 struct htc_packet *packet);
26static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
27
28/* htc pipe tx path */
29static inline void restore_tx_packet(struct htc_packet *packet)
30{
31 if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
32 skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
33 packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
34 }
35}
36
37static void do_send_completion(struct htc_endpoint *ep,
38 struct list_head *queue_to_indicate)
39{
40 struct htc_packet *packet;
41
42 if (list_empty(queue_to_indicate)) {
43 /* nothing to indicate */
44 return;
45 }
46
47 if (ep->ep_cb.tx_comp_multi != NULL) {
48 ath6kl_dbg(ATH6KL_DBG_HTC,
49 "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
50 __func__, ep->eid,
51 get_queue_depth(queue_to_indicate));
52 /*
53 * a multiple send complete handler is being used,
54 * pass the queue to the handler
55 */
56 ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
57 /*
58 * all packets are now owned by the callback,
59 * reset queue to be safe
60 */
61 INIT_LIST_HEAD(queue_to_indicate);
62 } else {
63 /* using legacy EpTxComplete */
64 do {
65 packet = list_first_entry(queue_to_indicate,
66 struct htc_packet, list);
67
68 list_del(&packet->list);
69 ath6kl_dbg(ATH6KL_DBG_HTC,
70 "%s: calling ep %d send complete callback on packet 0x%p\n",
71 __func__, ep->eid, packet);
72 ep->ep_cb.tx_complete(ep->target, packet);
73 } while (!list_empty(queue_to_indicate));
74 }
75}
76
77static void send_packet_completion(struct htc_target *target,
78 struct htc_packet *packet)
79{
80 struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
81 struct list_head container;
82
83 restore_tx_packet(packet);
84 INIT_LIST_HEAD(&container);
85 list_add_tail(&packet->list, &container);
86
87 /* do completion */
88 do_send_completion(ep, &container);
89}
90
91static void get_htc_packet_credit_based(struct htc_target *target,
92 struct htc_endpoint *ep,
93 struct list_head *queue)
94{
95 int credits_required;
96 int remainder;
97 u8 send_flags;
98 struct htc_packet *packet;
99 unsigned int transfer_len;
100
101 /* NOTE : the TX lock is held when this function is called */
102
103 /* loop until we can grab as many packets out of the queue as we can */
104 while (true) {
105 send_flags = 0;
106 if (list_empty(&ep->txq))
107 break;
108
109 /* get packet at head, but don't remove it */
110 packet = list_first_entry(&ep->txq, struct htc_packet, list);
111 if (packet == NULL)
112 break;
113
114 ath6kl_dbg(ATH6KL_DBG_HTC,
115 "%s: got head packet:0x%p , queue depth: %d\n",
116 __func__, packet, get_queue_depth(&ep->txq));
117
118 transfer_len = packet->act_len + HTC_HDR_LENGTH;
119
120 if (transfer_len <= target->tgt_cred_sz) {
121 credits_required = 1;
122 } else {
123 /* figure out how many credits this message requires */
124 credits_required = transfer_len / target->tgt_cred_sz;
125 remainder = transfer_len % target->tgt_cred_sz;
126
127 if (remainder)
128 credits_required++;
129 }
130
131 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
132 __func__, credits_required, ep->cred_dist.credits);
133
134 if (ep->eid == ENDPOINT_0) {
135 /*
136 * endpoint 0 is special, it always has a credit and
137 * does not require credit based flow control
138 */
139 credits_required = 0;
140
141 } else {
142
143 if (ep->cred_dist.credits < credits_required)
144 break;
145
146 ep->cred_dist.credits -= credits_required;
147 ep->ep_st.cred_cosumd += credits_required;
148
149 /* check if we need credits back from the target */
150 if (ep->cred_dist.credits <
151 ep->cred_dist.cred_per_msg) {
152 /* tell the target we need credits ASAP! */
153 send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
154 ep->ep_st.cred_low_indicate += 1;
155 ath6kl_dbg(ATH6KL_DBG_HTC,
156 "%s: host needs credits\n",
157 __func__);
158 }
159 }
160
161 /* now we can fully dequeue */
162 packet = list_first_entry(&ep->txq, struct htc_packet, list);
163
164 list_del(&packet->list);
165 /* save the number of credits this packet consumed */
166 packet->info.tx.cred_used = credits_required;
167 /* save send flags */
168 packet->info.tx.flags = send_flags;
169 packet->info.tx.seqno = ep->seqno;
170 ep->seqno++;
171 /* queue this packet into the caller's queue */
172 list_add_tail(&packet->list, queue);
173 }
174
175}
176
177static void get_htc_packet(struct htc_target *target,
178 struct htc_endpoint *ep,
179 struct list_head *queue, int resources)
180{
181 struct htc_packet *packet;
182
183 /* NOTE : the TX lock is held when this function is called */
184
185 /* loop until we can grab as many packets out of the queue as we can */
186 while (resources) {
187 if (list_empty(&ep->txq))
188 break;
189
190 packet = list_first_entry(&ep->txq, struct htc_packet, list);
191 list_del(&packet->list);
192
193 ath6kl_dbg(ATH6KL_DBG_HTC,
194 "%s: got packet:0x%p , new queue depth: %d\n",
195 __func__, packet, get_queue_depth(&ep->txq));
196 packet->info.tx.seqno = ep->seqno;
197 packet->info.tx.flags = 0;
198 packet->info.tx.cred_used = 0;
199 ep->seqno++;
200
201 /* queue this packet into the caller's queue */
202 list_add_tail(&packet->list, queue);
203 resources--;
204 }
205}
206
207static int htc_issue_packets(struct htc_target *target,
208 struct htc_endpoint *ep,
209 struct list_head *pkt_queue)
210{
211 int status = 0;
212 u16 payload_len;
213 struct sk_buff *skb;
214 struct htc_frame_hdr *htc_hdr;
215 struct htc_packet *packet;
216
217 ath6kl_dbg(ATH6KL_DBG_HTC,
218 "%s: queue: 0x%p, pkts %d\n", __func__,
219 pkt_queue, get_queue_depth(pkt_queue));
220
221 while (!list_empty(pkt_queue)) {
222 packet = list_first_entry(pkt_queue, struct htc_packet, list);
223 list_del(&packet->list);
224
225 skb = packet->skb;
226 if (!skb) {
227 WARN_ON_ONCE(1);
228 status = -EINVAL;
229 break;
230 }
231
232 payload_len = packet->act_len;
233
234 /* setup HTC frame header */
235 htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
236 sizeof(*htc_hdr));
237 if (!htc_hdr) {
238 WARN_ON_ONCE(1);
239 status = -EINVAL;
240 break;
241 }
242
243 packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;
244
245 /* Endianess? */
246 put_unaligned((u16) payload_len, &htc_hdr->payld_len);
247 htc_hdr->flags = packet->info.tx.flags;
248 htc_hdr->eid = (u8) packet->endpoint;
249 htc_hdr->ctrl[0] = 0;
250 htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;
251
252 spin_lock_bh(&target->tx_lock);
253
254 /* store in look up queue to match completions */
255 list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
256 ep->ep_st.tx_issued += 1;
257 spin_unlock_bh(&target->tx_lock);
258
259 status = ath6kl_hif_pipe_send(target->dev->ar,
260 ep->pipe.pipeid_ul, NULL, skb);
261
262 if (status != 0) {
263 if (status != -ENOMEM) {
264 /* TODO: if more than 1 endpoint maps to the
265 * same PipeID, it is possible to run out of
266 * resources in the HIF layer.
267 * Don't emit the error
268 */
269 ath6kl_dbg(ATH6KL_DBG_HTC,
270 "%s: failed status:%d\n",
271 __func__, status);
272 }
273 spin_lock_bh(&target->tx_lock);
274 list_del(&packet->list);
275
276 /* reclaim credits */
277 ep->cred_dist.credits += packet->info.tx.cred_used;
278 spin_unlock_bh(&target->tx_lock);
279
280 /* put it back into the callers queue */
281 list_add(&packet->list, pkt_queue);
282 break;
283 }
284
285 }
286
287 if (status != 0) {
288 while (!list_empty(pkt_queue)) {
289 if (status != -ENOMEM) {
290 ath6kl_dbg(ATH6KL_DBG_HTC,
291 "%s: failed pkt:0x%p status:%d\n",
292 __func__, packet, status);
293 }
294
295 packet = list_first_entry(pkt_queue,
296 struct htc_packet, list);
297 list_del(&packet->list);
298 packet->status = status;
299 send_packet_completion(target, packet);
300 }
301 }
302
303 return status;
304}
305
306static enum htc_send_queue_result htc_try_send(struct htc_target *target,
307 struct htc_endpoint *ep,
308 struct list_head *txq)
309{
310 struct list_head send_queue; /* temp queue to hold packets */
311 struct htc_packet *packet, *tmp_pkt;
312 struct ath6kl *ar = target->dev->ar;
313 enum htc_send_full_action action;
314 int tx_resources, overflow, txqueue_depth, i, good_pkts;
315 u8 pipeid;
316
317 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
318 __func__, txq,
319 (txq == NULL) ? 0 : get_queue_depth(txq));
320
321 /* init the local send queue */
322 INIT_LIST_HEAD(&send_queue);
323
324 /*
325 * txq equals to NULL means
326 * caller didn't provide a queue, just wants us to
327 * check queues and send
328 */
329 if (txq != NULL) {
330 if (list_empty(txq)) {
331 /* empty queue */
332 return HTC_SEND_QUEUE_DROP;
333 }
334
335 spin_lock_bh(&target->tx_lock);
336 txqueue_depth = get_queue_depth(&ep->txq);
337 spin_unlock_bh(&target->tx_lock);
338
339 if (txqueue_depth >= ep->max_txq_depth) {
340 /* we've already overflowed */
341 overflow = get_queue_depth(txq);
342 } else {
343 /* get how much we will overflow by */
344 overflow = txqueue_depth;
345 overflow += get_queue_depth(txq);
346 /* get how much we will overflow the TX queue by */
347 overflow -= ep->max_txq_depth;
348 }
349
350 /* if overflow is negative or zero, we are okay */
351 if (overflow > 0) {
352 ath6kl_dbg(ATH6KL_DBG_HTC,
353 "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
354 __func__, ep->eid, overflow, txqueue_depth,
355 ep->max_txq_depth);
356 }
357 if ((overflow <= 0) ||
358 (ep->ep_cb.tx_full == NULL)) {
359 /*
360 * all packets will fit or caller did not provide send
361 * full indication handler -- just move all of them
362 * to the local send_queue object
363 */
364 list_splice_tail_init(txq, &send_queue);
365 } else {
366 good_pkts = get_queue_depth(txq) - overflow;
367 if (good_pkts < 0) {
368 WARN_ON_ONCE(1);
369 return HTC_SEND_QUEUE_DROP;
370 }
371
372 /* we have overflowed, and a callback is provided */
373 /* dequeue all non-overflow packets to the sendqueue */
374 for (i = 0; i < good_pkts; i++) {
375 /* pop off caller's queue */
376 packet = list_first_entry(txq,
377 struct htc_packet,
378 list);
379 list_del(&packet->list);
380 /* insert into local queue */
381 list_add_tail(&packet->list, &send_queue);
382 }
383
384 /*
385 * the caller's queue has all the packets that won't fit
386 * walk through the caller's queue and indicate each to
387 * the send full handler
388 */
389 list_for_each_entry_safe(packet, tmp_pkt,
390 txq, list) {
391
392 ath6kl_dbg(ATH6KL_DBG_HTC,
393 "%s: Indicat overflowed TX pkts: %p\n",
394 __func__, packet);
395 action = ep->ep_cb.tx_full(ep->target, packet);
396 if (action == HTC_SEND_FULL_DROP) {
397 /* callback wants the packet dropped */
398 ep->ep_st.tx_dropped += 1;
399
400 /* leave this one in the caller's queue
401 * for cleanup */
402 } else {
403 /* callback wants to keep this packet,
404 * remove from caller's queue */
405 list_del(&packet->list);
406 /* put it in the send queue */
407 list_add_tail(&packet->list,
408 &send_queue);
409 }
410
411 }
412
413 if (list_empty(&send_queue)) {
414 /* no packets made it in, caller will cleanup */
415 return HTC_SEND_QUEUE_DROP;
416 }
417 }
418 }
419
420 if (!ep->pipe.tx_credit_flow_enabled) {
421 tx_resources =
422 ath6kl_hif_pipe_get_free_queue_number(ar,
423 ep->pipe.pipeid_ul);
424 } else {
425 tx_resources = 0;
426 }
427
428 spin_lock_bh(&target->tx_lock);
429 if (!list_empty(&send_queue)) {
430 /* transfer packets to tail */
431 list_splice_tail_init(&send_queue, &ep->txq);
432 if (!list_empty(&send_queue)) {
433 WARN_ON_ONCE(1);
434 spin_unlock_bh(&target->tx_lock);
435 return HTC_SEND_QUEUE_DROP;
436 }
437 INIT_LIST_HEAD(&send_queue);
438 }
439
440 /* increment tx processing count on entry */
441 ep->tx_proc_cnt++;
442
443 if (ep->tx_proc_cnt > 1) {
444 /*
445 * Another thread or task is draining the TX queues on this
446 * endpoint that thread will reset the tx processing count
447 * when the queue is drained.
448 */
449 ep->tx_proc_cnt--;
450 spin_unlock_bh(&target->tx_lock);
451 return HTC_SEND_QUEUE_OK;
452 }
453
454 /***** beyond this point only 1 thread may enter ******/
455
456 /*
457 * Now drain the endpoint TX queue for transmission as long as we have
458 * enough transmit resources.
459 */
460 while (true) {
461
462 if (get_queue_depth(&ep->txq) == 0)
463 break;
464
465 if (ep->pipe.tx_credit_flow_enabled) {
466 /*
467 * Credit based mechanism provides flow control
468 * based on target transmit resource availability,
469 * we assume that the HIF layer will always have
470 * bus resources greater than target transmit
471 * resources.
472 */
473 get_htc_packet_credit_based(target, ep, &send_queue);
474 } else {
475 /*
476 * Get all packets for this endpoint that we can
477 * for this pass.
478 */
479 get_htc_packet(target, ep, &send_queue, tx_resources);
480 }
481
482 if (get_queue_depth(&send_queue) == 0) {
483 /*
484 * Didn't get packets due to out of resources or TX
485 * queue was drained.
486 */
487 break;
488 }
489
490 spin_unlock_bh(&target->tx_lock);
491
492 /* send what we can */
493 htc_issue_packets(target, ep, &send_queue);
494
495 if (!ep->pipe.tx_credit_flow_enabled) {
496 pipeid = ep->pipe.pipeid_ul;
497 tx_resources =
498 ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
499 }
500
501 spin_lock_bh(&target->tx_lock);
502
503 }
504 /* done with this endpoint, we can clear the count */
505 ep->tx_proc_cnt = 0;
506 spin_unlock_bh(&target->tx_lock);
507
508 return HTC_SEND_QUEUE_OK;
509}
510
511/* htc control packet manipulation */
512static void destroy_htc_txctrl_packet(struct htc_packet *packet)
513{
514 struct sk_buff *skb;
515 skb = packet->skb;
516 if (skb != NULL)
517 dev_kfree_skb(skb);
518
519 kfree(packet);
520}
521
522static struct htc_packet *build_htc_txctrl_packet(void)
523{
524 struct htc_packet *packet = NULL;
525 struct sk_buff *skb;
526
527 packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
528 if (packet == NULL)
529 return NULL;
530
531 skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
532
533 if (skb == NULL) {
534 kfree(packet);
535 return NULL;
536 }
537 packet->skb = skb;
538
539 return packet;
540}
541
542static void htc_free_txctrl_packet(struct htc_target *target,
543 struct htc_packet *packet)
544{
545 destroy_htc_txctrl_packet(packet);
546}
547
548static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
549{
550 return build_htc_txctrl_packet();
551}
552
553static void htc_txctrl_complete(struct htc_target *target,
554 struct htc_packet *packet)
555{
556 htc_free_txctrl_packet(target, packet);
557}
558
559#define MAX_MESSAGE_SIZE 1536
560
561static int htc_setup_target_buffer_assignments(struct htc_target *target)
562{
563 int status, credits, credit_per_maxmsg, i;
564 struct htc_pipe_txcredit_alloc *entry;
565 unsigned int hif_usbaudioclass = 0;
566
567 credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
568 if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
569 credit_per_maxmsg++;
570
571 /* TODO, this should be configured by the caller! */
572
573 credits = target->tgt_creds;
574 entry = &target->pipe.txcredit_alloc[0];
575
576 status = -ENOMEM;
577
578 /* FIXME: hif_usbaudioclass is always zero */
579 if (hif_usbaudioclass) {
580 ath6kl_dbg(ATH6KL_DBG_HTC,
581 "%s: For USB Audio Class- Total:%d\n",
582 __func__, credits);
583 entry++;
584 entry++;
585 /* Setup VO Service To have Max Credits */
586 entry->service_id = WMI_DATA_VO_SVC;
587 entry->credit_alloc = (credits - 6);
588 if (entry->credit_alloc == 0)
589 entry->credit_alloc++;
590
591 credits -= (int) entry->credit_alloc;
592 if (credits <= 0)
593 return status;
594
595 entry++;
596 entry->service_id = WMI_CONTROL_SVC;
597 entry->credit_alloc = credit_per_maxmsg;
598 credits -= (int) entry->credit_alloc;
599 if (credits <= 0)
600 return status;
601
602 /* leftovers go to best effort */
603 entry++;
604 entry++;
605 entry->service_id = WMI_DATA_BE_SVC;
606 entry->credit_alloc = (u8) credits;
607 status = 0;
608 } else {
609 entry++;
610 entry->service_id = WMI_DATA_VI_SVC;
611 entry->credit_alloc = credits / 4;
612 if (entry->credit_alloc == 0)
613 entry->credit_alloc++;
614
615 credits -= (int) entry->credit_alloc;
616 if (credits <= 0)
617 return status;
618
619 entry++;
620 entry->service_id = WMI_DATA_VO_SVC;
621 entry->credit_alloc = credits / 4;
622 if (entry->credit_alloc == 0)
623 entry->credit_alloc++;
624
625 credits -= (int) entry->credit_alloc;
626 if (credits <= 0)
627 return status;
628
629 entry++;
630 entry->service_id = WMI_CONTROL_SVC;
631 entry->credit_alloc = credit_per_maxmsg;
632 credits -= (int) entry->credit_alloc;
633 if (credits <= 0)
634 return status;
635
636 entry++;
637 entry->service_id = WMI_DATA_BK_SVC;
638 entry->credit_alloc = credit_per_maxmsg;
639 credits -= (int) entry->credit_alloc;
640 if (credits <= 0)
641 return status;
642
643 /* leftovers go to best effort */
644 entry++;
645 entry->service_id = WMI_DATA_BE_SVC;
646 entry->credit_alloc = (u8) credits;
647 status = 0;
648 }
649
650 if (status == 0) {
651 for (i = 0; i < ENDPOINT_MAX; i++) {
652 if (target->pipe.txcredit_alloc[i].service_id != 0) {
653 ath6kl_dbg(ATH6KL_DBG_HTC,
654 "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
655 i,
656 target->pipe.txcredit_alloc[i].
657 service_id,
658 target->pipe.txcredit_alloc[i].
659 credit_alloc);
660 }
661 }
662 }
663 return status;
664}
665
666/* process credit reports and call distribution function */
667static void htc_process_credit_report(struct htc_target *target,
668 struct htc_credit_report *rpt,
669 int num_entries,
670 enum htc_endpoint_id from_ep)
671{
672 int total_credits = 0, i;
673 struct htc_endpoint *ep;
674
675 /* lock out TX while we update credits */
676 spin_lock_bh(&target->tx_lock);
677
678 for (i = 0; i < num_entries; i++, rpt++) {
679 if (rpt->eid >= ENDPOINT_MAX) {
680 WARN_ON_ONCE(1);
681 spin_unlock_bh(&target->tx_lock);
682 return;
683 }
684
685 ep = &target->endpoint[rpt->eid];
686 ep->cred_dist.credits += rpt->credits;
687
688 if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
689 spin_unlock_bh(&target->tx_lock);
690 htc_try_send(target, ep, NULL);
691 spin_lock_bh(&target->tx_lock);
692 }
693
694 total_credits += rpt->credits;
695 }
696 ath6kl_dbg(ATH6KL_DBG_HTC,
697 "Report indicated %d credits to distribute\n",
698 total_credits);
699
700 spin_unlock_bh(&target->tx_lock);
701}
702
703/* flush endpoint TX queue */
704static void htc_flush_tx_endpoint(struct htc_target *target,
705 struct htc_endpoint *ep, u16 tag)
706{
707 struct htc_packet *packet;
708
709 spin_lock_bh(&target->tx_lock);
710 while (get_queue_depth(&ep->txq)) {
711 packet = list_first_entry(&ep->txq, struct htc_packet, list);
712 list_del(&packet->list);
713 packet->status = 0;
714 send_packet_completion(target, packet);
715 }
716 spin_unlock_bh(&target->tx_lock);
717}
718
719/*
720 * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC,
721 * since upper layers expects struct htc_packet containers we use the completed
722 * skb and lookup it's corresponding HTC packet buffer from a lookup list.
723 * This is extra overhead that can be fixed by re-aligning HIF interfaces with
724 * HTC.
725 */
726static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
727 struct htc_endpoint *ep,
728 struct sk_buff *skb)
729{
730 struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
731
732 spin_lock_bh(&target->tx_lock);
733
734 /*
735 * interate from the front of tx lookup queue
736 * this lookup should be fast since lower layers completes in-order and
737 * so the completed packet should be at the head of the list generally
738 */
739 list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
740 list) {
741 /* check for removal */
742 if (skb == packet->skb) {
743 /* found it */
744 list_del(&packet->list);
745 found_packet = packet;
746 break;
747 }
748 }
749
750 spin_unlock_bh(&target->tx_lock);
751
752 return found_packet;
753}
754
755static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
756{
757 struct htc_target *target = ar->htc_target;
758 struct htc_frame_hdr *htc_hdr;
759 struct htc_endpoint *ep;
760 struct htc_packet *packet;
761 u8 ep_id, *netdata;
762 u32 netlen;
763
764 netdata = skb->data;
765 netlen = skb->len;
766
767 htc_hdr = (struct htc_frame_hdr *) netdata;
768
769 ep_id = htc_hdr->eid;
770 ep = &target->endpoint[ep_id];
771
772 packet = htc_lookup_tx_packet(target, ep, skb);
773 if (packet == NULL) {
774 /* may have already been flushed and freed */
775 ath6kl_err("HTC TX lookup failed!\n");
776 } else {
777 /* will be giving this buffer back to upper layers */
778 packet->status = 0;
779 send_packet_completion(target, packet);
780 }
781 skb = NULL;
782
783 if (!ep->pipe.tx_credit_flow_enabled) {
784 /*
785 * note: when using TX credit flow, the re-checking of queues
786 * happens when credits flow back from the target. in the
787 * non-TX credit case, we recheck after the packet completes
788 */
789 htc_try_send(target, ep, NULL);
790 }
791
792 return 0;
793}
794
795static int htc_send_packets_multiple(struct htc_target *target,
796 struct list_head *pkt_queue)
797{
798 struct htc_endpoint *ep;
799 struct htc_packet *packet, *tmp_pkt;
800
801 if (list_empty(pkt_queue))
802 return -EINVAL;
803
804 /* get first packet to find out which ep the packets will go into */
805 packet = list_first_entry(pkt_queue, struct htc_packet, list);
806 if (packet == NULL)
807 return -EINVAL;
808
809 if (packet->endpoint >= ENDPOINT_MAX) {
810 WARN_ON_ONCE(1);
811 return -EINVAL;
812 }
813 ep = &target->endpoint[packet->endpoint];
814
815 htc_try_send(target, ep, pkt_queue);
816
817 /* do completion on any packets that couldn't get in */
818 if (!list_empty(pkt_queue)) {
819 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
820 packet->status = -ENOMEM;
821 }
822
823 do_send_completion(ep, pkt_queue);
824 }
825
826 return 0;
827}
828
829/* htc pipe rx path */
830static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
831{
832 struct htc_packet *packet;
833 spin_lock_bh(&target->rx_lock);
834
835 if (target->pipe.htc_packet_pool == NULL) {
836 spin_unlock_bh(&target->rx_lock);
837 return NULL;
838 }
839
840 packet = target->pipe.htc_packet_pool;
841 target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
842
843 spin_unlock_bh(&target->rx_lock);
844
845 packet->list.next = NULL;
846 return packet;
847}
848
849static void free_htc_packet_container(struct htc_target *target,
850 struct htc_packet *packet)
851{
852 struct list_head *lh;
853
854 spin_lock_bh(&target->rx_lock);
855
856 if (target->pipe.htc_packet_pool == NULL) {
857 target->pipe.htc_packet_pool = packet;
858 packet->list.next = NULL;
859 } else {
860 lh = (struct list_head *) target->pipe.htc_packet_pool;
861 packet->list.next = lh;
862 target->pipe.htc_packet_pool = packet;
863 }
864
865 spin_unlock_bh(&target->rx_lock);
866}
867
868static int htc_process_trailer(struct htc_target *target, u8 *buffer,
869 int len, enum htc_endpoint_id from_ep)
870{
871 struct htc_credit_report *report;
872 struct htc_record_hdr *record;
873 u8 *record_buf, *orig_buf;
874 int orig_len, status;
875
876 orig_buf = buffer;
877 orig_len = len;
878 status = 0;
879
880 while (len > 0) {
881 if (len < sizeof(struct htc_record_hdr)) {
882 status = -EINVAL;
883 break;
884 }
885
886 /* these are byte aligned structs */
887 record = (struct htc_record_hdr *) buffer;
888 len -= sizeof(struct htc_record_hdr);
889 buffer += sizeof(struct htc_record_hdr);
890
891 if (record->len > len) {
892 /* no room left in buffer for record */
893 ath6kl_dbg(ATH6KL_DBG_HTC,
894 "invalid length: %d (id:%d) buffer has: %d bytes left\n",
895 record->len, record->rec_id, len);
896 status = -EINVAL;
897 break;
898 }
899
900 /* start of record follows the header */
901 record_buf = buffer;
902
903 switch (record->rec_id) {
904 case HTC_RECORD_CREDITS:
905 if (record->len < sizeof(struct htc_credit_report)) {
906 WARN_ON_ONCE(1);
907 return -EINVAL;
908 }
909
910 report = (struct htc_credit_report *) record_buf;
911 htc_process_credit_report(target, report,
912 record->len / sizeof(*report),
913 from_ep);
914 break;
915 default:
916 ath6kl_dbg(ATH6KL_DBG_HTC,
917 "unhandled record: id:%d length:%d\n",
918 record->rec_id, record->len);
919 break;
920 }
921
922 if (status != 0)
923 break;
924
925 /* advance buffer past this record for next time around */
926 buffer += record->len;
927 len -= record->len;
928 }
929
930 return status;
931}
932
933static void do_recv_completion(struct htc_endpoint *ep,
934 struct list_head *queue_to_indicate)
935{
936 struct htc_packet *packet;
937
938 if (list_empty(queue_to_indicate)) {
939 /* nothing to indicate */
940 return;
941 }
942
943 /* using legacy EpRecv */
944 while (!list_empty(queue_to_indicate)) {
945 packet = list_first_entry(queue_to_indicate,
946 struct htc_packet, list);
947 list_del(&packet->list);
948 ep->ep_cb.rx(ep->target, packet);
949 }
950
951 return;
952}
953
954static void recv_packet_completion(struct htc_target *target,
955 struct htc_endpoint *ep,
956 struct htc_packet *packet)
957{
958 struct list_head container;
959 INIT_LIST_HEAD(&container);
960 list_add_tail(&packet->list, &container);
961
962 /* do completion */
963 do_recv_completion(ep, &container);
964}
965
966static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
967 u8 pipeid)
968{
969 struct htc_target *target = ar->htc_target;
970 u8 *netdata, *trailer, hdr_info;
971 struct htc_frame_hdr *htc_hdr;
972 u32 netlen, trailerlen = 0;
973 struct htc_packet *packet;
974 struct htc_endpoint *ep;
975 u16 payload_len;
976 int status = 0;
977
978 netdata = skb->data;
979 netlen = skb->len;
980
981 htc_hdr = (struct htc_frame_hdr *) netdata;
982
983 ep = &target->endpoint[htc_hdr->eid];
984
985 if (htc_hdr->eid >= ENDPOINT_MAX) {
986 ath6kl_dbg(ATH6KL_DBG_HTC,
987 "HTC Rx: invalid EndpointID=%d\n",
988 htc_hdr->eid);
989 status = -EINVAL;
990 goto free_skb;
991 }
992
993 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
994
995 if (netlen < (payload_len + HTC_HDR_LENGTH)) {
996 ath6kl_dbg(ATH6KL_DBG_HTC,
997 "HTC Rx: insufficient length, got:%d expected =%u\n",
998 netlen, payload_len + HTC_HDR_LENGTH);
999 status = -EINVAL;
1000 goto free_skb;
1001 }
1002
1003 /* get flags to check for trailer */
1004 hdr_info = htc_hdr->flags;
1005 if (hdr_info & HTC_FLG_RX_TRAILER) {
1006 /* extract the trailer length */
1007 hdr_info = htc_hdr->ctrl[0];
1008 if ((hdr_info < sizeof(struct htc_record_hdr)) ||
1009 (hdr_info > payload_len)) {
1010 ath6kl_dbg(ATH6KL_DBG_HTC,
1011 "invalid header: payloadlen should be %d, CB[0]: %d\n",
1012 payload_len, hdr_info);
1013 status = -EINVAL;
1014 goto free_skb;
1015 }
1016
1017 trailerlen = hdr_info;
1018 /* process trailer after hdr/apps payload */
1019 trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
1020 payload_len - hdr_info;
1021 status = htc_process_trailer(target, trailer, hdr_info,
1022 htc_hdr->eid);
1023 if (status != 0)
1024 goto free_skb;
1025 }
1026
1027 if (((int) payload_len - (int) trailerlen) <= 0) {
1028 /* zero length packet with trailer, just drop these */
1029 goto free_skb;
1030 }
1031
1032 if (htc_hdr->eid == ENDPOINT_0) {
1033 /* handle HTC control message */
1034 if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
1035 /*
1036 * fatal: target should not send unsolicited
1037 * messageson the endpoint 0
1038 */
1039 ath6kl_dbg(ATH6KL_DBG_HTC,
1040 "HTC ignores Rx Ctrl after setup complete\n");
1041 status = -EINVAL;
1042 goto free_skb;
1043 }
1044
1045 /* remove HTC header */
1046 skb_pull(skb, HTC_HDR_LENGTH);
1047
1048 netdata = skb->data;
1049 netlen = skb->len;
1050
1051 spin_lock_bh(&target->rx_lock);
1052
1053 target->pipe.ctrl_response_valid = true;
1054 target->pipe.ctrl_response_len = min_t(int, netlen,
1055 HTC_MAX_CTRL_MSG_LEN);
1056 memcpy(target->pipe.ctrl_response_buf, netdata,
1057 target->pipe.ctrl_response_len);
1058
1059 spin_unlock_bh(&target->rx_lock);
1060
1061 dev_kfree_skb(skb);
1062 skb = NULL;
1063 goto free_skb;
1064 }
1065
1066 /*
1067 * TODO: the message based HIF architecture allocates net bufs
1068 * for recv packets since it bridges that HIF to upper layers,
1069 * which expects HTC packets, we form the packets here
1070 */
1071 packet = alloc_htc_packet_container(target);
1072 if (packet == NULL) {
1073 status = -ENOMEM;
1074 goto free_skb;
1075 }
1076
1077 packet->status = 0;
1078 packet->endpoint = htc_hdr->eid;
1079 packet->pkt_cntxt = skb;
1080
1081 /* TODO: for backwards compatibility */
1082 packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
1083 packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;
1084
1085 /*
1086 * TODO: this is a hack because the driver layer will set the
1087 * actual len of the skb again which will just double the len
1088 */
1089 skb_trim(skb, 0);
1090
1091 recv_packet_completion(target, ep, packet);
1092
1093 /* recover the packet container */
1094 free_htc_packet_container(target, packet);
1095 skb = NULL;
1096
1097free_skb:
1098 if (skb != NULL)
1099 dev_kfree_skb(skb);
1100
1101 return status;
1102
1103}
1104
1105static void htc_flush_rx_queue(struct htc_target *target,
1106 struct htc_endpoint *ep)
1107{
1108 struct list_head container;
1109 struct htc_packet *packet;
1110
1111 spin_lock_bh(&target->rx_lock);
1112
1113 while (1) {
1114 if (list_empty(&ep->rx_bufq))
1115 break;
1116
1117 packet = list_first_entry(&ep->rx_bufq,
1118 struct htc_packet, list);
1119 list_del(&packet->list);
1120
1121 spin_unlock_bh(&target->rx_lock);
1122 packet->status = -ECANCELED;
1123 packet->act_len = 0;
1124
1125 ath6kl_dbg(ATH6KL_DBG_HTC,
1126 "Flushing RX packet:0x%p, length:%d, ep:%d\n",
1127 packet, packet->buf_len,
1128 packet->endpoint);
1129
1130 INIT_LIST_HEAD(&container);
1131 list_add_tail(&packet->list, &container);
1132
1133 /* give the packet back */
1134 do_recv_completion(ep, &container);
1135 spin_lock_bh(&target->rx_lock);
1136 }
1137
1138 spin_unlock_bh(&target->rx_lock);
1139}
1140
1141/* polling routine to wait for a control packet to be received */
1142static int htc_wait_recv_ctrl_message(struct htc_target *target)
1143{
1144 int count = HTC_TARGET_RESPONSE_POLL_COUNT;
1145
1146 while (count > 0) {
1147 spin_lock_bh(&target->rx_lock);
1148
1149 if (target->pipe.ctrl_response_valid) {
1150 target->pipe.ctrl_response_valid = false;
1151 spin_unlock_bh(&target->rx_lock);
1152 break;
1153 }
1154
1155 spin_unlock_bh(&target->rx_lock);
1156
1157 count--;
1158
1159 msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
1160 }
1161
1162 if (count <= 0) {
1163 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__);
1164 return -ECOMM;
1165 }
1166
1167 return 0;
1168}
1169
/* RX callback for the pseudo control endpoint.  Control responses are
 * captured directly into pipe.ctrl_response_buf by the RX path, so this
 * should never fire; it only logs if it does.
 */
static void htc_rxctrl_complete(struct htc_target *context,
				struct htc_packet *packet)
{
	/* TODO, can't really receive HTC control messages yet.... */
	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: invalid call function\n", __func__);
}
1176
1177/* htc pipe initialization */
1178static void reset_endpoint_states(struct htc_target *target)
1179{
1180 struct htc_endpoint *ep;
1181 int i;
1182
1183 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1184 ep = &target->endpoint[i];
1185 ep->svc_id = 0;
1186 ep->len_max = 0;
1187 ep->max_txq_depth = 0;
1188 ep->eid = i;
1189 INIT_LIST_HEAD(&ep->txq);
1190 INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
1191 INIT_LIST_HEAD(&ep->rx_bufq);
1192 ep->target = target;
1193 ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
1194 }
1195}
1196
/* start HTC, this is called after all services are connected */
/* placeholder: the pipe transport needs no extra HIF configuration */
static int htc_config_target_hif_pipe(struct htc_target *target)
{
	return 0;
}
1202
1203/* htc service functions */
1204static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
1205{
1206 u8 allocation = 0;
1207 int i;
1208
1209 for (i = 0; i < ENDPOINT_MAX; i++) {
1210 if (target->pipe.txcredit_alloc[i].service_id == service_id)
1211 allocation =
1212 target->pipe.txcredit_alloc[i].credit_alloc;
1213 }
1214
1215 if (allocation == 0) {
1216 ath6kl_dbg(ATH6KL_DBG_HTC,
1217 "HTC Service TX : 0x%2.2X : allocation is zero!\n",
1218 service_id);
1219 }
1220
1221 return allocation;
1222}
1223
/*
 * Connect an HTC service on the target and set up the matching host
 * endpoint.
 *
 * For the pseudo control service (HTC_CTRL_RSVD_SVC) no message exchange
 * takes place: ENDPOINT_0 is assigned directly.  For any other service a
 * connect-request message is transmitted on ENDPOINT_0 and the response
 * is polled for via htc_wait_recv_ctrl_message(), which leaves the reply
 * in target->pipe.ctrl_response_buf.
 *
 * Returns 0 on success with conn_resp->endpoint/len_max filled in, or a
 * negative errno on failure.
 */
static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
				    struct htc_service_connect_req *conn_req,
				    struct htc_service_connect_resp *conn_resp)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
	bool disable_credit_flowctrl = false;
	unsigned int max_msg_size = 0;
	struct htc_endpoint *ep;
	int length, status = 0;
	struct sk_buff *skb;
	u8 tx_alloc;
	u16 flags;

	/* svc_id 0 is the "unused endpoint" sentinel; never a valid request */
	if (conn_req->svc_id == 0) {
		WARN_ON_ONCE(1);
		status = -EINVAL;
		goto free_packet;
	}

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_epid = ENDPOINT_0;
		max_msg_size = HTC_MAX_CTRL_MSG_LEN;
		tx_alloc = 0;

	} else {

		/* the service must have a nonzero TX credit allocation in
		 * the table built at target-ready time
		 */
		tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
		if (tx_alloc == 0) {
			status = -ENOMEM;
			goto free_packet;
		}

		/* allocate a packet to send to the target */
		packet = htc_alloc_txctrl_packet(target);

		if (packet == NULL) {
			WARN_ON_ONCE(1);
			status = -ENOMEM;
			goto free_packet;
		}

		skb = packet->skb;
		length = sizeof(struct htc_conn_service_msg);

		/* assemble connect service message */
		conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
								   length);
		if (conn_msg == NULL) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		/* all message fields are little-endian on the wire */
		memset(conn_msg, 0,
		       sizeof(struct htc_conn_service_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
					~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);

		/* tell target desired recv alloc for this ep */
		flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
		conn_msg->conn_flags |= cpu_to_le16(flags);

		if (conn_req->conn_flags &
		    HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
			disable_credit_flowctrl = true;
		}

		set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
				 length,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		status = ath6kl_htc_pipe_tx(target, packet);

		/* we don't own it anymore */
		packet = NULL;
		if (status != 0)
			goto free_packet;

		/* wait for response */
		status = htc_wait_recv_ctrl_message(target);
		if (status != 0)
			goto free_packet;

		/* we controlled the buffer creation so it has to be
		 * properly aligned
		 */
		resp_msg = (struct htc_conn_service_resp *)
		    target->pipe.ctrl_response_buf;

		/* validate the reply's id and length before trusting it */
		if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
		    (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
			/* this message is not valid */
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "%s: service 0x%X conn resp: status: %d ep: %d\n",
			   __func__, resp_msg->svc_id, resp_msg->status,
			   resp_msg->eid);

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "Target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -EINVAL;
			goto free_packet;
		}

		/* the target chose the endpoint id and message size */
		assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
		max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
	}

	/* the rest are parameter checks so set the error status */
	status = -EINVAL;

	if (assigned_epid >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	if (max_msg_size == 0) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	ep = &target->endpoint[assigned_epid];
	ep->eid = assigned_epid;
	if (ep->svc_id != 0) {
		/* endpoint already in use! */
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_epid;
	conn_resp->len_max = max_msg_size;

	/* setup the endpoint */
	ep->svc_id = conn_req->svc_id; /* this marks ep in use */
	ep->max_txq_depth = conn_req->max_txq_depth;
	ep->len_max = max_msg_size;
	ep->cred_dist.credits = tx_alloc;
	ep->cred_dist.cred_sz = target->tgt_cred_sz;
	/* credits needed per max-size message, rounded up */
	ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
	if (max_msg_size % target->tgt_cred_sz)
		ep->cred_dist.cred_per_msg++;

	/* copy all the callbacks */
	ep->ep_cb = conn_req->ep_cb;

	/* map the service onto HIF upload/download pipe ids */
	status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
					     &ep->pipe.pipeid_ul,
					     &ep->pipe.pipeid_dl);
	if (status != 0)
		goto free_packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
		   ep->svc_id, ep->pipe.pipeid_ul,
		   ep->pipe.pipeid_dl, ep->eid);

	if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
		ep->pipe.tx_credit_flow_enabled = false;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "SVC: 0x%4.4X ep:%d TX flow control off\n",
			   ep->svc_id, assigned_epid);
	}

free_packet:
	if (packet != NULL)
		htc_free_txctrl_packet(target, packet);
	return status;
}
1408
1409/* htc export functions */
1410static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
1411{
1412 int status = 0;
1413 struct htc_endpoint *ep = NULL;
1414 struct htc_target *target = NULL;
1415 struct htc_packet *packet;
1416 int i;
1417
1418 target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
1419 if (target == NULL) {
1420 ath6kl_err("htc create unable to allocate memory\n");
1421 status = -ENOMEM;
1422 goto fail_htc_create;
1423 }
1424
1425 spin_lock_init(&target->htc_lock);
1426 spin_lock_init(&target->rx_lock);
1427 spin_lock_init(&target->tx_lock);
1428
1429 reset_endpoint_states(target);
1430
1431 for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
1432 packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
1433
1434 if (packet != NULL)
1435 free_htc_packet_container(target, packet);
1436 }
1437
1438 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
1439 if (!target->dev) {
1440 ath6kl_err("unable to allocate memory\n");
1441 status = -ENOMEM;
1442 goto fail_htc_create;
1443 }
1444 target->dev->ar = ar;
1445 target->dev->htc_cnxt = target;
1446
1447 /* Get HIF default pipe for HTC message exchange */
1448 ep = &target->endpoint[ENDPOINT_0];
1449
1450 ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
1451 &ep->pipe.pipeid_dl);
1452
1453 return target;
1454
1455fail_htc_create:
1456 if (status != 0) {
1457 if (target != NULL)
1458 ath6kl_htc_pipe_cleanup(target);
1459
1460 target = NULL;
1461 }
1462 return target;
1463}
1464
1465/* cleanup the HTC instance */
1466static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
1467{
1468 struct htc_packet *packet;
1469
1470 while (true) {
1471 packet = alloc_htc_packet_container(target);
1472 if (packet == NULL)
1473 break;
1474 kfree(packet);
1475 }
1476
1477 kfree(target->dev);
1478
1479 /* kfree our instance */
1480 kfree(target);
1481}
1482
1483static int ath6kl_htc_pipe_start(struct htc_target *target)
1484{
1485 struct sk_buff *skb;
1486 struct htc_setup_comp_ext_msg *setup;
1487 struct htc_packet *packet;
1488
1489 htc_config_target_hif_pipe(target);
1490
1491 /* allocate a buffer to send */
1492 packet = htc_alloc_txctrl_packet(target);
1493 if (packet == NULL) {
1494 WARN_ON_ONCE(1);
1495 return -ENOMEM;
1496 }
1497
1498 skb = packet->skb;
1499
1500 /* assemble setup complete message */
1501 setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
1502 sizeof(*setup));
1503 memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
1504 setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
1505
1506 ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");
1507
1508 set_htc_pkt_info(packet, NULL, (u8 *) setup,
1509 sizeof(struct htc_setup_comp_ext_msg),
1510 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1511
1512 target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
1513
1514 return ath6kl_htc_pipe_tx(target, packet);
1515}
1516
1517static void ath6kl_htc_pipe_stop(struct htc_target *target)
1518{
1519 int i;
1520 struct htc_endpoint *ep;
1521
1522 /* cleanup endpoints */
1523 for (i = 0; i < ENDPOINT_MAX; i++) {
1524 ep = &target->endpoint[i];
1525 htc_flush_rx_queue(target, ep);
1526 htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
1527 }
1528
1529 reset_endpoint_states(target);
1530 target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
1531}
1532
1533static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
1534 enum htc_endpoint_id endpoint)
1535{
1536 int num;
1537
1538 spin_lock_bh(&target->rx_lock);
1539 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
1540 spin_unlock_bh(&target->rx_lock);
1541
1542 return num;
1543}
1544
1545static int ath6kl_htc_pipe_tx(struct htc_target *target,
1546 struct htc_packet *packet)
1547{
1548 struct list_head queue;
1549
1550 ath6kl_dbg(ATH6KL_DBG_HTC,
1551 "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
1552 __func__, packet->endpoint, packet->buf,
1553 packet->act_len);
1554
1555 INIT_LIST_HEAD(&queue);
1556 list_add_tail(&packet->list, &queue);
1557
1558 return htc_send_packets_multiple(target, &queue);
1559}
1560
1561static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
1562{
1563 struct htc_ready_ext_msg *ready_msg;
1564 struct htc_service_connect_req connect;
1565 struct htc_service_connect_resp resp;
1566 int status = 0;
1567
1568 status = htc_wait_recv_ctrl_message(target);
1569
1570 if (status != 0)
1571 return status;
1572
1573 if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
1574 ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n",
1575 target->pipe.ctrl_response_len);
1576 return -ECOMM;
1577 }
1578
1579 ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
1580
1581 if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
1582 ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n",
1583 ready_msg->ver2_0_info.msg_id);
1584 return -ECOMM;
1585 }
1586
1587 ath6kl_dbg(ATH6KL_DBG_HTC,
1588 "Target Ready! : transmit resources : %d size:%d\n",
1589 ready_msg->ver2_0_info.cred_cnt,
1590 ready_msg->ver2_0_info.cred_sz);
1591
1592 target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
1593 target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);
1594
1595 if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
1596 return -ECOMM;
1597
1598 htc_setup_target_buffer_assignments(target);
1599
1600 /* setup our pseudo HTC control endpoint connection */
1601 memset(&connect, 0, sizeof(connect));
1602 memset(&resp, 0, sizeof(resp));
1603 connect.ep_cb.tx_complete = htc_txctrl_complete;
1604 connect.ep_cb.rx = htc_rxctrl_complete;
1605 connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
1606 connect.svc_id = HTC_CTRL_RSVD_SVC;
1607
1608 /* connect fake service */
1609 status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);
1610
1611 return status;
1612}
1613
1614static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
1615 enum htc_endpoint_id endpoint, u16 tag)
1616{
1617 struct htc_endpoint *ep = &target->endpoint[endpoint];
1618
1619 if (ep->svc_id == 0) {
1620 WARN_ON_ONCE(1);
1621 /* not in use.. */
1622 return;
1623 }
1624
1625 htc_flush_tx_endpoint(target, ep, tag);
1626}
1627
1628static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
1629 struct list_head *pkt_queue)
1630{
1631 struct htc_packet *packet, *tmp_pkt, *first;
1632 struct htc_endpoint *ep;
1633 int status = 0;
1634
1635 if (list_empty(pkt_queue))
1636 return -EINVAL;
1637
1638 first = list_first_entry(pkt_queue, struct htc_packet, list);
1639 if (first == NULL) {
1640 WARN_ON_ONCE(1);
1641 return -EINVAL;
1642 }
1643
1644 if (first->endpoint >= ENDPOINT_MAX) {
1645 WARN_ON_ONCE(1);
1646 return -EINVAL;
1647 }
1648
1649 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
1650 __func__, first->endpoint, get_queue_depth(pkt_queue),
1651 first->buf_len);
1652
1653 ep = &target->endpoint[first->endpoint];
1654
1655 spin_lock_bh(&target->rx_lock);
1656
1657 /* store receive packets */
1658 list_splice_tail_init(pkt_queue, &ep->rx_bufq);
1659
1660 spin_unlock_bh(&target->rx_lock);
1661
1662 if (status != 0) {
1663 /* walk through queue and mark each one canceled */
1664 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
1665 packet->status = -ECANCELED;
1666 }
1667
1668 do_recv_completion(ep, pkt_queue);
1669 }
1670
1671 return status;
1672}
1673
/* endpoint activity notifications are a no-op for the pipe transport */
static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id ep,
					     bool active)
{
	/* TODO */
}
1680
/* flushing all RX buffers is not implemented for the pipe transport */
static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
{
	/* TODO */
}
1685
/* the pipe transport manages credits internally; nothing to set up,
 * always succeeds
 */
static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
					struct ath6kl_htc_credit_info *info)
{
	return 0;
}
1691
/* HTC ops vtable for the HIF pipe transport; installed into
 * ar->htc_ops by ath6kl_htc_pipe_attach()
 */
static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
	.create = ath6kl_htc_pipe_create,
	.wait_target = ath6kl_htc_pipe_wait_target,
	.start = ath6kl_htc_pipe_start,
	.conn_service = ath6kl_htc_pipe_conn_service,
	.tx = ath6kl_htc_pipe_tx,
	.stop = ath6kl_htc_pipe_stop,
	.cleanup = ath6kl_htc_pipe_cleanup,
	.flush_txep = ath6kl_htc_pipe_flush_txep,
	.flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
	.activity_changed = ath6kl_htc_pipe_activity_changed,
	.get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
	.add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
	.credit_setup = ath6kl_htc_pipe_credit_setup,
	.tx_complete = ath6kl_htc_pipe_tx_complete,
	.rx_complete = ath6kl_htc_pipe_rx_complete,
};
1709
/* select the pipe-based HTC implementation for this ath6kl instance */
void ath6kl_htc_pipe_attach(struct ath6kl *ar)
{
	ar->htc_ops = &ath6kl_htc_pipe_ops;
}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 03cae142f178..29ef50ea07d5 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -16,17 +16,21 @@
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */ 17 */
18 18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
19#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
20#include <linux/errno.h> 22#include <linux/errno.h>
21#include <linux/export.h> 23#include <linux/export.h>
22#include <linux/of.h> 24#include <linux/of.h>
23#include <linux/mmc/sdio_func.h> 25#include <linux/mmc/sdio_func.h>
26#include <linux/vmalloc.h>
24 27
25#include "core.h" 28#include "core.h"
26#include "cfg80211.h" 29#include "cfg80211.h"
27#include "target.h" 30#include "target.h"
28#include "debug.h" 31#include "debug.h"
29#include "hif-ops.h" 32#include "hif-ops.h"
33#include "htc-ops.h"
30 34
31static const struct ath6kl_hw hw_list[] = { 35static const struct ath6kl_hw hw_list[] = {
32 { 36 {
@@ -256,6 +260,7 @@ static int ath6kl_init_service_ep(struct ath6kl *ar)
256 memset(&connect, 0, sizeof(connect)); 260 memset(&connect, 0, sizeof(connect));
257 261
258 /* these fields are the same for all service endpoints */ 262 /* these fields are the same for all service endpoints */
263 connect.ep_cb.tx_comp_multi = ath6kl_tx_complete;
259 connect.ep_cb.rx = ath6kl_rx; 264 connect.ep_cb.rx = ath6kl_rx;
260 connect.ep_cb.rx_refill = ath6kl_rx_refill; 265 connect.ep_cb.rx_refill = ath6kl_rx_refill;
261 connect.ep_cb.tx_full = ath6kl_tx_queue_full; 266 connect.ep_cb.tx_full = ath6kl_tx_queue_full;
@@ -485,22 +490,31 @@ int ath6kl_configure_target(struct ath6kl *ar)
485 fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS); 490 fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
486 491
487 /* 492 /*
488 * By default, submodes : 493 * Submodes when fw does not support dynamic interface
494 * switching:
489 * vif[0] - AP/STA/IBSS 495 * vif[0] - AP/STA/IBSS
490 * vif[1] - "P2P dev"/"P2P GO"/"P2P Client" 496 * vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
491 * vif[2] - "P2P dev"/"P2P GO"/"P2P Client" 497 * vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
498 * Otherwise, All the interface are initialized to p2p dev.
492 */ 499 */
493 500
494 for (i = 0; i < ar->max_norm_iface; i++) 501 if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
495 fw_submode |= HI_OPTION_FW_SUBMODE_NONE << 502 ar->fw_capabilities)) {
496 (i * HI_OPTION_FW_SUBMODE_BITS); 503 for (i = 0; i < ar->vif_max; i++)
504 fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
505 (i * HI_OPTION_FW_SUBMODE_BITS);
506 } else {
507 for (i = 0; i < ar->max_norm_iface; i++)
508 fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
509 (i * HI_OPTION_FW_SUBMODE_BITS);
497 510
498 for (i = ar->max_norm_iface; i < ar->vif_max; i++) 511 for (i = ar->max_norm_iface; i < ar->vif_max; i++)
499 fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV << 512 fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
500 (i * HI_OPTION_FW_SUBMODE_BITS); 513 (i * HI_OPTION_FW_SUBMODE_BITS);
501 514
502 if (ar->p2p && ar->vif_max == 1) 515 if (ar->p2p && ar->vif_max == 1)
503 fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV; 516 fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
517 }
504 518
505 if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest, 519 if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest,
506 HTC_PROTOCOL_VERSION) != 0) { 520 HTC_PROTOCOL_VERSION) != 0) {
@@ -539,18 +553,20 @@ int ath6kl_configure_target(struct ath6kl *ar)
539 * but possible in theory. 553 * but possible in theory.
540 */ 554 */
541 555
542 param = ar->hw.board_ext_data_addr; 556 if (ar->target_type == TARGET_TYPE_AR6003) {
543 ram_reserved_size = ar->hw.reserved_ram_size; 557 param = ar->hw.board_ext_data_addr;
558 ram_reserved_size = ar->hw.reserved_ram_size;
544 559
545 if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) { 560 if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) {
546 ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n"); 561 ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
547 return -EIO; 562 return -EIO;
548 } 563 }
549 564
550 if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, 565 if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz,
551 ram_reserved_size) != 0) { 566 ram_reserved_size) != 0) {
552 ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n"); 567 ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
553 return -EIO; 568 return -EIO;
569 }
554 } 570 }
555 571
556 /* set the block size for the target */ 572 /* set the block size for the target */
@@ -924,13 +940,14 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
924 if (ar->fw != NULL) 940 if (ar->fw != NULL)
925 break; 941 break;
926 942
927 ar->fw = kmemdup(data, ie_len, GFP_KERNEL); 943 ar->fw = vmalloc(ie_len);
928 944
929 if (ar->fw == NULL) { 945 if (ar->fw == NULL) {
930 ret = -ENOMEM; 946 ret = -ENOMEM;
931 goto out; 947 goto out;
932 } 948 }
933 949
950 memcpy(ar->fw, data, ie_len);
934 ar->fw_len = ie_len; 951 ar->fw_len = ie_len;
935 break; 952 break;
936 case ATH6KL_FW_IE_PATCH_IMAGE: 953 case ATH6KL_FW_IE_PATCH_IMAGE:
@@ -1507,7 +1524,7 @@ int ath6kl_init_hw_start(struct ath6kl *ar)
1507 } 1524 }
1508 1525
1509 /* setup credit distribution */ 1526 /* setup credit distribution */
1510 ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info); 1527 ath6kl_htc_credit_setup(ar->htc_target, &ar->credit_state_info);
1511 1528
1512 /* start HTC */ 1529 /* start HTC */
1513 ret = ath6kl_htc_start(ar->htc_target); 1530 ret = ath6kl_htc_start(ar->htc_target);
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 229e1922ebe4..4d818f96c415 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -15,6 +15,8 @@
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include "core.h" 20#include "core.h"
19#include "hif-ops.h" 21#include "hif-ops.h"
20#include "cfg80211.h" 22#include "cfg80211.h"
@@ -756,6 +758,10 @@ static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
756 stats->wow_evt_discarded += 758 stats->wow_evt_discarded +=
757 le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded); 759 le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
758 760
761 stats->arp_received = le32_to_cpu(tgt_stats->arp_stats.arp_received);
762 stats->arp_replied = le32_to_cpu(tgt_stats->arp_stats.arp_replied);
763 stats->arp_matched = le32_to_cpu(tgt_stats->arp_stats.arp_matched);
764
759 if (test_bit(STATS_UPDATE_PEND, &vif->flags)) { 765 if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
760 clear_bit(STATS_UPDATE_PEND, &vif->flags); 766 clear_bit(STATS_UPDATE_PEND, &vif->flags);
761 wake_up(&ar->event_wq); 767 wake_up(&ar->event_wq);
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 53528648b425..44ea7a742101 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1362,7 +1362,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
1362 goto err_core_alloc; 1362 goto err_core_alloc;
1363 } 1363 }
1364 1364
1365 ret = ath6kl_core_init(ar); 1365 ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
1366 if (ret) { 1366 if (ret) {
1367 ath6kl_err("Failed to init ath6kl core\n"); 1367 ath6kl_err("Failed to init ath6kl core\n");
1368 goto err_core_alloc; 1368 goto err_core_alloc;
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index 6675c92b542b..acc9aa832f76 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -55,8 +55,9 @@ void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len)
55 ath6kl_warn("failed to allocate testmode rx skb!\n"); 55 ath6kl_warn("failed to allocate testmode rx skb!\n");
56 return; 56 return;
57 } 57 }
58 NLA_PUT_U32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD); 58 if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) ||
59 NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf); 59 nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf))
60 goto nla_put_failure;
60 cfg80211_testmode_event(skb, GFP_KERNEL); 61 cfg80211_testmode_event(skb, GFP_KERNEL);
61 return; 62 return;
62 63
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index f85353fd1792..82f2f5cb475b 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -15,8 +15,11 @@
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include "core.h" 20#include "core.h"
19#include "debug.h" 21#include "debug.h"
22#include "htc-ops.h"
20 23
21/* 24/*
22 * tid - tid_mux0..tid_mux3 25 * tid - tid_mux0..tid_mux3
@@ -322,6 +325,7 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
322 cookie->map_no = 0; 325 cookie->map_no = 0;
323 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len, 326 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
324 eid, ATH6KL_CONTROL_PKT_TAG); 327 eid, ATH6KL_CONTROL_PKT_TAG);
328 cookie->htc_pkt.skb = skb;
325 329
326 /* 330 /*
327 * This interface is asynchronous, if there is an error, cleanup 331 * This interface is asynchronous, if there is an error, cleanup
@@ -490,6 +494,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
490 cookie->map_no = map_no; 494 cookie->map_no = map_no;
491 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len, 495 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
492 eid, htc_tag); 496 eid, htc_tag);
497 cookie->htc_pkt.skb = skb;
493 498
494 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ", 499 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
495 skb->data, skb->len); 500 skb->data, skb->len);
@@ -570,7 +575,7 @@ void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
570 575
571notify_htc: 576notify_htc:
572 /* notify HTC, this may cause credit distribution changes */ 577 /* notify HTC, this may cause credit distribution changes */
573 ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active); 578 ath6kl_htc_activity_changed(ar->htc_target, eid, active);
574} 579}
575 580
576enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, 581enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
@@ -666,9 +671,10 @@ static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
666 } 671 }
667} 672}
668 673
669void ath6kl_tx_complete(void *context, struct list_head *packet_queue) 674void ath6kl_tx_complete(struct htc_target *target,
675 struct list_head *packet_queue)
670{ 676{
671 struct ath6kl *ar = context; 677 struct ath6kl *ar = target->dev->ar;
672 struct sk_buff_head skb_queue; 678 struct sk_buff_head skb_queue;
673 struct htc_packet *packet; 679 struct htc_packet *packet;
674 struct sk_buff *skb; 680 struct sk_buff *skb;
@@ -887,6 +893,7 @@ void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
887 skb->data = PTR_ALIGN(skb->data - 4, 4); 893 skb->data = PTR_ALIGN(skb->data - 4, 4);
888 set_htc_rxpkt_info(packet, skb, skb->data, 894 set_htc_rxpkt_info(packet, skb, skb->data,
889 ATH6KL_BUFFER_SIZE, endpoint); 895 ATH6KL_BUFFER_SIZE, endpoint);
896 packet->skb = skb;
890 list_add_tail(&packet->list, &queue); 897 list_add_tail(&packet->list, &queue);
891 } 898 }
892 899
@@ -909,6 +916,8 @@ void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
909 skb->data = PTR_ALIGN(skb->data - 4, 4); 916 skb->data = PTR_ALIGN(skb->data - 4, 4);
910 set_htc_rxpkt_info(packet, skb, skb->data, 917 set_htc_rxpkt_info(packet, skb, skb->data,
911 ATH6KL_AMSDU_BUFFER_SIZE, 0); 918 ATH6KL_AMSDU_BUFFER_SIZE, 0);
919 packet->skb = skb;
920
912 spin_lock_bh(&ar->lock); 921 spin_lock_bh(&ar->lock);
913 list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue); 922 list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
914 spin_unlock_bh(&ar->lock); 923 spin_unlock_bh(&ar->lock);
@@ -1281,6 +1290,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1281 struct wmi_data_hdr *dhdr; 1290 struct wmi_data_hdr *dhdr;
1282 int min_hdr_len; 1291 int min_hdr_len;
1283 u8 meta_type, dot11_hdr = 0; 1292 u8 meta_type, dot11_hdr = 0;
1293 u8 pad_before_data_start;
1284 int status = packet->status; 1294 int status = packet->status;
1285 enum htc_endpoint_id ept = packet->endpoint; 1295 enum htc_endpoint_id ept = packet->endpoint;
1286 bool is_amsdu, prev_ps, ps_state = false; 1296 bool is_amsdu, prev_ps, ps_state = false;
@@ -1492,6 +1502,10 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1492 seq_no = wmi_data_hdr_get_seqno(dhdr); 1502 seq_no = wmi_data_hdr_get_seqno(dhdr);
1493 meta_type = wmi_data_hdr_get_meta(dhdr); 1503 meta_type = wmi_data_hdr_get_meta(dhdr);
1494 dot11_hdr = wmi_data_hdr_get_dot11(dhdr); 1504 dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
1505 pad_before_data_start =
1506 (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
1507 & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
1508
1495 skb_pull(skb, sizeof(struct wmi_data_hdr)); 1509 skb_pull(skb, sizeof(struct wmi_data_hdr));
1496 1510
1497 switch (meta_type) { 1511 switch (meta_type) {
@@ -1510,6 +1524,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1510 break; 1524 break;
1511 } 1525 }
1512 1526
1527 skb_pull(skb, pad_before_data_start);
1528
1513 if (dot11_hdr) 1529 if (dot11_hdr)
1514 status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb); 1530 status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
1515 else if (!is_amsdu) 1531 else if (!is_amsdu)
@@ -1579,7 +1595,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1579 /* aggregation code will handle the skb */ 1595 /* aggregation code will handle the skb */
1580 return; 1596 return;
1581 } 1597 }
1582 } 1598 } else if (!is_broadcast_ether_addr(datap->h_dest))
1599 vif->net_stats.multicast++;
1583 1600
1584 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); 1601 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1585} 1602}
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 325b1224c2b1..ec7f1f5fd1ca 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -21,15 +21,77 @@
21#include "debug.h" 21#include "debug.h"
22#include "core.h" 22#include "core.h"
23 23
24/* constants */
25#define TX_URB_COUNT 32
26#define RX_URB_COUNT 32
27#define ATH6KL_USB_RX_BUFFER_SIZE 1700
28
29/* tx/rx pipes for usb */
30enum ATH6KL_USB_PIPE_ID {
31 ATH6KL_USB_PIPE_TX_CTRL = 0,
32 ATH6KL_USB_PIPE_TX_DATA_LP,
33 ATH6KL_USB_PIPE_TX_DATA_MP,
34 ATH6KL_USB_PIPE_TX_DATA_HP,
35 ATH6KL_USB_PIPE_RX_CTRL,
36 ATH6KL_USB_PIPE_RX_DATA,
37 ATH6KL_USB_PIPE_RX_DATA2,
38 ATH6KL_USB_PIPE_RX_INT,
39 ATH6KL_USB_PIPE_MAX
40};
41
42#define ATH6KL_USB_PIPE_INVALID ATH6KL_USB_PIPE_MAX
43
44struct ath6kl_usb_pipe {
45 struct list_head urb_list_head;
46 struct usb_anchor urb_submitted;
47 u32 urb_alloc;
48 u32 urb_cnt;
49 u32 urb_cnt_thresh;
50 unsigned int usb_pipe_handle;
51 u32 flags;
52 u8 ep_address;
53 u8 logical_pipe_num;
54 struct ath6kl_usb *ar_usb;
55 u16 max_packet_size;
56 struct work_struct io_complete_work;
57 struct sk_buff_head io_comp_queue;
58 struct usb_endpoint_descriptor *ep_desc;
59};
60
61#define ATH6KL_USB_PIPE_FLAG_TX (1 << 0)
62
24/* usb device object */ 63/* usb device object */
25struct ath6kl_usb { 64struct ath6kl_usb {
65 /* protects pipe->urb_list_head and pipe->urb_cnt */
66 spinlock_t cs_lock;
67
26 struct usb_device *udev; 68 struct usb_device *udev;
27 struct usb_interface *interface; 69 struct usb_interface *interface;
70 struct ath6kl_usb_pipe pipes[ATH6KL_USB_PIPE_MAX];
28 u8 *diag_cmd_buffer; 71 u8 *diag_cmd_buffer;
29 u8 *diag_resp_buffer; 72 u8 *diag_resp_buffer;
30 struct ath6kl *ar; 73 struct ath6kl *ar;
31}; 74};
32 75
76/* usb urb object */
77struct ath6kl_urb_context {
78 struct list_head link;
79 struct ath6kl_usb_pipe *pipe;
80 struct sk_buff *skb;
81 struct ath6kl *ar;
82};
83
84/* USB endpoint definitions */
85#define ATH6KL_USB_EP_ADDR_APP_CTRL_IN 0x81
86#define ATH6KL_USB_EP_ADDR_APP_DATA_IN 0x82
87#define ATH6KL_USB_EP_ADDR_APP_DATA2_IN 0x83
88#define ATH6KL_USB_EP_ADDR_APP_INT_IN 0x84
89
90#define ATH6KL_USB_EP_ADDR_APP_CTRL_OUT 0x01
91#define ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT 0x02
92#define ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
93#define ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT 0x04
94
33/* diagnostic command defnitions */ 95/* diagnostic command defnitions */
34#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1 96#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1
35#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2 97#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2
@@ -55,11 +117,493 @@ struct ath6kl_usb_ctrl_diag_resp_read {
55 __le32 value; 117 __le32 value;
56} __packed; 118} __packed;
57 119
120/* function declarations */
121static void ath6kl_usb_recv_complete(struct urb *urb);
122
123#define ATH6KL_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02)
124#define ATH6KL_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03)
125#define ATH6KL_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01)
126#define ATH6KL_USB_IS_DIR_IN(addr) ((addr) & 0x80)
127
128/* pipe/urb operations */
129static struct ath6kl_urb_context *
130ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe)
131{
132 struct ath6kl_urb_context *urb_context = NULL;
133 unsigned long flags;
134
135 spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
136 if (!list_empty(&pipe->urb_list_head)) {
137 urb_context =
138 list_first_entry(&pipe->urb_list_head,
139 struct ath6kl_urb_context, link);
140 list_del(&urb_context->link);
141 pipe->urb_cnt--;
142 }
143 spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
144
145 return urb_context;
146}
147
148static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
149 struct ath6kl_urb_context *urb_context)
150{
151 unsigned long flags;
152
153 spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
154 pipe->urb_cnt++;
155
156 list_add(&urb_context->link, &pipe->urb_list_head);
157 spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
158}
159
160static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context)
161{
162 if (urb_context->skb != NULL) {
163 dev_kfree_skb(urb_context->skb);
164 urb_context->skb = NULL;
165 }
166
167 ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
168}
169
170static inline struct ath6kl_usb *ath6kl_usb_priv(struct ath6kl *ar)
171{
172 return ar->hif_priv;
173}
174
175/* pipe resource allocation/cleanup */
176static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
177 int urb_cnt)
178{
179 struct ath6kl_urb_context *urb_context;
180 int status = 0, i;
181
182 INIT_LIST_HEAD(&pipe->urb_list_head);
183 init_usb_anchor(&pipe->urb_submitted);
184
185 for (i = 0; i < urb_cnt; i++) {
186 urb_context = kzalloc(sizeof(struct ath6kl_urb_context),
187 GFP_KERNEL);
188 if (urb_context == NULL)
189 /* FIXME: set status to -ENOMEM */
190 break;
191
192 urb_context->pipe = pipe;
193
194 /*
195 * we are only allocate the urb contexts here, the actual URB
196 * is allocated from the kernel as needed to do a transaction
197 */
198 pipe->urb_alloc++;
199 ath6kl_usb_free_urb_to_pipe(pipe, urb_context);
200 }
201
202 ath6kl_dbg(ATH6KL_DBG_USB,
203 "ath6kl usb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n",
204 pipe->logical_pipe_num, pipe->usb_pipe_handle,
205 pipe->urb_alloc);
206
207 return status;
208}
209
210static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
211{
212 struct ath6kl_urb_context *urb_context;
213
214 if (pipe->ar_usb == NULL) {
215 /* nothing allocated for this pipe */
216 return;
217 }
218
219 ath6kl_dbg(ATH6KL_DBG_USB,
220 "ath6kl usb: free resources lpipe:%d"
221 "hpipe:0x%X urbs:%d avail:%d\n",
222 pipe->logical_pipe_num, pipe->usb_pipe_handle,
223 pipe->urb_alloc, pipe->urb_cnt);
224
225 if (pipe->urb_alloc != pipe->urb_cnt) {
226 ath6kl_dbg(ATH6KL_DBG_USB,
227 "ath6kl usb: urb leak! lpipe:%d"
228 "hpipe:0x%X urbs:%d avail:%d\n",
229 pipe->logical_pipe_num, pipe->usb_pipe_handle,
230 pipe->urb_alloc, pipe->urb_cnt);
231 }
232
233 while (true) {
234 urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe);
235 if (urb_context == NULL)
236 break;
237 kfree(urb_context);
238 }
239
240}
241
242static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
243{
244 int i;
245
246 for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
247 ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
248
249}
250
251static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
252 u8 ep_address, int *urb_count)
253{
254 u8 pipe_num = ATH6KL_USB_PIPE_INVALID;
255
256 switch (ep_address) {
257 case ATH6KL_USB_EP_ADDR_APP_CTRL_IN:
258 pipe_num = ATH6KL_USB_PIPE_RX_CTRL;
259 *urb_count = RX_URB_COUNT;
260 break;
261 case ATH6KL_USB_EP_ADDR_APP_DATA_IN:
262 pipe_num = ATH6KL_USB_PIPE_RX_DATA;
263 *urb_count = RX_URB_COUNT;
264 break;
265 case ATH6KL_USB_EP_ADDR_APP_INT_IN:
266 pipe_num = ATH6KL_USB_PIPE_RX_INT;
267 *urb_count = RX_URB_COUNT;
268 break;
269 case ATH6KL_USB_EP_ADDR_APP_DATA2_IN:
270 pipe_num = ATH6KL_USB_PIPE_RX_DATA2;
271 *urb_count = RX_URB_COUNT;
272 break;
273 case ATH6KL_USB_EP_ADDR_APP_CTRL_OUT:
274 pipe_num = ATH6KL_USB_PIPE_TX_CTRL;
275 *urb_count = TX_URB_COUNT;
276 break;
277 case ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT:
278 pipe_num = ATH6KL_USB_PIPE_TX_DATA_LP;
279 *urb_count = TX_URB_COUNT;
280 break;
281 case ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT:
282 pipe_num = ATH6KL_USB_PIPE_TX_DATA_MP;
283 *urb_count = TX_URB_COUNT;
284 break;
285 case ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT:
286 pipe_num = ATH6KL_USB_PIPE_TX_DATA_HP;
287 *urb_count = TX_URB_COUNT;
288 break;
289 default:
290 /* note: there may be endpoints not currently used */
291 break;
292 }
293
294 return pipe_num;
295}
296
297static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb)
298{
299 struct usb_interface *interface = ar_usb->interface;
300 struct usb_host_interface *iface_desc = interface->cur_altsetting;
301 struct usb_endpoint_descriptor *endpoint;
302 struct ath6kl_usb_pipe *pipe;
303 int i, urbcount, status = 0;
304 u8 pipe_num;
305
306 ath6kl_dbg(ATH6KL_DBG_USB, "setting up USB Pipes using interface\n");
307
308 /* walk decriptors and setup pipes */
309 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
310 endpoint = &iface_desc->endpoint[i].desc;
311
312 if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) {
313 ath6kl_dbg(ATH6KL_DBG_USB,
314 "%s Bulk Ep:0x%2.2X maxpktsz:%d\n",
315 ATH6KL_USB_IS_DIR_IN
316 (endpoint->bEndpointAddress) ?
317 "RX" : "TX", endpoint->bEndpointAddress,
318 le16_to_cpu(endpoint->wMaxPacketSize));
319 } else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) {
320 ath6kl_dbg(ATH6KL_DBG_USB,
321 "%s Int Ep:0x%2.2X maxpktsz:%d interval:%d\n",
322 ATH6KL_USB_IS_DIR_IN
323 (endpoint->bEndpointAddress) ?
324 "RX" : "TX", endpoint->bEndpointAddress,
325 le16_to_cpu(endpoint->wMaxPacketSize),
326 endpoint->bInterval);
327 } else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
328 /* TODO for ISO */
329 ath6kl_dbg(ATH6KL_DBG_USB,
330 "%s ISOC Ep:0x%2.2X maxpktsz:%d interval:%d\n",
331 ATH6KL_USB_IS_DIR_IN
332 (endpoint->bEndpointAddress) ?
333 "RX" : "TX", endpoint->bEndpointAddress,
334 le16_to_cpu(endpoint->wMaxPacketSize),
335 endpoint->bInterval);
336 }
337 urbcount = 0;
338
339 pipe_num =
340 ath6kl_usb_get_logical_pipe_num(ar_usb,
341 endpoint->bEndpointAddress,
342 &urbcount);
343 if (pipe_num == ATH6KL_USB_PIPE_INVALID)
344 continue;
345
346 pipe = &ar_usb->pipes[pipe_num];
347 if (pipe->ar_usb != NULL) {
348 /* hmmm..pipe was already setup */
349 continue;
350 }
351
352 pipe->ar_usb = ar_usb;
353 pipe->logical_pipe_num = pipe_num;
354 pipe->ep_address = endpoint->bEndpointAddress;
355 pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize);
356
357 if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) {
358 if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
359 pipe->usb_pipe_handle =
360 usb_rcvbulkpipe(ar_usb->udev,
361 pipe->ep_address);
362 } else {
363 pipe->usb_pipe_handle =
364 usb_sndbulkpipe(ar_usb->udev,
365 pipe->ep_address);
366 }
367 } else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) {
368 if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
369 pipe->usb_pipe_handle =
370 usb_rcvintpipe(ar_usb->udev,
371 pipe->ep_address);
372 } else {
373 pipe->usb_pipe_handle =
374 usb_sndintpipe(ar_usb->udev,
375 pipe->ep_address);
376 }
377 } else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
378 /* TODO for ISO */
379 if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
380 pipe->usb_pipe_handle =
381 usb_rcvisocpipe(ar_usb->udev,
382 pipe->ep_address);
383 } else {
384 pipe->usb_pipe_handle =
385 usb_sndisocpipe(ar_usb->udev,
386 pipe->ep_address);
387 }
388 }
389
390 pipe->ep_desc = endpoint;
391
392 if (!ATH6KL_USB_IS_DIR_IN(pipe->ep_address))
393 pipe->flags |= ATH6KL_USB_PIPE_FLAG_TX;
394
395 status = ath6kl_usb_alloc_pipe_resources(pipe, urbcount);
396 if (status != 0)
397 break;
398 }
399
400 return status;
401}
402
403/* pipe operations */
404static void ath6kl_usb_post_recv_transfers(struct ath6kl_usb_pipe *recv_pipe,
405 int buffer_length)
406{
407 struct ath6kl_urb_context *urb_context;
408 struct urb *urb;
409 int usb_status;
410
411 while (true) {
412 urb_context = ath6kl_usb_alloc_urb_from_pipe(recv_pipe);
413 if (urb_context == NULL)
414 break;
415
416 urb_context->skb = dev_alloc_skb(buffer_length);
417 if (urb_context->skb == NULL)
418 goto err_cleanup_urb;
419
420 urb = usb_alloc_urb(0, GFP_ATOMIC);
421 if (urb == NULL)
422 goto err_cleanup_urb;
423
424 usb_fill_bulk_urb(urb,
425 recv_pipe->ar_usb->udev,
426 recv_pipe->usb_pipe_handle,
427 urb_context->skb->data,
428 buffer_length,
429 ath6kl_usb_recv_complete, urb_context);
430
431 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
432 "ath6kl usb: bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes buf:0x%p\n",
433 recv_pipe->logical_pipe_num,
434 recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
435 buffer_length, urb_context->skb);
436
437 usb_anchor_urb(urb, &recv_pipe->urb_submitted);
438 usb_status = usb_submit_urb(urb, GFP_ATOMIC);
439
440 if (usb_status) {
441 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
442 "ath6kl usb : usb bulk recv failed %d\n",
443 usb_status);
444 usb_unanchor_urb(urb);
445 usb_free_urb(urb);
446 goto err_cleanup_urb;
447 }
448 usb_free_urb(urb);
449 }
450 return;
451
452err_cleanup_urb:
453 ath6kl_usb_cleanup_recv_urb(urb_context);
454 return;
455}
456
457static void ath6kl_usb_flush_all(struct ath6kl_usb *ar_usb)
458{
459 int i;
460
461 for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) {
462 if (ar_usb->pipes[i].ar_usb != NULL)
463 usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted);
464 }
465
466 /*
467 * Flushing any pending I/O may schedule work this call will block
468 * until all scheduled work runs to completion.
469 */
470 flush_scheduled_work();
471}
472
473static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
474{
475 /*
476 * note: control pipe is no longer used
477 * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_cnt_thresh =
478 * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_alloc/2;
479 * ath6kl_usb_post_recv_transfers(&ar_usb->
480 * pipes[ATH6KL_USB_PIPE_RX_CTRL],
481 * ATH6KL_USB_RX_BUFFER_SIZE);
482 */
483
484 ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh =
485 ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_alloc / 2;
486 ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA],
487 ATH6KL_USB_RX_BUFFER_SIZE);
488}
489
490/* hif usb rx/tx completion functions */
491static void ath6kl_usb_recv_complete(struct urb *urb)
492{
493 struct ath6kl_urb_context *urb_context = urb->context;
494 struct ath6kl_usb_pipe *pipe = urb_context->pipe;
495 struct sk_buff *skb = NULL;
496 int status = 0;
497
498 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
499 "%s: recv pipe: %d, stat:%d, len:%d urb:0x%p\n", __func__,
500 pipe->logical_pipe_num, urb->status, urb->actual_length,
501 urb);
502
503 if (urb->status != 0) {
504 status = -EIO;
505 switch (urb->status) {
506 case -ECONNRESET:
507 case -ENOENT:
508 case -ESHUTDOWN:
509 /*
510 * no need to spew these errors when device
511 * removed or urb killed due to driver shutdown
512 */
513 status = -ECANCELED;
514 break;
515 default:
516 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
517 "%s recv pipe: %d (ep:0x%2.2X), failed:%d\n",
518 __func__, pipe->logical_pipe_num,
519 pipe->ep_address, urb->status);
520 break;
521 }
522 goto cleanup_recv_urb;
523 }
524
525 if (urb->actual_length == 0)
526 goto cleanup_recv_urb;
527
528 skb = urb_context->skb;
529
530 /* we are going to pass it up */
531 urb_context->skb = NULL;
532 skb_put(skb, urb->actual_length);
533
534 /* note: queue implements a lock */
535 skb_queue_tail(&pipe->io_comp_queue, skb);
536 schedule_work(&pipe->io_complete_work);
537
538cleanup_recv_urb:
539 ath6kl_usb_cleanup_recv_urb(urb_context);
540
541 if (status == 0 &&
542 pipe->urb_cnt >= pipe->urb_cnt_thresh) {
543 /* our free urbs are piling up, post more transfers */
544 ath6kl_usb_post_recv_transfers(pipe, ATH6KL_USB_RX_BUFFER_SIZE);
545 }
546}
547
548static void ath6kl_usb_usb_transmit_complete(struct urb *urb)
549{
550 struct ath6kl_urb_context *urb_context = urb->context;
551 struct ath6kl_usb_pipe *pipe = urb_context->pipe;
552 struct sk_buff *skb;
553
554 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
555 "%s: pipe: %d, stat:%d, len:%d\n",
556 __func__, pipe->logical_pipe_num, urb->status,
557 urb->actual_length);
558
559 if (urb->status != 0) {
560 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
561 "%s: pipe: %d, failed:%d\n",
562 __func__, pipe->logical_pipe_num, urb->status);
563 }
564
565 skb = urb_context->skb;
566 urb_context->skb = NULL;
567 ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
568
569 /* note: queue implements a lock */
570 skb_queue_tail(&pipe->io_comp_queue, skb);
571 schedule_work(&pipe->io_complete_work);
572}
573
574static void ath6kl_usb_io_comp_work(struct work_struct *work)
575{
576 struct ath6kl_usb_pipe *pipe = container_of(work,
577 struct ath6kl_usb_pipe,
578 io_complete_work);
579 struct ath6kl_usb *ar_usb;
580 struct sk_buff *skb;
581
582 ar_usb = pipe->ar_usb;
583
584 while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
585 if (pipe->flags & ATH6KL_USB_PIPE_FLAG_TX) {
586 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
587 "ath6kl usb xmit callback buf:0x%p\n", skb);
588 ath6kl_core_tx_complete(ar_usb->ar, skb);
589 } else {
590 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
591 "ath6kl usb recv callback buf:0x%p\n", skb);
592 ath6kl_core_rx_complete(ar_usb->ar, skb,
593 pipe->logical_pipe_num);
594 }
595 }
596}
597
58#define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write)) 598#define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write))
59#define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read)) 599#define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read))
60 600
61static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb) 601static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
62{ 602{
603 ath6kl_usb_flush_all(ar_usb);
604
605 ath6kl_usb_cleanup_pipe_resources(ar_usb);
606
63 usb_set_intfdata(ar_usb->interface, NULL); 607 usb_set_intfdata(ar_usb->interface, NULL);
64 608
65 kfree(ar_usb->diag_cmd_buffer); 609 kfree(ar_usb->diag_cmd_buffer);
@@ -70,19 +614,28 @@ static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
70 614
71static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface) 615static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
72{ 616{
73 struct ath6kl_usb *ar_usb = NULL;
74 struct usb_device *dev = interface_to_usbdev(interface); 617 struct usb_device *dev = interface_to_usbdev(interface);
618 struct ath6kl_usb *ar_usb;
619 struct ath6kl_usb_pipe *pipe;
75 int status = 0; 620 int status = 0;
621 int i;
76 622
77 ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL); 623 ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL);
78 if (ar_usb == NULL) 624 if (ar_usb == NULL)
79 goto fail_ath6kl_usb_create; 625 goto fail_ath6kl_usb_create;
80 626
81 memset(ar_usb, 0, sizeof(struct ath6kl_usb));
82 usb_set_intfdata(interface, ar_usb); 627 usb_set_intfdata(interface, ar_usb);
628 spin_lock_init(&(ar_usb->cs_lock));
83 ar_usb->udev = dev; 629 ar_usb->udev = dev;
84 ar_usb->interface = interface; 630 ar_usb->interface = interface;
85 631
632 for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) {
633 pipe = &ar_usb->pipes[i];
634 INIT_WORK(&pipe->io_complete_work,
635 ath6kl_usb_io_comp_work);
636 skb_queue_head_init(&pipe->io_comp_queue);
637 }
638
86 ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL); 639 ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL);
87 if (ar_usb->diag_cmd_buffer == NULL) { 640 if (ar_usb->diag_cmd_buffer == NULL) {
88 status = -ENOMEM; 641 status = -ENOMEM;
@@ -96,6 +649,8 @@ static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
96 goto fail_ath6kl_usb_create; 649 goto fail_ath6kl_usb_create;
97 } 650 }
98 651
652 status = ath6kl_usb_setup_pipe_resources(ar_usb);
653
99fail_ath6kl_usb_create: 654fail_ath6kl_usb_create:
100 if (status != 0) { 655 if (status != 0) {
101 ath6kl_usb_destroy(ar_usb); 656 ath6kl_usb_destroy(ar_usb);
@@ -114,11 +669,177 @@ static void ath6kl_usb_device_detached(struct usb_interface *interface)
114 669
115 ath6kl_stop_txrx(ar_usb->ar); 670 ath6kl_stop_txrx(ar_usb->ar);
116 671
672 /* Delay to wait for the target to reboot */
673 mdelay(20);
117 ath6kl_core_cleanup(ar_usb->ar); 674 ath6kl_core_cleanup(ar_usb->ar);
118
119 ath6kl_usb_destroy(ar_usb); 675 ath6kl_usb_destroy(ar_usb);
120} 676}
121 677
678/* exported hif usb APIs for htc pipe */
679static void hif_start(struct ath6kl *ar)
680{
681 struct ath6kl_usb *device = ath6kl_usb_priv(ar);
682 int i;
683
684 ath6kl_usb_start_recv_pipes(device);
685
686 /* set the TX resource avail threshold for each TX pipe */
687 for (i = ATH6KL_USB_PIPE_TX_CTRL;
688 i <= ATH6KL_USB_PIPE_TX_DATA_HP; i++) {
689 device->pipes[i].urb_cnt_thresh =
690 device->pipes[i].urb_alloc / 2;
691 }
692}
693
694static int ath6kl_usb_send(struct ath6kl *ar, u8 PipeID,
695 struct sk_buff *hdr_skb, struct sk_buff *skb)
696{
697 struct ath6kl_usb *device = ath6kl_usb_priv(ar);
698 struct ath6kl_usb_pipe *pipe = &device->pipes[PipeID];
699 struct ath6kl_urb_context *urb_context;
700 int usb_status, status = 0;
701 struct urb *urb;
702 u8 *data;
703 u32 len;
704
705 ath6kl_dbg(ATH6KL_DBG_USB_BULK, "+%s pipe : %d, buf:0x%p\n",
706 __func__, PipeID, skb);
707
708 urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe);
709
710 if (urb_context == NULL) {
711 /*
712 * TODO: it is possible to run out of urbs if
713 * 2 endpoints map to the same pipe ID
714 */
715 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
716 "%s pipe:%d no urbs left. URB Cnt : %d\n",
717 __func__, PipeID, pipe->urb_cnt);
718 status = -ENOMEM;
719 goto fail_hif_send;
720 }
721
722 urb_context->skb = skb;
723
724 data = skb->data;
725 len = skb->len;
726
727 urb = usb_alloc_urb(0, GFP_ATOMIC);
728 if (urb == NULL) {
729 status = -ENOMEM;
730 ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
731 urb_context);
732 goto fail_hif_send;
733 }
734
735 usb_fill_bulk_urb(urb,
736 device->udev,
737 pipe->usb_pipe_handle,
738 data,
739 len,
740 ath6kl_usb_usb_transmit_complete, urb_context);
741
742 if ((len % pipe->max_packet_size) == 0) {
743 /* hit a max packet boundary on this pipe */
744 urb->transfer_flags |= URB_ZERO_PACKET;
745 }
746
747 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
748 "athusb bulk send submit:%d, 0x%X (ep:0x%2.2X), %d bytes\n",
749 pipe->logical_pipe_num, pipe->usb_pipe_handle,
750 pipe->ep_address, len);
751
752 usb_anchor_urb(urb, &pipe->urb_submitted);
753 usb_status = usb_submit_urb(urb, GFP_ATOMIC);
754
755 if (usb_status) {
756 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
757 "ath6kl usb : usb bulk transmit failed %d\n",
758 usb_status);
759 usb_unanchor_urb(urb);
760 ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
761 urb_context);
762 status = -EINVAL;
763 }
764 usb_free_urb(urb);
765
766fail_hif_send:
767 return status;
768}
769
770static void hif_stop(struct ath6kl *ar)
771{
772 struct ath6kl_usb *device = ath6kl_usb_priv(ar);
773
774 ath6kl_usb_flush_all(device);
775}
776
777static void ath6kl_usb_get_default_pipe(struct ath6kl *ar,
778 u8 *ul_pipe, u8 *dl_pipe)
779{
780 *ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
781 *dl_pipe = ATH6KL_USB_PIPE_RX_CTRL;
782}
783
784static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id,
785 u8 *ul_pipe, u8 *dl_pipe)
786{
787 int status = 0;
788
789 switch (svc_id) {
790 case HTC_CTRL_RSVD_SVC:
791 case WMI_CONTROL_SVC:
792 *ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
793 /* due to large control packets, shift to data pipe */
794 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
795 break;
796 case WMI_DATA_BE_SVC:
797 case WMI_DATA_BK_SVC:
798 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
799 /*
800 * Disable rxdata2 directly, it will be enabled
801 * if FW enable rxdata2
802 */
803 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
804 break;
805 case WMI_DATA_VI_SVC:
806 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
807 /*
808 * Disable rxdata2 directly, it will be enabled
809 * if FW enable rxdata2
810 */
811 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
812 break;
813 case WMI_DATA_VO_SVC:
814 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_HP;
815 /*
816 * Disable rxdata2 directly, it will be enabled
817 * if FW enable rxdata2
818 */
819 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
820 break;
821 default:
822 status = -EPERM;
823 break;
824 }
825
826 return status;
827}
828
829static u16 ath6kl_usb_get_free_queue_number(struct ath6kl *ar, u8 pipe_id)
830{
831 struct ath6kl_usb *device = ath6kl_usb_priv(ar);
832
833 return device->pipes[pipe_id].urb_cnt;
834}
835
836static void hif_detach_htc(struct ath6kl *ar)
837{
838 struct ath6kl_usb *device = ath6kl_usb_priv(ar);
839
840 ath6kl_usb_flush_all(device);
841}
842
122static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb, 843static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
123 u8 req, u16 value, u16 index, void *data, 844 u8 req, u16 value, u16 index, void *data,
124 u32 size) 845 u32 size)
@@ -301,14 +1022,21 @@ static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
301 1022
302static int ath6kl_usb_power_on(struct ath6kl *ar) 1023static int ath6kl_usb_power_on(struct ath6kl *ar)
303{ 1024{
1025 hif_start(ar);
304 return 0; 1026 return 0;
305} 1027}
306 1028
307static int ath6kl_usb_power_off(struct ath6kl *ar) 1029static int ath6kl_usb_power_off(struct ath6kl *ar)
308{ 1030{
1031 hif_detach_htc(ar);
309 return 0; 1032 return 0;
310} 1033}
311 1034
1035static void ath6kl_usb_stop(struct ath6kl *ar)
1036{
1037 hif_stop(ar);
1038}
1039
312static const struct ath6kl_hif_ops ath6kl_usb_ops = { 1040static const struct ath6kl_hif_ops ath6kl_usb_ops = {
313 .diag_read32 = ath6kl_usb_diag_read32, 1041 .diag_read32 = ath6kl_usb_diag_read32,
314 .diag_write32 = ath6kl_usb_diag_write32, 1042 .diag_write32 = ath6kl_usb_diag_write32,
@@ -316,6 +1044,11 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = {
316 .bmi_write = ath6kl_usb_bmi_write, 1044 .bmi_write = ath6kl_usb_bmi_write,
317 .power_on = ath6kl_usb_power_on, 1045 .power_on = ath6kl_usb_power_on,
318 .power_off = ath6kl_usb_power_off, 1046 .power_off = ath6kl_usb_power_off,
1047 .stop = ath6kl_usb_stop,
1048 .pipe_send = ath6kl_usb_send,
1049 .pipe_get_default = ath6kl_usb_get_default_pipe,
1050 .pipe_map_service = ath6kl_usb_map_service_pipe,
1051 .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
319}; 1052};
320 1053
321/* ath6kl usb driver registered functions */ 1054/* ath6kl usb driver registered functions */
@@ -368,7 +1101,7 @@ static int ath6kl_usb_probe(struct usb_interface *interface,
368 1101
369 ar_usb->ar = ar; 1102 ar_usb->ar = ar;
370 1103
371 ret = ath6kl_core_init(ar); 1104 ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_PIPE);
372 if (ret) { 1105 if (ret) {
373 ath6kl_err("Failed to init ath6kl core: %d\n", ret); 1106 ath6kl_err("Failed to init ath6kl core: %d\n", ret);
374 goto err_core_free; 1107 goto err_core_free;
@@ -392,6 +1125,46 @@ static void ath6kl_usb_remove(struct usb_interface *interface)
392 ath6kl_usb_device_detached(interface); 1125 ath6kl_usb_device_detached(interface);
393} 1126}
394 1127
1128#ifdef CONFIG_PM
1129
1130static int ath6kl_usb_suspend(struct usb_interface *interface,
1131 pm_message_t message)
1132{
1133 struct ath6kl_usb *device;
1134 device = usb_get_intfdata(interface);
1135
1136 ath6kl_usb_flush_all(device);
1137 return 0;
1138}
1139
1140static int ath6kl_usb_resume(struct usb_interface *interface)
1141{
1142 struct ath6kl_usb *device;
1143 device = usb_get_intfdata(interface);
1144
1145 ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA],
1146 ATH6KL_USB_RX_BUFFER_SIZE);
1147 ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA2],
1148 ATH6KL_USB_RX_BUFFER_SIZE);
1149
1150 return 0;
1151}
1152
1153static int ath6kl_usb_reset_resume(struct usb_interface *intf)
1154{
1155 if (usb_get_intfdata(intf))
1156 ath6kl_usb_remove(intf);
1157 return 0;
1158}
1159
1160#else
1161
1162#define ath6kl_usb_suspend NULL
1163#define ath6kl_usb_resume NULL
1164#define ath6kl_usb_reset_resume NULL
1165
1166#endif
1167
395/* table of devices that work with this driver */ 1168/* table of devices that work with this driver */
396static struct usb_device_id ath6kl_usb_ids[] = { 1169static struct usb_device_id ath6kl_usb_ids[] = {
397 {USB_DEVICE(0x0cf3, 0x9374)}, 1170 {USB_DEVICE(0x0cf3, 0x9374)},
@@ -403,8 +1176,12 @@ MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
403static struct usb_driver ath6kl_usb_driver = { 1176static struct usb_driver ath6kl_usb_driver = {
404 .name = "ath6kl_usb", 1177 .name = "ath6kl_usb",
405 .probe = ath6kl_usb_probe, 1178 .probe = ath6kl_usb_probe,
1179 .suspend = ath6kl_usb_suspend,
1180 .resume = ath6kl_usb_resume,
1181 .reset_resume = ath6kl_usb_reset_resume,
406 .disconnect = ath6kl_usb_remove, 1182 .disconnect = ath6kl_usb_remove,
407 .id_table = ath6kl_usb_ids, 1183 .id_table = ath6kl_usb_ids,
1184 .supports_autosuspend = true,
408}; 1185};
409 1186
410static int ath6kl_usb_init(void) 1187static int ath6kl_usb_init(void)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 2b442332cd0f..7c8a9977faf5 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2882,6 +2882,43 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
2882 return ret; 2882 return ret;
2883} 2883}
2884 2884
2885int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
2886 enum ieee80211_band band,
2887 struct ath6kl_htcap *htcap)
2888{
2889 struct sk_buff *skb;
2890 struct wmi_set_htcap_cmd *cmd;
2891
2892 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2893 if (!skb)
2894 return -ENOMEM;
2895
2896 cmd = (struct wmi_set_htcap_cmd *) skb->data;
2897
2898 /*
2899 * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely
2900 * this will be changed in firmware. If at all there is any change in
2901 * band value, the host needs to be fixed.
2902 */
2903 cmd->band = band;
2904 cmd->ht_enable = !!htcap->ht_enable;
2905 cmd->ht20_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_20);
2906 cmd->ht40_supported =
2907 !!(htcap->cap_info & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
2908 cmd->ht40_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_40);
2909 cmd->intolerant_40mhz =
2910 !!(htcap->cap_info & IEEE80211_HT_CAP_40MHZ_INTOLERANT);
2911 cmd->max_ampdu_len_exp = htcap->ampdu_factor;
2912
2913 ath6kl_dbg(ATH6KL_DBG_WMI,
2914 "Set htcap: band:%d ht_enable:%d 40mhz:%d sgi_20mhz:%d sgi_40mhz:%d 40mhz_intolerant:%d ampdu_len_exp:%d\n",
2915 cmd->band, cmd->ht_enable, cmd->ht40_supported,
2916 cmd->ht20_sgi, cmd->ht40_sgi, cmd->intolerant_40mhz,
2917 cmd->max_ampdu_len_exp);
2918 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_HT_CAP_CMDID,
2919 NO_SYNC_WMIFLAG);
2920}
2921
2885int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len) 2922int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
2886{ 2923{
2887 struct sk_buff *skb; 2924 struct sk_buff *skb;
@@ -3032,6 +3069,9 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
3032 cm->reason = cpu_to_le16(reason); 3069 cm->reason = cpu_to_le16(reason);
3033 cm->cmd = cmd; 3070 cm->cmd = cmd;
3034 3071
3072 ath6kl_dbg(ATH6KL_DBG_WMI, "ap_set_mlme: cmd=%d reason=%d\n", cm->cmd,
3073 cm->reason);
3074
3035 return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID, 3075 return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
3036 NO_SYNC_WMIFLAG); 3076 NO_SYNC_WMIFLAG);
3037} 3077}
@@ -3181,6 +3221,29 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
3181 NO_SYNC_WMIFLAG); 3221 NO_SYNC_WMIFLAG);
3182} 3222}
3183 3223
3224int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
3225 const u8 *ie_info, u8 ie_len)
3226{
3227 struct sk_buff *skb;
3228 struct wmi_set_ie_cmd *p;
3229
3230 skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
3231 if (!skb)
3232 return -ENOMEM;
3233
3234 ath6kl_dbg(ATH6KL_DBG_WMI, "set_ie_cmd: ie_id=%u ie_ie_field=%u ie_len=%u\n",
3235 ie_id, ie_field, ie_len);
3236 p = (struct wmi_set_ie_cmd *) skb->data;
3237 p->ie_id = ie_id;
3238 p->ie_field = ie_field;
3239 p->ie_len = ie_len;
3240 if (ie_info && ie_len > 0)
3241 memcpy(p->ie_info, ie_info, ie_len);
3242
3243 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IE_CMDID,
3244 NO_SYNC_WMIFLAG);
3245}
3246
3184int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable) 3247int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
3185{ 3248{
3186 struct sk_buff *skb; 3249 struct sk_buff *skb;
@@ -3392,6 +3455,23 @@ int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx)
3392 WMI_CANCEL_REMAIN_ON_CHNL_CMDID); 3455 WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
3393} 3456}
3394 3457
3458int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout)
3459{
3460 struct sk_buff *skb;
3461 struct wmi_set_inact_period_cmd *cmd;
3462
3463 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
3464 if (!skb)
3465 return -ENOMEM;
3466
3467 cmd = (struct wmi_set_inact_period_cmd *) skb->data;
3468 cmd->inact_period = cpu_to_le32(inact_timeout);
3469 cmd->num_null_func = 0;
3470
3471 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_CONN_INACT_CMDID,
3472 NO_SYNC_WMIFLAG);
3473}
3474
3395static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) 3475static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
3396{ 3476{
3397 struct wmix_cmd_hdr *cmd; 3477 struct wmix_cmd_hdr *cmd;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 4092e3e80790..d3d2ab5c1689 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -182,6 +182,9 @@ enum wmi_data_hdr_flags {
182#define WMI_DATA_HDR_META_MASK 0x7 182#define WMI_DATA_HDR_META_MASK 0x7
183#define WMI_DATA_HDR_META_SHIFT 13 183#define WMI_DATA_HDR_META_SHIFT 13
184 184
185#define WMI_DATA_HDR_PAD_BEFORE_DATA_MASK 0xFF
186#define WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT 0x8
187
185/* Macros for operating on WMI_DATA_HDR (info3) field */ 188/* Macros for operating on WMI_DATA_HDR (info3) field */
186#define WMI_DATA_HDR_IF_IDX_MASK 0xF 189#define WMI_DATA_HDR_IF_IDX_MASK 0xF
187 190
@@ -423,6 +426,7 @@ enum wmi_cmd_id {
423 WMI_SET_FRAMERATES_CMDID, 426 WMI_SET_FRAMERATES_CMDID,
424 WMI_SET_AP_PS_CMDID, 427 WMI_SET_AP_PS_CMDID,
425 WMI_SET_QOS_SUPP_CMDID, 428 WMI_SET_QOS_SUPP_CMDID,
429 WMI_SET_IE_CMDID,
426 430
427 /* WMI_THIN_RESERVED_... mark the start and end 431 /* WMI_THIN_RESERVED_... mark the start and end
428 * values for WMI_THIN_RESERVED command IDs. These 432 * values for WMI_THIN_RESERVED command IDs. These
@@ -629,6 +633,11 @@ enum wmi_mgmt_frame_type {
629 WMI_NUM_MGMT_FRAME 633 WMI_NUM_MGMT_FRAME
630}; 634};
631 635
636enum wmi_ie_field_type {
637 WMI_RSN_IE_CAPB = 0x1,
 638 WMI_IE_FULL = 0xFF, /* indicates full IE */
639};
640
632/* WMI_CONNECT_CMDID */ 641/* WMI_CONNECT_CMDID */
633enum network_type { 642enum network_type {
634 INFRA_NETWORK = 0x01, 643 INFRA_NETWORK = 0x01,
@@ -1268,6 +1277,16 @@ struct wmi_mcast_filter_add_del_cmd {
1268 u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE]; 1277 u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
1269} __packed; 1278} __packed;
1270 1279
1280struct wmi_set_htcap_cmd {
1281 u8 band;
1282 u8 ht_enable;
1283 u8 ht40_supported;
1284 u8 ht20_sgi;
1285 u8 ht40_sgi;
1286 u8 intolerant_40mhz;
1287 u8 max_ampdu_len_exp;
1288} __packed;
1289
1271/* Command Replies */ 1290/* Command Replies */
1272 1291
1273/* WMI_GET_CHANNEL_LIST_CMDID reply */ 1292/* WMI_GET_CHANNEL_LIST_CMDID reply */
@@ -1913,6 +1932,14 @@ struct wmi_set_appie_cmd {
1913 u8 ie_info[0]; 1932 u8 ie_info[0];
1914} __packed; 1933} __packed;
1915 1934
1935struct wmi_set_ie_cmd {
1936 u8 ie_id;
1937 u8 ie_field; /* enum wmi_ie_field_type */
1938 u8 ie_len;
1939 u8 reserved;
1940 u8 ie_info[0];
1941} __packed;
1942
1916/* Notify the WSC registration status to the target */ 1943/* Notify the WSC registration status to the target */
1917#define WSC_REG_ACTIVE 1 1944#define WSC_REG_ACTIVE 1
1918#define WSC_REG_INACTIVE 0 1945#define WSC_REG_INACTIVE 0
@@ -2141,6 +2168,11 @@ struct wmi_ap_hidden_ssid_cmd {
2141 u8 hidden_ssid; 2168 u8 hidden_ssid;
2142} __packed; 2169} __packed;
2143 2170
2171struct wmi_set_inact_period_cmd {
2172 __le32 inact_period;
2173 u8 num_null_func;
2174} __packed;
2175
2144/* AP mode events */ 2176/* AP mode events */
2145struct wmi_ap_set_apsd_cmd { 2177struct wmi_ap_set_apsd_cmd {
2146 u8 enable; 2178 u8 enable;
@@ -2465,6 +2497,9 @@ int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
2465int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg); 2497int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
2466int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, 2498int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
2467 u8 keep_alive_intvl); 2499 u8 keep_alive_intvl);
2500int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
2501 enum ieee80211_band band,
2502 struct ath6kl_htcap *htcap);
2468int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); 2503int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
2469 2504
2470s32 ath6kl_wmi_get_rate(s8 rate_index); 2505s32 ath6kl_wmi_get_rate(s8 rate_index);
@@ -2515,6 +2550,9 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
2515int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, 2550int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
2516 const u8 *ie, u8 ie_len); 2551 const u8 *ie, u8 ie_len);
2517 2552
2553int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
2554 const u8 *ie_info, u8 ie_len);
2555
2518/* P2P */ 2556/* P2P */
2519int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable); 2557int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
2520 2558
@@ -2538,6 +2576,8 @@ int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
2538int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, 2576int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
2539 const u8 *ie, u8 ie_len); 2577 const u8 *ie, u8 ie_len);
2540 2578
2579int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout);
2580
2541void ath6kl_wmi_sscan_timer(unsigned long ptr); 2581void ath6kl_wmi_sscan_timer(unsigned long ptr);
2542 2582
2543struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx); 2583struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 27d95fe5ade0..3f0b84723789 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -11,7 +11,10 @@ ath9k-$(CONFIG_ATH9K_PCI) += pci.o
11ath9k-$(CONFIG_ATH9K_AHB) += ahb.o 11ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
12ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o 12ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
13ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o 13ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
14ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o 14ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
15 dfs.o \
16 dfs_pattern_detector.o \
17 dfs_pri_detector.o
15 18
16obj-$(CONFIG_ATH9K) += ath9k.o 19obj-$(CONFIG_ATH9K) += ath9k.o
17 20
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 7e0ea4e98334..b4c77f9d7470 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -46,8 +46,8 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
46 { 5, 4, 1 }, /* lvl 5 */ 46 { 5, 4, 1 }, /* lvl 5 */
47 { 6, 5, 1 }, /* lvl 6 */ 47 { 6, 5, 1 }, /* lvl 6 */
48 { 7, 6, 1 }, /* lvl 7 */ 48 { 7, 6, 1 }, /* lvl 7 */
49 { 7, 7, 1 }, /* lvl 8 */ 49 { 7, 6, 0 }, /* lvl 8 */
50 { 7, 8, 0 } /* lvl 9 */ 50 { 7, 7, 0 } /* lvl 9 */
51}; 51};
52#define ATH9K_ANI_OFDM_NUM_LEVEL \ 52#define ATH9K_ANI_OFDM_NUM_LEVEL \
53 ARRAY_SIZE(ofdm_level_table) 53 ARRAY_SIZE(ofdm_level_table)
@@ -91,8 +91,8 @@ static const struct ani_cck_level_entry cck_level_table[] = {
91 { 4, 0 }, /* lvl 4 */ 91 { 4, 0 }, /* lvl 4 */
92 { 5, 0 }, /* lvl 5 */ 92 { 5, 0 }, /* lvl 5 */
93 { 6, 0 }, /* lvl 6 */ 93 { 6, 0 }, /* lvl 6 */
94 { 7, 0 }, /* lvl 7 (only for high rssi) */ 94 { 6, 0 }, /* lvl 7 (only for high rssi) */
95 { 8, 0 } /* lvl 8 (only for high rssi) */ 95 { 7, 0 } /* lvl 8 (only for high rssi) */
96}; 96};
97 97
98#define ATH9K_ANI_CCK_NUM_LEVEL \ 98#define ATH9K_ANI_CCK_NUM_LEVEL \
@@ -274,7 +274,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
274 aniState->rssiThrLow, aniState->rssiThrHigh); 274 aniState->rssiThrLow, aniState->rssiThrHigh);
275 275
276 if (aniState->update_ani) 276 if (aniState->update_ani)
277 aniState->ofdmNoiseImmunityLevel = immunityLevel; 277 aniState->ofdmNoiseImmunityLevel =
278 (immunityLevel > ATH9K_ANI_OFDM_DEF_LEVEL) ?
279 immunityLevel : ATH9K_ANI_OFDM_DEF_LEVEL;
278 280
279 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; 281 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
280 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; 282 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -290,16 +292,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
290 ATH9K_ANI_FIRSTEP_LEVEL, 292 ATH9K_ANI_FIRSTEP_LEVEL,
291 entry_ofdm->fir_step_level); 293 entry_ofdm->fir_step_level);
292 294
293 if ((ah->opmode != NL80211_IFTYPE_STATION && 295 if ((aniState->noiseFloor >= aniState->rssiThrHigh) &&
294 ah->opmode != NL80211_IFTYPE_ADHOC) || 296 (!aniState->ofdmWeakSigDetectOff !=
295 aniState->noiseFloor <= aniState->rssiThrHigh) { 297 entry_ofdm->ofdm_weak_signal_on)) {
296 if (aniState->ofdmWeakSigDetectOff)
297 /* force on ofdm weak sig detect */
298 ath9k_hw_ani_control(ah,
299 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
300 true);
301 else if (aniState->ofdmWeakSigDetectOff ==
302 entry_ofdm->ofdm_weak_signal_on)
303 ath9k_hw_ani_control(ah, 298 ath9k_hw_ani_control(ah,
304 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, 299 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
305 entry_ofdm->ofdm_weak_signal_on); 300 entry_ofdm->ofdm_weak_signal_on);
@@ -347,7 +342,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
347 immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI; 342 immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
348 343
349 if (aniState->update_ani) 344 if (aniState->update_ani)
350 aniState->cckNoiseImmunityLevel = immunityLevel; 345 aniState->cckNoiseImmunityLevel =
346 (immunityLevel > ATH9K_ANI_CCK_DEF_LEVEL) ?
347 immunityLevel : ATH9K_ANI_CCK_DEF_LEVEL;
351 348
352 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; 349 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
353 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; 350 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -717,26 +714,30 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
717 ofdmPhyErrRate, aniState->cckNoiseImmunityLevel, 714 ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
718 cckPhyErrRate, aniState->ofdmsTurn); 715 cckPhyErrRate, aniState->ofdmsTurn);
719 716
720 if (aniState->listenTime > 5 * ah->aniperiod) { 717 if (aniState->listenTime > ah->aniperiod) {
721 if (ofdmPhyErrRate <= ah->config.ofdm_trig_low && 718 if (cckPhyErrRate < ah->config.cck_trig_low &&
722 cckPhyErrRate <= ah->config.cck_trig_low) { 719 ((ofdmPhyErrRate < ah->config.ofdm_trig_low &&
720 aniState->ofdmNoiseImmunityLevel <
721 ATH9K_ANI_OFDM_DEF_LEVEL) ||
722 (ofdmPhyErrRate < ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI &&
723 aniState->ofdmNoiseImmunityLevel >=
724 ATH9K_ANI_OFDM_DEF_LEVEL))) {
723 ath9k_hw_ani_lower_immunity(ah); 725 ath9k_hw_ani_lower_immunity(ah);
724 aniState->ofdmsTurn = !aniState->ofdmsTurn; 726 aniState->ofdmsTurn = !aniState->ofdmsTurn;
725 } 727 } else if ((ofdmPhyErrRate > ah->config.ofdm_trig_high &&
726 ath9k_ani_restart(ah); 728 aniState->ofdmNoiseImmunityLevel >=
727 } else if (aniState->listenTime > ah->aniperiod) { 729 ATH9K_ANI_OFDM_DEF_LEVEL) ||
728 /* check to see if need to raise immunity */ 730 (ofdmPhyErrRate >
729 if (ofdmPhyErrRate > ah->config.ofdm_trig_high && 731 ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI &&
730 (cckPhyErrRate <= ah->config.cck_trig_high || 732 aniState->ofdmNoiseImmunityLevel <
731 aniState->ofdmsTurn)) { 733 ATH9K_ANI_OFDM_DEF_LEVEL)) {
732 ath9k_hw_ani_ofdm_err_trigger(ah); 734 ath9k_hw_ani_ofdm_err_trigger(ah);
733 ath9k_ani_restart(ah);
734 aniState->ofdmsTurn = false; 735 aniState->ofdmsTurn = false;
735 } else if (cckPhyErrRate > ah->config.cck_trig_high) { 736 } else if (cckPhyErrRate > ah->config.cck_trig_high) {
736 ath9k_hw_ani_cck_err_trigger(ah); 737 ath9k_hw_ani_cck_err_trigger(ah);
737 ath9k_ani_restart(ah);
738 aniState->ofdmsTurn = true; 738 aniState->ofdmsTurn = true;
739 } 739 }
740 ath9k_ani_restart(ah);
740 } 741 }
741} 742}
742EXPORT_SYMBOL(ath9k_hw_ani_monitor); 743EXPORT_SYMBOL(ath9k_hw_ani_monitor);
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 83029d6c7b22..72e2b874e179 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -25,11 +25,13 @@
25 25
26/* units are errors per second */ 26/* units are errors per second */
27#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500 27#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
28#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 1000 28#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 3500
29#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
29 30
30/* units are errors per second */ 31/* units are errors per second */
31#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200 32#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
32#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400 33#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400
34#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
33 35
34/* units are errors per second */ 36/* units are errors per second */
35#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200 37#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
@@ -53,7 +55,7 @@
53#define ATH9K_ANI_RSSI_THR_LOW 7 55#define ATH9K_ANI_RSSI_THR_LOW 7
54 56
55#define ATH9K_ANI_PERIOD_OLD 100 57#define ATH9K_ANI_PERIOD_OLD 100
56#define ATH9K_ANI_PERIOD_NEW 1000 58#define ATH9K_ANI_PERIOD_NEW 300
57 59
58/* in ms */ 60/* in ms */
59#define ATH9K_ANI_POLLINTERVAL_OLD 100 61#define ATH9K_ANI_POLLINTERVAL_OLD 100
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index d7d8e9199140..c7492c6a2519 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -245,7 +245,6 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
245 REG_WRITE(ah, AR_PHY(0x37), reg32); 245 REG_WRITE(ah, AR_PHY(0x37), reg32);
246 246
247 ah->curchan = chan; 247 ah->curchan = chan;
248 ah->curchan_rad_index = -1;
249 248
250 return 0; 249 return 0;
251} 250}
@@ -619,19 +618,10 @@ static void ar5008_hw_init_bb(struct ath_hw *ah,
619 u32 synthDelay; 618 u32 synthDelay;
620 619
621 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 620 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
622 if (IS_CHAN_B(chan))
623 synthDelay = (4 * synthDelay) / 22;
624 else
625 synthDelay /= 10;
626
627 if (IS_CHAN_HALF_RATE(chan))
628 synthDelay *= 2;
629 else if (IS_CHAN_QUARTER_RATE(chan))
630 synthDelay *= 4;
631 621
632 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); 622 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
633 623
634 udelay(synthDelay + BASE_ACTIVATE_DELAY); 624 ath9k_hw_synth_delay(ah, chan, synthDelay);
635} 625}
636 626
637static void ar5008_hw_init_chain_masks(struct ath_hw *ah) 627static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
@@ -869,7 +859,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
869 ar5008_hw_set_channel_regs(ah, chan); 859 ar5008_hw_set_channel_regs(ah, chan);
870 ar5008_hw_init_chain_masks(ah); 860 ar5008_hw_init_chain_masks(ah);
871 ath9k_olc_init(ah); 861 ath9k_olc_init(ah);
872 ath9k_hw_apply_txpower(ah, chan); 862 ath9k_hw_apply_txpower(ah, chan, false);
873 863
874 /* Write analog registers */ 864 /* Write analog registers */
875 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { 865 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
@@ -949,12 +939,8 @@ static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
949static void ar5008_hw_rfbus_done(struct ath_hw *ah) 939static void ar5008_hw_rfbus_done(struct ath_hw *ah)
950{ 940{
951 u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 941 u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
952 if (IS_CHAN_B(ah->curchan))
953 synthDelay = (4 * synthDelay) / 22;
954 else
955 synthDelay /= 10;
956 942
957 udelay(synthDelay + BASE_ACTIVATE_DELAY); 943 ath9k_hw_synth_delay(ah, ah->curchan, synthDelay);
958 944
959 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); 945 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
960} 946}
@@ -1047,46 +1033,8 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1047 break; 1033 break;
1048 } 1034 }
1049 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ 1035 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
1050 static const int m1ThreshLow[] = { 127, 50 };
1051 static const int m2ThreshLow[] = { 127, 40 };
1052 static const int m1Thresh[] = { 127, 0x4d };
1053 static const int m2Thresh[] = { 127, 0x40 };
1054 static const int m2CountThr[] = { 31, 16 };
1055 static const int m2CountThrLow[] = { 63, 48 };
1056 u32 on = param ? 1 : 0; 1036 u32 on = param ? 1 : 0;
1057 1037
1058 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
1059 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
1060 m1ThreshLow[on]);
1061 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
1062 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
1063 m2ThreshLow[on]);
1064 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
1065 AR_PHY_SFCORR_M1_THRESH,
1066 m1Thresh[on]);
1067 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
1068 AR_PHY_SFCORR_M2_THRESH,
1069 m2Thresh[on]);
1070 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
1071 AR_PHY_SFCORR_M2COUNT_THR,
1072 m2CountThr[on]);
1073 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
1074 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
1075 m2CountThrLow[on]);
1076
1077 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1078 AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
1079 m1ThreshLow[on]);
1080 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1081 AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
1082 m2ThreshLow[on]);
1083 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1084 AR_PHY_SFCORR_EXT_M1_THRESH,
1085 m1Thresh[on]);
1086 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1087 AR_PHY_SFCORR_EXT_M2_THRESH,
1088 m2Thresh[on]);
1089
1090 if (on) 1038 if (on)
1091 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, 1039 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
1092 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 1040 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index aa2abaf31cba..8d78253c26ce 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -136,6 +136,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
136 } 136 }
137 137
138 if (sync_cause) { 138 if (sync_cause) {
139 ath9k_debug_sync_cause(common, sync_cause);
139 fatal_int = 140 fatal_int =
140 (sync_cause & 141 (sync_cause &
141 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR)) 142 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 3cbbb033fcea..846dd7974eb8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -152,7 +152,6 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
152 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32); 152 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
153 153
154 ah->curchan = chan; 154 ah->curchan = chan;
155 ah->curchan_rad_index = -1;
156 155
157 return 0; 156 return 0;
158} 157}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 46c79a3d4737..952cb2b4656b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -777,11 +777,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
777 {0x0000a074, 0x00000000}, 777 {0x0000a074, 0x00000000},
778 {0x0000a078, 0x00000000}, 778 {0x0000a078, 0x00000000},
779 {0x0000a07c, 0x00000000}, 779 {0x0000a07c, 0x00000000},
780 {0x0000a080, 0x22222229}, 780 {0x0000a080, 0x1a1a1a1a},
781 {0x0000a084, 0x1d1d1d1d}, 781 {0x0000a084, 0x1a1a1a1a},
782 {0x0000a088, 0x1d1d1d1d}, 782 {0x0000a088, 0x1a1a1a1a},
783 {0x0000a08c, 0x1d1d1d1d}, 783 {0x0000a08c, 0x1a1a1a1a},
784 {0x0000a090, 0x171d1d1d}, 784 {0x0000a090, 0x171a1a1a},
785 {0x0000a094, 0x11111717}, 785 {0x0000a094, 0x11111717},
786 {0x0000a098, 0x00030311}, 786 {0x0000a098, 0x00030311},
787 {0x0000a09c, 0x00000000}, 787 {0x0000a09c, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 63089cc1fafd..a0387a027db0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1000,10 +1000,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
1000 if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal) 1000 if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal)
1001 ar9003_mci_init_cal_req(ah, &is_reusable); 1001 ar9003_mci_init_cal_req(ah, &is_reusable);
1002 1002
1003 txiqcal_done = ar9003_hw_tx_iq_cal_run(ah); 1003 if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) {
1004 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); 1004 txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
1005 udelay(5); 1005 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
1006 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); 1006 udelay(5);
1007 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
1008 }
1007 1009
1008skip_tx_iqcal: 1010skip_tx_iqcal:
1009 if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { 1011 if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 6bb4db052bb0..ac53d901801d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -30,11 +30,6 @@
30#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) 30#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
31#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) 31#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
32#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) 32#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
33#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
34#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
35#define PWRINCR_3_TO_1_CHAIN 9 /* 10*log(3)*2 */
36#define PWRINCR_3_TO_2_CHAIN 3 /* floor(10*log(3/2)*2) */
37#define PWRINCR_2_TO_1_CHAIN 6 /* 10*log(2)*2 */
38 33
39#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ 34#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */
40#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ 35#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */
@@ -2936,15 +2931,6 @@ static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id)
2936#undef N_LOOP 2931#undef N_LOOP
2937} 2932}
2938 2933
2939
2940static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
2941{
2942 if (fbin == AR5416_BCHAN_UNUSED)
2943 return fbin;
2944
2945 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
2946}
2947
2948static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah) 2934static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
2949{ 2935{
2950 return 0; 2936 return 0;
@@ -4070,7 +4056,7 @@ static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
4070 * targetpower piers stored on eeprom 4056 * targetpower piers stored on eeprom
4071 */ 4057 */
4072 for (i = 0; i < numPiers; i++) { 4058 for (i = 0; i < numPiers; i++) {
4073 freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz); 4059 freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
4074 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; 4060 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
4075 } 4061 }
4076 4062
@@ -4106,7 +4092,7 @@ static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
4106 * from targetpower piers stored on eeprom 4092 * from targetpower piers stored on eeprom
4107 */ 4093 */
4108 for (i = 0; i < numPiers; i++) { 4094 for (i = 0; i < numPiers; i++) {
4109 freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz); 4095 freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
4110 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; 4096 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
4111 } 4097 }
4112 4098
@@ -4142,7 +4128,7 @@ static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
4142 * targetpower piers stored on eeprom 4128 * targetpower piers stored on eeprom
4143 */ 4129 */
4144 for (i = 0; i < numPiers; i++) { 4130 for (i = 0; i < numPiers; i++) {
4145 freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz); 4131 freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
4146 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; 4132 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
4147 } 4133 }
4148 4134
@@ -4167,7 +4153,7 @@ static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
4167 * targetpower piers stored on eeprom 4153 * targetpower piers stored on eeprom
4168 */ 4154 */
4169 for (i = 0; i < numPiers; i++) { 4155 for (i = 0; i < numPiers; i++) {
4170 freqArray[i] = FBIN2FREQ(pFreqBin[i], 1); 4156 freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], 1);
4171 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; 4157 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
4172 } 4158 }
4173 4159
@@ -4295,18 +4281,10 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4295#undef POW_SM 4281#undef POW_SM
4296} 4282}
4297 4283
4298static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, 4284static void ar9003_hw_get_legacy_target_powers(struct ath_hw *ah, u16 freq,
4299 u8 *targetPowerValT2) 4285 u8 *targetPowerValT2,
4286 bool is2GHz)
4300{ 4287{
4301 /* XXX: hard code for now, need to get from eeprom struct */
4302 u8 ht40PowerIncForPdadc = 0;
4303 bool is2GHz = false;
4304 unsigned int i = 0;
4305 struct ath_common *common = ath9k_hw_common(ah);
4306
4307 if (freq < 4000)
4308 is2GHz = true;
4309
4310 targetPowerValT2[ALL_TARGET_LEGACY_6_24] = 4288 targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
4311 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq, 4289 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
4312 is2GHz); 4290 is2GHz);
@@ -4319,6 +4297,11 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
4319 targetPowerValT2[ALL_TARGET_LEGACY_54] = 4297 targetPowerValT2[ALL_TARGET_LEGACY_54] =
4320 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq, 4298 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
4321 is2GHz); 4299 is2GHz);
4300}
4301
4302static void ar9003_hw_get_cck_target_powers(struct ath_hw *ah, u16 freq,
4303 u8 *targetPowerValT2)
4304{
4322 targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] = 4305 targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
4323 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L, 4306 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
4324 freq); 4307 freq);
@@ -4328,6 +4311,11 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
4328 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq); 4311 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
4329 targetPowerValT2[ALL_TARGET_LEGACY_11S] = 4312 targetPowerValT2[ALL_TARGET_LEGACY_11S] =
4330 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq); 4313 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
4314}
4315
4316static void ar9003_hw_get_ht20_target_powers(struct ath_hw *ah, u16 freq,
4317 u8 *targetPowerValT2, bool is2GHz)
4318{
4331 targetPowerValT2[ALL_TARGET_HT20_0_8_16] = 4319 targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
4332 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq, 4320 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
4333 is2GHz); 4321 is2GHz);
@@ -4370,6 +4358,16 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
4370 targetPowerValT2[ALL_TARGET_HT20_23] = 4358 targetPowerValT2[ALL_TARGET_HT20_23] =
4371 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq, 4359 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
4372 is2GHz); 4360 is2GHz);
4361}
4362
4363static void ar9003_hw_get_ht40_target_powers(struct ath_hw *ah,
4364 u16 freq,
4365 u8 *targetPowerValT2,
4366 bool is2GHz)
4367{
4368 /* XXX: hard code for now, need to get from eeprom struct */
4369 u8 ht40PowerIncForPdadc = 0;
4370
4373 targetPowerValT2[ALL_TARGET_HT40_0_8_16] = 4371 targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
4374 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq, 4372 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
4375 is2GHz) + ht40PowerIncForPdadc; 4373 is2GHz) + ht40PowerIncForPdadc;
@@ -4413,6 +4411,26 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
4413 targetPowerValT2[ALL_TARGET_HT40_23] = 4411 targetPowerValT2[ALL_TARGET_HT40_23] =
4414 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq, 4412 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
4415 is2GHz) + ht40PowerIncForPdadc; 4413 is2GHz) + ht40PowerIncForPdadc;
4414}
4415
4416static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
4417 struct ath9k_channel *chan,
4418 u8 *targetPowerValT2)
4419{
4420 bool is2GHz = IS_CHAN_2GHZ(chan);
4421 unsigned int i = 0;
4422 struct ath_common *common = ath9k_hw_common(ah);
4423 u16 freq = chan->channel;
4424
4425 if (is2GHz)
4426 ar9003_hw_get_cck_target_powers(ah, freq, targetPowerValT2);
4427
4428 ar9003_hw_get_legacy_target_powers(ah, freq, targetPowerValT2, is2GHz);
4429 ar9003_hw_get_ht20_target_powers(ah, freq, targetPowerValT2, is2GHz);
4430
4431 if (IS_CHAN_HT40(chan))
4432 ar9003_hw_get_ht40_target_powers(ah, freq, targetPowerValT2,
4433 is2GHz);
4416 4434
4417 for (i = 0; i < ar9300RateSize; i++) { 4435 for (i = 0; i < ar9300RateSize; i++) {
4418 ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", 4436 ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
@@ -4464,7 +4482,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
4464 is2GHz = 1; 4482 is2GHz = 1;
4465 } 4483 }
4466 4484
4467 *pfrequency = FBIN2FREQ(*pCalPier, is2GHz); 4485 *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2GHz);
4468 *pcorrection = pCalPierStruct->refPower; 4486 *pcorrection = pCalPierStruct->refPower;
4469 *ptemperature = pCalPierStruct->tempMeas; 4487 *ptemperature = pCalPierStruct->tempMeas;
4470 *pvoltage = pCalPierStruct->voltMeas; 4488 *pvoltage = pCalPierStruct->voltMeas;
@@ -4789,34 +4807,9 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
4789 bool is2ghz = IS_CHAN_2GHZ(chan); 4807 bool is2ghz = IS_CHAN_2GHZ(chan);
4790 4808
4791 ath9k_hw_get_channel_centers(ah, chan, &centers); 4809 ath9k_hw_get_channel_centers(ah, chan, &centers);
4792 scaledPower = powerLimit - antenna_reduction; 4810 scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
4793 4811 antenna_reduction);
4794 /*
4795 * Reduce scaled Power by number of chains active to get
4796 * to per chain tx power level
4797 */
4798 switch (ar5416_get_ntxchains(ah->txchainmask)) {
4799 case 1:
4800 break;
4801 case 2:
4802 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
4803 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
4804 else
4805 scaledPower = 0;
4806 break;
4807 case 3:
4808 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
4809 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
4810 else
4811 scaledPower = 0;
4812 break;
4813 }
4814 4812
4815 scaledPower = max((u16)0, scaledPower);
4816
4817 /*
4818 * Get target powers from EEPROM - our baseline for TX Power
4819 */
4820 if (is2ghz) { 4813 if (is2ghz) {
4821 /* Setup for CTL modes */ 4814 /* Setup for CTL modes */
4822 /* CTL_11B, CTL_11G, CTL_2GHT20 */ 4815 /* CTL_11B, CTL_11G, CTL_2GHT20 */
@@ -4988,7 +4981,12 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
4988 unsigned int i = 0, paprd_scale_factor = 0; 4981 unsigned int i = 0, paprd_scale_factor = 0;
4989 u8 pwr_idx, min_pwridx = 0; 4982 u8 pwr_idx, min_pwridx = 0;
4990 4983
4991 ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2); 4984 memset(targetPowerValT2, 0 , sizeof(targetPowerValT2));
4985
4986 /*
4987 * Get target powers from EEPROM - our baseline for TX Power
4988 */
4989 ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2);
4992 4990
4993 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) { 4991 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) {
4994 if (IS_CHAN_2GHZ(chan)) 4992 if (IS_CHAN_2GHZ(chan))
@@ -5060,8 +5058,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
5060 i, targetPowerValT2[i]); 5058 i, targetPowerValT2[i]);
5061 } 5059 }
5062 5060
5063 ah->txpower_limit = regulatory->max_power_level;
5064
5065 /* Write target power array to registers */ 5061 /* Write target power array to registers */
5066 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); 5062 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
5067 ar9003_hw_calibration_apply(ah, chan->channel); 5063 ar9003_hw_calibration_apply(ah, chan->channel);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index bb223fe82816..2505ac44f0c1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -42,7 +42,6 @@
42#define AR9300_EEPMISC_WOW 0x02 42#define AR9300_EEPMISC_WOW 0x02
43#define AR9300_CUSTOMER_DATA_SIZE 20 43#define AR9300_CUSTOMER_DATA_SIZE 20
44 44
45#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
46#define AR9300_MAX_CHAINS 3 45#define AR9300_MAX_CHAINS 3
47#define AR9300_ANT_16S 25 46#define AR9300_ANT_16S 25
48#define AR9300_FUTURE_MODAL_SZ 6 47#define AR9300_FUTURE_MODAL_SZ 6
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 0f56e322dd3b..a0e3394b10dc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -305,11 +305,6 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
305 ar9462_common_rx_gain_table_2p0, 305 ar9462_common_rx_gain_table_2p0,
306 ARRAY_SIZE(ar9462_common_rx_gain_table_2p0), 2); 306 ARRAY_SIZE(ar9462_common_rx_gain_table_2p0), 2);
307 307
308 INIT_INI_ARRAY(&ah->ini_BTCOEX_MAX_TXPWR,
309 ar9462_2p0_BTCOEX_MAX_TXPWR_table,
310 ARRAY_SIZE(ar9462_2p0_BTCOEX_MAX_TXPWR_table),
311 2);
312
313 /* Awake -> Sleep Setting */ 308 /* Awake -> Sleep Setting */
314 INIT_INI_ARRAY(&ah->iniPcieSerdes, 309 INIT_INI_ARRAY(&ah->iniPcieSerdes,
315 PCIE_PLL_ON_CREQ_DIS_L1_2P0, 310 PCIE_PLL_ON_CREQ_DIS_L1_2P0,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index a66a13b76848..d9e0824af093 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -306,6 +306,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
306 ar9003_mci_get_isr(ah, masked); 306 ar9003_mci_get_isr(ah, masked);
307 307
308 if (sync_cause) { 308 if (sync_cause) {
309 ath9k_debug_sync_cause(common, sync_cause);
310
309 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { 311 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
310 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); 312 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
311 REG_WRITE(ah, AR_RC, 0); 313 REG_WRITE(ah, AR_RC, 0);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 59647a3ceb7f..3d400e8d6535 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -54,7 +54,7 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val)
54 54
55 if (val) { 55 if (val) {
56 ah->paprd_table_write_done = true; 56 ah->paprd_table_write_done = true;
57 ath9k_hw_apply_txpower(ah, chan); 57 ath9k_hw_apply_txpower(ah, chan, false);
58 } 58 }
59 59
60 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0, 60 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index bc992b237ae5..11abb972be1f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -152,7 +152,6 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
152 REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32); 152 REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
153 153
154 ah->curchan = chan; 154 ah->curchan = chan;
155 ah->curchan_rad_index = -1;
156 155
157 return 0; 156 return 0;
158} 157}
@@ -209,11 +208,12 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
209 continue; 208 continue;
210 negative = 0; 209 negative = 0;
211 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) 210 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
212 cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i], 211 cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
213 IS_CHAN_2GHZ(chan)) - synth_freq; 212 IS_CHAN_2GHZ(chan));
214 else 213 else
215 cur_bb_spur = spur_freq[i] - synth_freq; 214 cur_bb_spur = spur_freq[i];
216 215
216 cur_bb_spur -= synth_freq;
217 if (cur_bb_spur < 0) { 217 if (cur_bb_spur < 0) {
218 negative = 1; 218 negative = 1;
219 cur_bb_spur = -cur_bb_spur; 219 cur_bb_spur = -cur_bb_spur;
@@ -373,7 +373,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
373 else 373 else
374 spur_subchannel_sd = 0; 374 spur_subchannel_sd = 0;
375 375
376 spur_freq_sd = (freq_offset << 9) / 11; 376 spur_freq_sd = ((freq_offset + 10) << 9) / 11;
377 377
378 } else { 378 } else {
379 if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL, 379 if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
@@ -382,7 +382,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
382 else 382 else
383 spur_subchannel_sd = 1; 383 spur_subchannel_sd = 1;
384 384
385 spur_freq_sd = (freq_offset << 9) / 11; 385 spur_freq_sd = ((freq_offset - 10) << 9) / 11;
386 386
387 } 387 }
388 388
@@ -443,7 +443,8 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
443 ar9003_hw_spur_ofdm_clear(ah); 443 ar9003_hw_spur_ofdm_clear(ah);
444 444
445 for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) { 445 for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) {
446 freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq; 446 freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
447 freq_offset -= synth_freq;
447 if (abs(freq_offset) < range) { 448 if (abs(freq_offset) < range) {
448 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset); 449 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
449 break; 450 break;
@@ -525,22 +526,10 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
525 * Value is in 100ns increments. 526 * Value is in 100ns increments.
526 */ 527 */
527 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 528 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
528 if (IS_CHAN_B(chan))
529 synthDelay = (4 * synthDelay) / 22;
530 else
531 synthDelay /= 10;
532 529
533 /* Activate the PHY (includes baseband activate + synthesizer on) */ 530 /* Activate the PHY (includes baseband activate + synthesizer on) */
534 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); 531 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
535 532 ath9k_hw_synth_delay(ah, chan, synthDelay);
536 /*
537 * There is an issue if the AP starts the calibration before
538 * the base band timeout completes. This could result in the
539 * rx_clear false triggering. As a workaround we add delay an
540 * extra BASE_ACTIVATE_DELAY usecs to ensure this condition
541 * does not happen.
542 */
543 udelay(synthDelay + BASE_ACTIVATE_DELAY);
544} 533}
545 534
546static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx) 535static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
@@ -684,9 +673,6 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
684 673
685 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); 674 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
686 675
687 if (AR_SREV_9462(ah))
688 ar9003_hw_prog_ini(ah, &ah->ini_BTCOEX_MAX_TXPWR, 1);
689
690 if (chan->channel == 2484) 676 if (chan->channel == 2484)
691 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1); 677 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
692 678
@@ -694,7 +680,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
694 ar9003_hw_override_ini(ah); 680 ar9003_hw_override_ini(ah);
695 ar9003_hw_set_channel_regs(ah, chan); 681 ar9003_hw_set_channel_regs(ah, chan);
696 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 682 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
697 ath9k_hw_apply_txpower(ah, chan); 683 ath9k_hw_apply_txpower(ah, chan, false);
698 684
699 if (AR_SREV_9462(ah)) { 685 if (AR_SREV_9462(ah)) {
700 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0, 686 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
@@ -725,6 +711,14 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
725 711
726 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 712 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
727 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE); 713 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
714 if (IS_CHAN_QUARTER_RATE(chan))
715 rfMode |= AR_PHY_MODE_QUARTER;
716 if (IS_CHAN_HALF_RATE(chan))
717 rfMode |= AR_PHY_MODE_HALF;
718
719 if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
720 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
721 AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW, 3);
728 722
729 REG_WRITE(ah, AR_PHY_MODE, rfMode); 723 REG_WRITE(ah, AR_PHY_MODE, rfMode);
730} 724}
@@ -795,12 +789,8 @@ static bool ar9003_hw_rfbus_req(struct ath_hw *ah)
795static void ar9003_hw_rfbus_done(struct ath_hw *ah) 789static void ar9003_hw_rfbus_done(struct ath_hw *ah)
796{ 790{
797 u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 791 u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
798 if (IS_CHAN_B(ah->curchan))
799 synthDelay = (4 * synthDelay) / 22;
800 else
801 synthDelay /= 10;
802 792
803 udelay(synthDelay + BASE_ACTIVATE_DELAY); 793 ath9k_hw_synth_delay(ah, ah->curchan, synthDelay);
804 794
805 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); 795 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
806} 796}
@@ -823,55 +813,6 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
823 * on == 0 means more noise imm 813 * on == 0 means more noise imm
824 */ 814 */
825 u32 on = param ? 1 : 0; 815 u32 on = param ? 1 : 0;
826 /*
827 * make register setting for default
828 * (weak sig detect ON) come from INI file
829 */
830 int m1ThreshLow = on ?
831 aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
832 int m2ThreshLow = on ?
833 aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
834 int m1Thresh = on ?
835 aniState->iniDef.m1Thresh : m1Thresh_off;
836 int m2Thresh = on ?
837 aniState->iniDef.m2Thresh : m2Thresh_off;
838 int m2CountThr = on ?
839 aniState->iniDef.m2CountThr : m2CountThr_off;
840 int m2CountThrLow = on ?
841 aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
842 int m1ThreshLowExt = on ?
843 aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
844 int m2ThreshLowExt = on ?
845 aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
846 int m1ThreshExt = on ?
847 aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
848 int m2ThreshExt = on ?
849 aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
850
851 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
852 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
853 m1ThreshLow);
854 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
855 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
856 m2ThreshLow);
857 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
858 AR_PHY_SFCORR_M1_THRESH, m1Thresh);
859 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
860 AR_PHY_SFCORR_M2_THRESH, m2Thresh);
861 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
862 AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
863 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
864 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
865 m2CountThrLow);
866
867 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
868 AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
869 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
870 AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
871 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
872 AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
873 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
874 AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
875 816
876 if (on) 817 if (on)
877 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, 818 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index d834d97fe727..7268a48a92a1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -468,6 +468,9 @@
468#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150) 468#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150)
469#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158) 469#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158)
470 470
471#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW 3
472#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW_S 0
473
471#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00 474#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00
472#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10 475#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10
473#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF 476#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index b6ba1e8149be..1d6658e139b5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -1115,9 +1115,9 @@ static const u32 ar9462_2p0_mac_core[][2] = {
1115 {0x000081f8, 0x00000000}, 1115 {0x000081f8, 0x00000000},
1116 {0x000081fc, 0x00000000}, 1116 {0x000081fc, 0x00000000},
1117 {0x00008240, 0x00100000}, 1117 {0x00008240, 0x00100000},
1118 {0x00008244, 0x0010f400}, 1118 {0x00008244, 0x0010f424},
1119 {0x00008248, 0x00000800}, 1119 {0x00008248, 0x00000800},
1120 {0x0000824c, 0x0001e800}, 1120 {0x0000824c, 0x0001e848},
1121 {0x00008250, 0x00000000}, 1121 {0x00008250, 0x00000000},
1122 {0x00008254, 0x00000000}, 1122 {0x00008254, 0x00000000},
1123 {0x00008258, 0x00000000}, 1123 {0x00008258, 0x00000000},
@@ -1448,16 +1448,4 @@ static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
1448 {0x0000b1fc, 0x00000196}, 1448 {0x0000b1fc, 0x00000196},
1449}; 1449};
1450 1450
1451static const u32 ar9462_2p0_BTCOEX_MAX_TXPWR_table[][2] = {
1452 /* Addr allmodes */
1453 {0x000018c0, 0x10101010},
1454 {0x000018c4, 0x10101010},
1455 {0x000018c8, 0x10101010},
1456 {0x000018cc, 0x10101010},
1457 {0x000018d0, 0x10101010},
1458 {0x000018d4, 0x10101010},
1459 {0x000018d8, 0x10101010},
1460 {0x000018dc, 0x10101010},
1461};
1462
1463#endif /* INITVALS_9462_2P0_H */ 1451#endif /* INITVALS_9462_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 8c84049682ab..a277cf6f339d 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -26,6 +26,7 @@
26#include "debug.h" 26#include "debug.h"
27#include "common.h" 27#include "common.h"
28#include "mci.h" 28#include "mci.h"
29#include "dfs.h"
29 30
30/* 31/*
31 * Header for the ath9k.ko driver core *only* -- hw code nor any other driver 32 * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
@@ -369,7 +370,7 @@ struct ath_vif {
369 * number of beacon intervals, the game's up. 370 * number of beacon intervals, the game's up.
370 */ 371 */
371#define BSTUCK_THRESH 9 372#define BSTUCK_THRESH 9
372#define ATH_BCBUF 4 373#define ATH_BCBUF 8
373#define ATH_DEFAULT_BINTVAL 100 /* TU */ 374#define ATH_DEFAULT_BINTVAL 100 /* TU */
374#define ATH_DEFAULT_BMISS_LIMIT 10 375#define ATH_DEFAULT_BMISS_LIMIT 10
375#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) 376#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
@@ -430,6 +431,8 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
430void ath_reset_work(struct work_struct *work); 431void ath_reset_work(struct work_struct *work);
431void ath_hw_check(struct work_struct *work); 432void ath_hw_check(struct work_struct *work);
432void ath_hw_pll_work(struct work_struct *work); 433void ath_hw_pll_work(struct work_struct *work);
434void ath_rx_poll(unsigned long data);
435void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
433void ath_paprd_calibrate(struct work_struct *work); 436void ath_paprd_calibrate(struct work_struct *work);
434void ath_ani_calibrate(unsigned long data); 437void ath_ani_calibrate(unsigned long data);
435void ath_start_ani(struct ath_common *common); 438void ath_start_ani(struct ath_common *common);
@@ -670,6 +673,7 @@ struct ath_softc {
670 struct ath_beacon_config cur_beacon_conf; 673 struct ath_beacon_config cur_beacon_conf;
671 struct delayed_work tx_complete_work; 674 struct delayed_work tx_complete_work;
672 struct delayed_work hw_pll_work; 675 struct delayed_work hw_pll_work;
676 struct timer_list rx_poll_timer;
673 677
674#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 678#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
675 struct ath_btcoex btcoex; 679 struct ath_btcoex btcoex;
@@ -680,6 +684,7 @@ struct ath_softc {
680 684
681 struct ath_ant_comb ant_comb; 685 struct ath_ant_comb ant_comb;
682 u8 ant_tx, ant_rx; 686 u8 ant_tx, ant_rx;
687 struct dfs_pattern_detector *dfs_detector;
683}; 688};
684 689
685void ath9k_tasklet(unsigned long data); 690void ath9k_tasklet(unsigned long data);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 626418222c85..11bc55e3d697 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -91,7 +91,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
91 info.txpower = MAX_RATE_POWER; 91 info.txpower = MAX_RATE_POWER;
92 info.keyix = ATH9K_TXKEYIX_INVALID; 92 info.keyix = ATH9K_TXKEYIX_INVALID;
93 info.keytype = ATH9K_KEY_TYPE_CLEAR; 93 info.keytype = ATH9K_KEY_TYPE_CLEAR;
94 info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_INTREQ; 94 info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_CLRDMASK;
95 95
96 info.buf_addr[0] = bf->bf_buf_addr; 96 info.buf_addr[0] = bf->bf_buf_addr;
97 info.buf_len[0] = roundup(skb->len, 4); 97 info.buf_len[0] = roundup(skb->len, 4);
@@ -359,6 +359,11 @@ void ath_beacon_tasklet(unsigned long data)
359 int slot; 359 int slot;
360 u32 bfaddr, bc = 0; 360 u32 bfaddr, bc = 0;
361 361
362 if (work_pending(&sc->hw_reset_work)) {
363 ath_dbg(common, RESET,
364 "reset work is pending, skip beaconing now\n");
365 return;
366 }
362 /* 367 /*
363 * Check if the previous beacon has gone out. If 368 * Check if the previous beacon has gone out. If
364 * not don't try to post another, skip this period 369 * not don't try to post another, skip this period
@@ -369,6 +374,9 @@ void ath_beacon_tasklet(unsigned long data)
369 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) { 374 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
370 sc->beacon.bmisscnt++; 375 sc->beacon.bmisscnt++;
371 376
377 if (!ath9k_hw_check_alive(ah))
378 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
379
372 if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) { 380 if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
373 ath_dbg(common, BSTUCK, 381 ath_dbg(common, BSTUCK,
374 "missed %u consecutive beacons\n", 382 "missed %u consecutive beacons\n",
@@ -378,6 +386,7 @@ void ath_beacon_tasklet(unsigned long data)
378 ath9k_hw_bstuck_nfcal(ah); 386 ath9k_hw_bstuck_nfcal(ah);
379 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 387 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
380 ath_dbg(common, BSTUCK, "beacon is officially stuck\n"); 388 ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
389 sc->beacon.bmisscnt = 0;
381 sc->sc_flags |= SC_OP_TSF_RESET; 390 sc->sc_flags |= SC_OP_TSF_RESET;
382 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 391 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
383 } 392 }
@@ -650,6 +659,8 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
650 u32 tsf, intval, nexttbtt; 659 u32 tsf, intval, nexttbtt;
651 660
652 ath9k_reset_beacon_status(sc); 661 ath9k_reset_beacon_status(sc);
662 if (!(sc->sc_flags & SC_OP_BEACONS))
663 ath9k_hw_settsf64(ah, sc->beacon.bc_tstamp);
653 664
654 intval = TU_TO_USEC(conf->beacon_interval); 665 intval = TU_TO_USEC(conf->beacon_interval);
655 tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval); 666 tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
@@ -806,8 +817,10 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
806{ 817{
807 struct ath_hw *ah = sc->sc_ah; 818 struct ath_hw *ah = sc->sc_ah;
808 819
809 if (!ath_has_valid_bslot(sc)) 820 if (!ath_has_valid_bslot(sc)) {
821 sc->sc_flags &= ~SC_OP_BEACONS;
810 return; 822 return;
823 }
811 824
812 ath9k_ps_wakeup(sc); 825 ath9k_ps_wakeup(sc);
813 if (status) { 826 if (status) {
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index ec3271993411..1ca6da80d4ad 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -108,9 +108,7 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
108 return; 108 return;
109 } 109 }
110 110
111 if (AR_SREV_9462(ah)) { 111 if (AR_SREV_9300_20_OR_LATER(ah)) {
112 btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
113 } else if (AR_SREV_9300_20_OR_LATER(ah)) {
114 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; 112 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
115 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300; 113 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
116 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300; 114 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -284,11 +282,12 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
284 ath9k_hw_btcoex_enable_2wire(ah); 282 ath9k_hw_btcoex_enable_2wire(ah);
285 break; 283 break;
286 case ATH_BTCOEX_CFG_3WIRE: 284 case ATH_BTCOEX_CFG_3WIRE:
285 if (AR_SREV_9462(ah)) {
286 ath9k_hw_btcoex_enable_mci(ah);
287 return;
288 }
287 ath9k_hw_btcoex_enable_3wire(ah); 289 ath9k_hw_btcoex_enable_3wire(ah);
288 break; 290 break;
289 case ATH_BTCOEX_CFG_MCI:
290 ath9k_hw_btcoex_enable_mci(ah);
291 return;
292 } 291 }
293 292
294 REG_RMW(ah, AR_GPIO_PDPU, 293 REG_RMW(ah, AR_GPIO_PDPU,
@@ -305,11 +304,12 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
305 int i; 304 int i;
306 305
307 btcoex_hw->enabled = false; 306 btcoex_hw->enabled = false;
308 if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) { 307 if (AR_SREV_9462(ah)) {
309 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); 308 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
310 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) 309 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
311 REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), 310 REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
312 btcoex_hw->wlan_weight[i]); 311 btcoex_hw->wlan_weight[i]);
312 return;
313 } 313 }
314 ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); 314 ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
315 315
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 8f93aef4414f..3a1e1cfabd5e 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -51,7 +51,6 @@ enum ath_btcoex_scheme {
51 ATH_BTCOEX_CFG_NONE, 51 ATH_BTCOEX_CFG_NONE,
52 ATH_BTCOEX_CFG_2WIRE, 52 ATH_BTCOEX_CFG_2WIRE,
53 ATH_BTCOEX_CFG_3WIRE, 53 ATH_BTCOEX_CFG_3WIRE,
54 ATH_BTCOEX_CFG_MCI,
55}; 54};
56 55
57struct ath9k_hw_mci { 56struct ath9k_hw_mci {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index ff47b32ecaf4..fde700c4e490 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -380,63 +380,75 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
380 size_t count, loff_t *ppos) 380 size_t count, loff_t *ppos)
381{ 381{
382 struct ath_softc *sc = file->private_data; 382 struct ath_softc *sc = file->private_data;
383 char buf[512];
384 unsigned int len = 0; 383 unsigned int len = 0;
384 int rv;
385 int mxlen = 4000;
386 char *buf = kmalloc(mxlen, GFP_KERNEL);
387 if (!buf)
388 return -ENOMEM;
389
390#define PR_IS(a, s) \
391 do { \
392 len += snprintf(buf + len, mxlen - len, \
393 "%21s: %10u\n", a, \
394 sc->debug.stats.istats.s); \
395 } while (0)
385 396
386 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 397 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
387 len += snprintf(buf + len, sizeof(buf) - len, 398 PR_IS("RXLP", rxlp);
388 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp); 399 PR_IS("RXHP", rxhp);
389 len += snprintf(buf + len, sizeof(buf) - len, 400 PR_IS("WATHDOG", bb_watchdog);
390 "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
391 len += snprintf(buf + len, sizeof(buf) - len,
392 "%8s: %10u\n", "WATCHDOG",
393 sc->debug.stats.istats.bb_watchdog);
394 } else { 401 } else {
395 len += snprintf(buf + len, sizeof(buf) - len, 402 PR_IS("RX", rxok);
396 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
397 } 403 }
398 len += snprintf(buf + len, sizeof(buf) - len, 404 PR_IS("RXEOL", rxeol);
399 "%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol); 405 PR_IS("RXORN", rxorn);
400 len += snprintf(buf + len, sizeof(buf) - len, 406 PR_IS("TX", txok);
401 "%8s: %10u\n", "RXORN", sc->debug.stats.istats.rxorn); 407 PR_IS("TXURN", txurn);
402 len += snprintf(buf + len, sizeof(buf) - len, 408 PR_IS("MIB", mib);
403 "%8s: %10u\n", "TX", sc->debug.stats.istats.txok); 409 PR_IS("RXPHY", rxphyerr);
404 len += snprintf(buf + len, sizeof(buf) - len, 410 PR_IS("RXKCM", rx_keycache_miss);
405 "%8s: %10u\n", "TXURN", sc->debug.stats.istats.txurn); 411 PR_IS("SWBA", swba);
406 len += snprintf(buf + len, sizeof(buf) - len, 412 PR_IS("BMISS", bmiss);
407 "%8s: %10u\n", "MIB", sc->debug.stats.istats.mib); 413 PR_IS("BNR", bnr);
408 len += snprintf(buf + len, sizeof(buf) - len, 414 PR_IS("CST", cst);
409 "%8s: %10u\n", "RXPHY", sc->debug.stats.istats.rxphyerr); 415 PR_IS("GTT", gtt);
410 len += snprintf(buf + len, sizeof(buf) - len, 416 PR_IS("TIM", tim);
411 "%8s: %10u\n", "RXKCM", sc->debug.stats.istats.rx_keycache_miss); 417 PR_IS("CABEND", cabend);
412 len += snprintf(buf + len, sizeof(buf) - len, 418 PR_IS("DTIMSYNC", dtimsync);
413 "%8s: %10u\n", "SWBA", sc->debug.stats.istats.swba); 419 PR_IS("DTIM", dtim);
414 len += snprintf(buf + len, sizeof(buf) - len, 420 PR_IS("TSFOOR", tsfoor);
415 "%8s: %10u\n", "BMISS", sc->debug.stats.istats.bmiss); 421 PR_IS("TOTAL", total);
416 len += snprintf(buf + len, sizeof(buf) - len, 422
417 "%8s: %10u\n", "BNR", sc->debug.stats.istats.bnr); 423 len += snprintf(buf + len, mxlen - len,
418 len += snprintf(buf + len, sizeof(buf) - len, 424 "SYNC_CAUSE stats:\n");
419 "%8s: %10u\n", "CST", sc->debug.stats.istats.cst); 425
420 len += snprintf(buf + len, sizeof(buf) - len, 426 PR_IS("Sync-All", sync_cause_all);
421 "%8s: %10u\n", "GTT", sc->debug.stats.istats.gtt); 427 PR_IS("RTC-IRQ", sync_rtc_irq);
422 len += snprintf(buf + len, sizeof(buf) - len, 428 PR_IS("MAC-IRQ", sync_mac_irq);
423 "%8s: %10u\n", "TIM", sc->debug.stats.istats.tim); 429 PR_IS("EEPROM-Illegal-Access", eeprom_illegal_access);
424 len += snprintf(buf + len, sizeof(buf) - len, 430 PR_IS("APB-Timeout", apb_timeout);
425 "%8s: %10u\n", "CABEND", sc->debug.stats.istats.cabend); 431 PR_IS("PCI-Mode-Conflict", pci_mode_conflict);
426 len += snprintf(buf + len, sizeof(buf) - len, 432 PR_IS("HOST1-Fatal", host1_fatal);
427 "%8s: %10u\n", "DTIMSYNC", sc->debug.stats.istats.dtimsync); 433 PR_IS("HOST1-Perr", host1_perr);
428 len += snprintf(buf + len, sizeof(buf) - len, 434 PR_IS("TRCV-FIFO-Perr", trcv_fifo_perr);
429 "%8s: %10u\n", "DTIM", sc->debug.stats.istats.dtim); 435 PR_IS("RADM-CPL-EP", radm_cpl_ep);
430 len += snprintf(buf + len, sizeof(buf) - len, 436 PR_IS("RADM-CPL-DLLP-Abort", radm_cpl_dllp_abort);
431 "%8s: %10u\n", "TSFOOR", sc->debug.stats.istats.tsfoor); 437 PR_IS("RADM-CPL-TLP-Abort", radm_cpl_tlp_abort);
432 len += snprintf(buf + len, sizeof(buf) - len, 438 PR_IS("RADM-CPL-ECRC-Err", radm_cpl_ecrc_err);
433 "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total); 439 PR_IS("RADM-CPL-Timeout", radm_cpl_timeout);
434 440 PR_IS("Local-Bus-Timeout", local_timeout);
435 441 PR_IS("PM-Access", pm_access);
436 if (len > sizeof(buf)) 442 PR_IS("MAC-Awake", mac_awake);
437 len = sizeof(buf); 443 PR_IS("MAC-Asleep", mac_asleep);
438 444 PR_IS("MAC-Sleep-Access", mac_sleep_access);
439 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 445
446 if (len > mxlen)
447 len = mxlen;
448
449 rv = simple_read_from_buffer(user_buf, count, ppos, buf, len);
450 kfree(buf);
451 return rv;
440} 452}
441 453
442static const struct file_operations fops_interrupt = { 454static const struct file_operations fops_interrupt = {
@@ -524,6 +536,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
524 PR("hw-put-tx-buf: ", puttxbuf); 536 PR("hw-put-tx-buf: ", puttxbuf);
525 PR("hw-tx-start: ", txstart); 537 PR("hw-tx-start: ", txstart);
526 PR("hw-tx-proc-desc: ", txprocdesc); 538 PR("hw-tx-proc-desc: ", txprocdesc);
539 PR("TX-Failed: ", txfailed);
527 len += snprintf(buf + len, size - len, 540 len += snprintf(buf + len, size - len,
528 "%s%11p%11p%10p%10p\n", "txq-memory-address:", 541 "%s%11p%11p%10p%10p\n", "txq-memory-address:",
529 sc->tx.txq_map[WME_AC_BE], 542 sc->tx.txq_map[WME_AC_BE],
@@ -880,6 +893,13 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
880 len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \ 893 len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
881 sc->debug.stats.rxstats.phy_err_stats[p]); 894 sc->debug.stats.rxstats.phy_err_stats[p]);
882 895
896#define RXS_ERR(s, e) \
897 do { \
898 len += snprintf(buf + len, size - len, \
899 "%22s : %10u\n", s, \
900 sc->debug.stats.rxstats.e); \
901 } while (0)
902
883 struct ath_softc *sc = file->private_data; 903 struct ath_softc *sc = file->private_data;
884 char *buf; 904 char *buf;
885 unsigned int len = 0, size = 1600; 905 unsigned int len = 0, size = 1600;
@@ -889,27 +909,18 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
889 if (buf == NULL) 909 if (buf == NULL)
890 return -ENOMEM; 910 return -ENOMEM;
891 911
892 len += snprintf(buf + len, size - len, 912 RXS_ERR("CRC ERR", crc_err);
893 "%22s : %10u\n", "CRC ERR", 913 RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
894 sc->debug.stats.rxstats.crc_err); 914 RXS_ERR("PHY ERR", phy_err);
895 len += snprintf(buf + len, size - len, 915 RXS_ERR("MIC ERR", mic_err);
896 "%22s : %10u\n", "DECRYPT CRC ERR", 916 RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
897 sc->debug.stats.rxstats.decrypt_crc_err); 917 RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
898 len += snprintf(buf + len, size - len, 918 RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
899 "%22s : %10u\n", "PHY ERR", 919 RXS_ERR("RX-LENGTH-ERR", rx_len_err);
900 sc->debug.stats.rxstats.phy_err); 920 RXS_ERR("RX-OOM-ERR", rx_oom_err);
901 len += snprintf(buf + len, size - len, 921 RXS_ERR("RX-RATE-ERR", rx_rate_err);
902 "%22s : %10u\n", "MIC ERR", 922 RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush);
903 sc->debug.stats.rxstats.mic_err); 923 RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
904 len += snprintf(buf + len, size - len,
905 "%22s : %10u\n", "PRE-DELIM CRC ERR",
906 sc->debug.stats.rxstats.pre_delim_crc_err);
907 len += snprintf(buf + len, size - len,
908 "%22s : %10u\n", "POST-DELIM CRC ERR",
909 sc->debug.stats.rxstats.post_delim_crc_err);
910 len += snprintf(buf + len, size - len,
911 "%22s : %10u\n", "DECRYPT BUSY ERR",
912 sc->debug.stats.rxstats.decrypt_busy_err);
913 924
914 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN); 925 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
915 PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING); 926 PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
@@ -938,12 +949,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
938 PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL); 949 PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
939 PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL); 950 PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
940 951
941 len += snprintf(buf + len, size - len, 952 RXS_ERR("RX-Pkts-All", rx_pkts_all);
942 "%22s : %10u\n", "RX-Pkts-All", 953 RXS_ERR("RX-Bytes-All", rx_bytes_all);
943 sc->debug.stats.rxstats.rx_pkts_all); 954 RXS_ERR("RX-Beacons", rx_beacons);
944 len += snprintf(buf + len, size - len, 955 RXS_ERR("RX-Frags", rx_frags);
945 "%22s : %10u\n", "RX-Bytes-All",
946 sc->debug.stats.rxstats.rx_bytes_all);
947 956
948 if (len > size) 957 if (len > size)
949 len = size; 958 len = size;
@@ -953,12 +962,12 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
953 962
954 return retval; 963 return retval;
955 964
965#undef RXS_ERR
956#undef PHY_ERR 966#undef PHY_ERR
957} 967}
958 968
959void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) 969void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
960{ 970{
961#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
962#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ 971#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
963#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\ 972#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
964 [sc->debug.rsidx].c) 973 [sc->debug.rsidx].c)
@@ -1004,7 +1013,6 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
1004 1013
1005#endif 1014#endif
1006 1015
1007#undef RX_STAT_INC
1008#undef RX_PHY_ERR_INC 1016#undef RX_PHY_ERR_INC
1009#undef RX_SAMP_DBG 1017#undef RX_SAMP_DBG
1010} 1018}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 64fcfad467bf..c34da09d9103 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -60,6 +60,7 @@ struct ath_buf;
60 * @tsfoor: TSF out of range, indicates that the corrected TSF received 60 * @tsfoor: TSF out of range, indicates that the corrected TSF received
61 * from a beacon differs from the PCU's internal TSF by more than a 61 * from a beacon differs from the PCU's internal TSF by more than a
62 * (programmable) threshold 62 * (programmable) threshold
63 * @local_timeout: Internal bus timeout.
63 */ 64 */
64struct ath_interrupt_stats { 65struct ath_interrupt_stats {
65 u32 total; 66 u32 total;
@@ -85,8 +86,30 @@ struct ath_interrupt_stats {
85 u32 dtim; 86 u32 dtim;
86 u32 bb_watchdog; 87 u32 bb_watchdog;
87 u32 tsfoor; 88 u32 tsfoor;
89
90 /* Sync-cause stats */
91 u32 sync_cause_all;
92 u32 sync_rtc_irq;
93 u32 sync_mac_irq;
94 u32 eeprom_illegal_access;
95 u32 apb_timeout;
96 u32 pci_mode_conflict;
97 u32 host1_fatal;
98 u32 host1_perr;
99 u32 trcv_fifo_perr;
100 u32 radm_cpl_ep;
101 u32 radm_cpl_dllp_abort;
102 u32 radm_cpl_tlp_abort;
103 u32 radm_cpl_ecrc_err;
104 u32 radm_cpl_timeout;
105 u32 local_timeout;
106 u32 pm_access;
107 u32 mac_awake;
108 u32 mac_asleep;
109 u32 mac_sleep_access;
88}; 110};
89 111
112
90/** 113/**
91 * struct ath_tx_stats - Statistics about TX 114 * struct ath_tx_stats - Statistics about TX
92 * @tx_pkts_all: No. of total frames transmitted, including ones that 115 * @tx_pkts_all: No. of total frames transmitted, including ones that
@@ -113,6 +136,7 @@ struct ath_interrupt_stats {
113 * @puttxbuf: Number of times hardware was given txbuf to write. 136 * @puttxbuf: Number of times hardware was given txbuf to write.
114 * @txstart: Number of times hardware was told to start tx. 137 * @txstart: Number of times hardware was told to start tx.
115 * @txprocdesc: Number of times tx descriptor was processed 138 * @txprocdesc: Number of times tx descriptor was processed
139 * @txfailed: Out-of-memory or other errors in xmit path.
116 */ 140 */
117struct ath_tx_stats { 141struct ath_tx_stats {
118 u32 tx_pkts_all; 142 u32 tx_pkts_all;
@@ -135,8 +159,11 @@ struct ath_tx_stats {
135 u32 puttxbuf; 159 u32 puttxbuf;
136 u32 txstart; 160 u32 txstart;
137 u32 txprocdesc; 161 u32 txprocdesc;
162 u32 txfailed;
138}; 163};
139 164
165#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
166
140/** 167/**
141 * struct ath_rx_stats - RX Statistics 168 * struct ath_rx_stats - RX Statistics
142 * @rx_pkts_all: No. of total frames received, including ones that 169 * @rx_pkts_all: No. of total frames received, including ones that
@@ -153,6 +180,13 @@ struct ath_tx_stats {
153 * @post_delim_crc_err: Post-Frame delimiter CRC error detections 180 * @post_delim_crc_err: Post-Frame delimiter CRC error detections
154 * @decrypt_busy_err: Decryption interruptions counter 181 * @decrypt_busy_err: Decryption interruptions counter
155 * @phy_err_stats: Individual PHY error statistics 182 * @phy_err_stats: Individual PHY error statistics
183 * @rx_len_err: No. of frames discarded due to bad length.
184 * @rx_oom_err: No. of frames dropped due to OOM issues.
185 * @rx_rate_err: No. of frames dropped due to rate errors.
186 * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
187 * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH.
188 * @rx_beacons: No. of beacons received.
189 * @rx_frags: No. of rx-fragements received.
156 */ 190 */
157struct ath_rx_stats { 191struct ath_rx_stats {
158 u32 rx_pkts_all; 192 u32 rx_pkts_all;
@@ -165,6 +199,13 @@ struct ath_rx_stats {
165 u32 post_delim_crc_err; 199 u32 post_delim_crc_err;
166 u32 decrypt_busy_err; 200 u32 decrypt_busy_err;
167 u32 phy_err_stats[ATH9K_PHYERR_MAX]; 201 u32 phy_err_stats[ATH9K_PHYERR_MAX];
202 u32 rx_len_err;
203 u32 rx_oom_err;
204 u32 rx_rate_err;
205 u32 rx_too_many_frags_err;
206 u32 rx_drop_rxflush;
207 u32 rx_beacons;
208 u32 rx_frags;
168}; 209};
169 210
170enum ath_reset_type { 211enum ath_reset_type {
@@ -174,6 +215,7 @@ enum ath_reset_type {
174 RESET_TYPE_TX_ERROR, 215 RESET_TYPE_TX_ERROR,
175 RESET_TYPE_TX_HANG, 216 RESET_TYPE_TX_HANG,
176 RESET_TYPE_PLL_HANG, 217 RESET_TYPE_PLL_HANG,
218 RESET_TYPE_MAC_HANG,
177 __RESET_TYPE_MAX 219 __RESET_TYPE_MAX
178}; 220};
179 221
@@ -247,6 +289,8 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
247 289
248#else 290#else
249 291
292#define RX_STAT_INC(c) /* NOP */
293
250static inline int ath9k_init_debug(struct ath_hw *ah) 294static inline int ath9k_init_debug(struct ath_hw *ah)
251{ 295{
252 return 0; 296 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index f4f56aff1e9d..ecc81792f2dc 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -21,17 +21,6 @@
21#include "dfs.h" 21#include "dfs.h"
22#include "dfs_debug.h" 22#include "dfs_debug.h"
23 23
24/*
25 * TODO: move into or synchronize this with generic header
26 * as soon as IF is defined
27 */
28struct dfs_radar_pulse {
29 u16 freq;
30 u64 ts;
31 u32 width;
32 u8 rssi;
33};
34
35/* internal struct to pass radar data */ 24/* internal struct to pass radar data */
36struct ath_radar_data { 25struct ath_radar_data {
37 u8 pulse_bw_info; 26 u8 pulse_bw_info;
@@ -60,44 +49,44 @@ static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
60#define EXT_CH_RADAR_FOUND 0x02 49#define EXT_CH_RADAR_FOUND 0x02
61static bool 50static bool
62ath9k_postprocess_radar_event(struct ath_softc *sc, 51ath9k_postprocess_radar_event(struct ath_softc *sc,
63 struct ath_radar_data *are, 52 struct ath_radar_data *ard,
64 struct dfs_radar_pulse *drp) 53 struct pulse_event *pe)
65{ 54{
66 u8 rssi; 55 u8 rssi;
67 u16 dur; 56 u16 dur;
68 57
69 ath_dbg(ath9k_hw_common(sc->sc_ah), DFS, 58 ath_dbg(ath9k_hw_common(sc->sc_ah), DFS,
70 "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n", 59 "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
71 are->pulse_bw_info, 60 ard->pulse_bw_info,
72 are->pulse_length_pri, are->rssi, 61 ard->pulse_length_pri, ard->rssi,
73 are->pulse_length_ext, are->ext_rssi); 62 ard->pulse_length_ext, ard->ext_rssi);
74 63
75 /* 64 /*
76 * Only the last 2 bits of the BW info are relevant, they indicate 65 * Only the last 2 bits of the BW info are relevant, they indicate
77 * which channel the radar was detected in. 66 * which channel the radar was detected in.
78 */ 67 */
79 are->pulse_bw_info &= 0x03; 68 ard->pulse_bw_info &= 0x03;
80 69
81 switch (are->pulse_bw_info) { 70 switch (ard->pulse_bw_info) {
82 case PRI_CH_RADAR_FOUND: 71 case PRI_CH_RADAR_FOUND:
83 /* radar in ctrl channel */ 72 /* radar in ctrl channel */
84 dur = are->pulse_length_pri; 73 dur = ard->pulse_length_pri;
85 DFS_STAT_INC(sc, pri_phy_errors); 74 DFS_STAT_INC(sc, pri_phy_errors);
86 /* 75 /*
87 * cannot use ctrl channel RSSI 76 * cannot use ctrl channel RSSI
88 * if extension channel is stronger 77 * if extension channel is stronger
89 */ 78 */
90 rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi; 79 rssi = (ard->ext_rssi >= (ard->rssi + 3)) ? 0 : ard->rssi;
91 break; 80 break;
92 case EXT_CH_RADAR_FOUND: 81 case EXT_CH_RADAR_FOUND:
93 /* radar in extension channel */ 82 /* radar in extension channel */
94 dur = are->pulse_length_ext; 83 dur = ard->pulse_length_ext;
95 DFS_STAT_INC(sc, ext_phy_errors); 84 DFS_STAT_INC(sc, ext_phy_errors);
96 /* 85 /*
97 * cannot use extension channel RSSI 86 * cannot use extension channel RSSI
98 * if control channel is stronger 87 * if control channel is stronger
99 */ 88 */
100 rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi; 89 rssi = (ard->rssi >= (ard->ext_rssi + 12)) ? 0 : ard->ext_rssi;
101 break; 90 break;
102 case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND): 91 case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
103 /* 92 /*
@@ -107,14 +96,14 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
107 * Radiated testing, when pulse is on DC, different pri and 96 * Radiated testing, when pulse is on DC, different pri and
108 * ext durations are reported, so take the larger of the two 97 * ext durations are reported, so take the larger of the two
109 */ 98 */
110 if (are->pulse_length_ext >= are->pulse_length_pri) 99 if (ard->pulse_length_ext >= ard->pulse_length_pri)
111 dur = are->pulse_length_ext; 100 dur = ard->pulse_length_ext;
112 else 101 else
113 dur = are->pulse_length_pri; 102 dur = ard->pulse_length_pri;
114 DFS_STAT_INC(sc, dc_phy_errors); 103 DFS_STAT_INC(sc, dc_phy_errors);
115 104
116 /* when both are present use stronger one */ 105 /* when both are present use stronger one */
117 rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi; 106 rssi = (ard->rssi < ard->ext_rssi) ? ard->ext_rssi : ard->rssi;
118 break; 107 break;
119 default: 108 default:
120 /* 109 /*
@@ -137,8 +126,8 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
137 */ 126 */
138 127
139 /* convert duration to usecs */ 128 /* convert duration to usecs */
140 drp->width = dur_to_usecs(sc->sc_ah, dur); 129 pe->width = dur_to_usecs(sc->sc_ah, dur);
141 drp->rssi = rssi; 130 pe->rssi = rssi;
142 131
143 DFS_STAT_INC(sc, pulses_detected); 132 DFS_STAT_INC(sc, pulses_detected);
144 return true; 133 return true;
@@ -155,15 +144,17 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
155 struct ath_radar_data ard; 144 struct ath_radar_data ard;
156 u16 datalen; 145 u16 datalen;
157 char *vdata_end; 146 char *vdata_end;
158 struct dfs_radar_pulse drp; 147 struct pulse_event pe;
159 struct ath_hw *ah = sc->sc_ah; 148 struct ath_hw *ah = sc->sc_ah;
160 struct ath_common *common = ath9k_hw_common(ah); 149 struct ath_common *common = ath9k_hw_common(ah);
161 150
162 if ((!(rs->rs_phyerr != ATH9K_PHYERR_RADAR)) && 151 DFS_STAT_INC(sc, pulses_total);
163 (!(rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT))) { 152 if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
153 (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
164 ath_dbg(common, DFS, 154 ath_dbg(common, DFS,
165 "Error: rs_phyer=0x%x not a radar error\n", 155 "Error: rs_phyer=0x%x not a radar error\n",
166 rs->rs_phyerr); 156 rs->rs_phyerr);
157 DFS_STAT_INC(sc, pulses_no_dfs);
167 return; 158 return;
168 } 159 }
169 160
@@ -189,27 +180,22 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
189 ard.pulse_bw_info = vdata_end[-1]; 180 ard.pulse_bw_info = vdata_end[-1];
190 ard.pulse_length_ext = vdata_end[-2]; 181 ard.pulse_length_ext = vdata_end[-2];
191 ard.pulse_length_pri = vdata_end[-3]; 182 ard.pulse_length_pri = vdata_end[-3];
192 183 pe.freq = ah->curchan->channel;
193 ath_dbg(common, DFS, 184 pe.ts = mactime;
194 "bw_info=%d, length_pri=%d, length_ext=%d, " 185 if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
195 "rssi_pri=%d, rssi_ext=%d\n", 186 struct dfs_pattern_detector *pd = sc->dfs_detector;
196 ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
197 ard.rssi, ard.ext_rssi);
198
199 drp.freq = ah->curchan->channel;
200 drp.ts = mactime;
201 if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
202 static u64 last_ts; 187 static u64 last_ts;
203 ath_dbg(common, DFS, 188 ath_dbg(common, DFS,
204 "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, " 189 "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
205 "width=%d, rssi=%d, delta_ts=%llu\n", 190 "width=%d, rssi=%d, delta_ts=%llu\n",
206 drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts); 191 pe.freq, pe.ts, pe.width, pe.rssi, pe.ts-last_ts);
207 last_ts = drp.ts; 192 last_ts = pe.ts;
208 /* 193 DFS_STAT_INC(sc, pulses_processed);
209 * TODO: forward pulse to pattern detector 194 if (pd != NULL && pd->add_pulse(pd, &pe)) {
210 * 195 DFS_STAT_INC(sc, radar_detected);
211 * ieee80211_add_radar_pulse(drp.freq, drp.ts, 196 /*
212 * drp.width, drp.rssi); 197 * TODO: forward radar event to DFS management layer
213 */ 198 */
199 }
214 } 200 }
215} 201}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
index c2412857f122..3c839f06a06a 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.h
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -17,6 +17,7 @@
17 17
18#ifndef ATH9K_DFS_H 18#ifndef ATH9K_DFS_H
19#define ATH9K_DFS_H 19#define ATH9K_DFS_H
20#include "dfs_pattern_detector.h"
20 21
21#if defined(CONFIG_ATH9K_DFS_CERTIFIED) 22#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
22/** 23/**
@@ -31,13 +32,14 @@
31 * 32 *
32 * The radar information provided as raw payload data is validated and 33 * The radar information provided as raw payload data is validated and
33 * filtered for false pulses. Events passing all tests are forwarded to 34 * filtered for false pulses. Events passing all tests are forwarded to
34 * the upper layer for pattern detection. 35 * the DFS detector for pattern detection.
35 */ 36 */
36void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, 37void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
37 struct ath_rx_status *rs, u64 mactime); 38 struct ath_rx_status *rs, u64 mactime);
38#else 39#else
39static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, 40static inline void
40 struct ath_rx_status *rs, u64 mactime) { } 41ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
42 struct ath_rx_status *rs, u64 mactime) { }
41#endif 43#endif
42 44
43#endif /* ATH9K_DFS_H */ 45#endif /* ATH9K_DFS_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 4364c103ed33..55d28072adeb 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -21,9 +21,15 @@
21#include "ath9k.h" 21#include "ath9k.h"
22#include "dfs_debug.h" 22#include "dfs_debug.h"
23 23
24
25struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
26
24#define ATH9K_DFS_STAT(s, p) \ 27#define ATH9K_DFS_STAT(s, p) \
25 len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \ 28 len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
26 sc->debug.stats.dfs_stats.p); 29 sc->debug.stats.dfs_stats.p);
30#define ATH9K_DFS_POOL_STAT(s, p) \
31 len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
32 global_dfs_pool_stats.p);
27 33
28static ssize_t read_file_dfs(struct file *file, char __user *user_buf, 34static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
29 size_t count, loff_t *ppos) 35 size_t count, loff_t *ppos)
@@ -43,6 +49,9 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
43 hw_ver->macVersion, hw_ver->macRev, 49 hw_ver->macVersion, hw_ver->macRev,
44 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? 50 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
45 "enabled" : "disabled"); 51 "enabled" : "disabled");
52 len += snprintf(buf + len, size - len, "Pulse detector statistics:\n");
53 ATH9K_DFS_STAT("pulse events reported ", pulses_total);
54 ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs);
46 ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected); 55 ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
47 ATH9K_DFS_STAT("Datalen discards ", datalen_discards); 56 ATH9K_DFS_STAT("Datalen discards ", datalen_discards);
48 ATH9K_DFS_STAT("RSSI discards ", rssi_discards); 57 ATH9K_DFS_STAT("RSSI discards ", rssi_discards);
@@ -50,6 +59,18 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
50 ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors); 59 ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
51 ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors); 60 ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
52 ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors); 61 ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
62 len += snprintf(buf + len, size - len, "Radar detector statistics "
63 "(current DFS region: %d)\n", sc->dfs_detector->region);
64 ATH9K_DFS_STAT("Pulse events processed ", pulses_processed);
65 ATH9K_DFS_STAT("Radars detected ", radar_detected);
66 len += snprintf(buf + len, size - len, "Global Pool statistics:\n");
67 ATH9K_DFS_POOL_STAT("Pool references ", pool_reference);
68 ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated);
69 ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error);
70 ATH9K_DFS_POOL_STAT("Pulses in use ", pulse_used);
71 ATH9K_DFS_POOL_STAT("Seqs. allocated ", pseq_allocated);
72 ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error);
73 ATH9K_DFS_POOL_STAT("Seqs. in use ", pseq_used);
53 74
54 if (len > size) 75 if (len > size)
55 len = size; 76 len = size;
@@ -60,8 +81,33 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
60 return retval; 81 return retval;
61} 82}
62 83
84/* magic number to prevent accidental reset of DFS statistics */
85#define DFS_STATS_RESET_MAGIC 0x80000000
86static ssize_t write_file_dfs(struct file *file, const char __user *user_buf,
87 size_t count, loff_t *ppos)
88{
89 struct ath_softc *sc = file->private_data;
90 unsigned long val;
91 char buf[32];
92 ssize_t len;
93
94 len = min(count, sizeof(buf) - 1);
95 if (copy_from_user(buf, user_buf, len))
96 return -EFAULT;
97
98 buf[len] = '\0';
99 if (strict_strtoul(buf, 0, &val))
100 return -EINVAL;
101
102 if (val == DFS_STATS_RESET_MAGIC)
103 memset(&sc->debug.stats.dfs_stats, 0,
104 sizeof(sc->debug.stats.dfs_stats));
105 return count;
106}
107
63static const struct file_operations fops_dfs_stats = { 108static const struct file_operations fops_dfs_stats = {
64 .read = read_file_dfs, 109 .read = read_file_dfs,
110 .write = write_file_dfs,
65 .open = simple_open, 111 .open = simple_open,
66 .owner = THIS_MODULE, 112 .owner = THIS_MODULE,
67 .llseek = default_llseek, 113 .llseek = default_llseek,
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index 4911724cb445..e36810a4b585 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -22,17 +22,23 @@
22#include "hw.h" 22#include "hw.h"
23 23
24/** 24/**
25 * struct ath_dfs_stats - DFS Statistics 25 * struct ath_dfs_stats - DFS Statistics per wiphy
26 * 26 * @pulses_total: pulses reported by HW
27 * @pulses_detected: No. of pulses detected so far 27 * @pulses_no_dfs: pulses wrongly reported as DFS
28 * @datalen_discards: No. of pulses discarded due to invalid datalen 28 * @pulses_detected: pulses detected so far
29 * @rssi_discards: No. of pulses discarded due to invalid RSSI 29 * @datalen_discards: pulses discarded due to invalid datalen
30 * @bwinfo_discards: No. of pulses discarded due to invalid BW info 30 * @rssi_discards: pulses discarded due to invalid RSSI
31 * @pri_phy_errors: No. of pulses reported for primary channel 31 * @bwinfo_discards: pulses discarded due to invalid BW info
32 * @ext_phy_errors: No. of pulses reported for extension channel 32 * @pri_phy_errors: pulses reported for primary channel
33 * @dc_phy_errors: No. of pulses reported for primary + extension channel 33 * @ext_phy_errors: pulses reported for extension channel
34 * @dc_phy_errors: pulses reported for primary + extension channel
35 * @pulses_processed: pulses forwarded to detector
36 * @radar_detected: radars detected
34 */ 37 */
35struct ath_dfs_stats { 38struct ath_dfs_stats {
39 /* pulse stats */
40 u32 pulses_total;
41 u32 pulses_no_dfs;
36 u32 pulses_detected; 42 u32 pulses_detected;
37 u32 datalen_discards; 43 u32 datalen_discards;
38 u32 rssi_discards; 44 u32 rssi_discards;
@@ -40,18 +46,39 @@ struct ath_dfs_stats {
40 u32 pri_phy_errors; 46 u32 pri_phy_errors;
41 u32 ext_phy_errors; 47 u32 ext_phy_errors;
42 u32 dc_phy_errors; 48 u32 dc_phy_errors;
49 /* pattern detection stats */
50 u32 pulses_processed;
51 u32 radar_detected;
43}; 52};
44 53
54/**
55 * struct ath_dfs_pool_stats - DFS Statistics for global pools
56 */
57struct ath_dfs_pool_stats {
58 u32 pool_reference;
59 u32 pulse_allocated;
60 u32 pulse_alloc_error;
61 u32 pulse_used;
62 u32 pseq_allocated;
63 u32 pseq_alloc_error;
64 u32 pseq_used;
65};
45#if defined(CONFIG_ATH9K_DFS_DEBUGFS) 66#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
46 67
47#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++) 68#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
48void ath9k_dfs_init_debug(struct ath_softc *sc); 69void ath9k_dfs_init_debug(struct ath_softc *sc);
49 70
71#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
72#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
73extern struct ath_dfs_pool_stats global_dfs_pool_stats;
74
50#else 75#else
51 76
52#define DFS_STAT_INC(sc, c) do { } while (0) 77#define DFS_STAT_INC(sc, c) do { } while (0)
53static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { } 78static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
54 79
80#define DFS_POOL_STAT_INC(c) do { } while (0)
81#define DFS_POOL_STAT_DEC(c) do { } while (0)
55#endif /* CONFIG_ATH9K_DFS_DEBUGFS */ 82#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
56 83
57#endif /* ATH9K_DFS_DEBUG_H */ 84#endif /* ATH9K_DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
new file mode 100644
index 000000000000..ea2a6cf7ef23
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -0,0 +1,300 @@
1/*
2 * Copyright (c) 2012 Neratec Solutions AG
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/slab.h>
18#include <linux/export.h>
19
20#include "dfs_pattern_detector.h"
21#include "dfs_pri_detector.h"
22
23/*
24 * tolerated deviation of radar time stamp in usecs on both sides
25 * TODO: this might need to be HW-dependent
26 */
27#define PRI_TOLERANCE 16
28
29/**
30 * struct radar_types - contains array of patterns defined for one DFS domain
31 * @domain: DFS regulatory domain
32 * @num_radar_types: number of radar types to follow
33 * @radar_types: radar types array
34 */
35struct radar_types {
36 enum nl80211_dfs_regions region;
37 u32 num_radar_types;
38 const struct radar_detector_specs *radar_types;
39};
40
41/* percentage on ppb threshold to trigger detection */
42#define MIN_PPB_THRESH 50
43#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
44#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
45
46#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
47{ \
48 ID, WMIN, WMAX, (PRF2PRI(PMAX) - PRI_TOLERANCE), \
49 (PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
50 PPB_THRESH(PPB), PRI_TOLERANCE, \
51}
52
53/* radar types as defined by ETSI EN-301-893 v1.5.1 */
54static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
55 ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18),
56 ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10),
57 ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15),
58 ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25),
59 ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
60 ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10),
61 ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15),
62};
63
64static const struct radar_types etsi_radar_types_v15 = {
65 .region = NL80211_DFS_ETSI,
66 .num_radar_types = ARRAY_SIZE(etsi_radar_ref_types_v15),
67 .radar_types = etsi_radar_ref_types_v15,
68};
69
70/* for now, we support ETSI radar types, FCC and JP are TODO */
71static const struct radar_types *dfs_domains[] = {
72 &etsi_radar_types_v15,
73};
74
75/**
76 * get_dfs_domain_radar_types() - get radar types for a given DFS domain
77 * @param domain DFS domain
78 * @return radar_types ptr on success, NULL if DFS domain is not supported
79 */
80static const struct radar_types *
81get_dfs_domain_radar_types(enum nl80211_dfs_regions region)
82{
83 u32 i;
84 for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) {
85 if (dfs_domains[i]->region == region)
86 return dfs_domains[i];
87 }
88 return NULL;
89}
90
91/**
92 * struct channel_detector - detector elements for a DFS channel
93 * @head: list_head
94 * @freq: frequency for this channel detector in MHz
95 * @detectors: array of dynamically created detector elements for this freq
96 *
97 * Channel detectors are required to provide multi-channel DFS detection, e.g.
98 * to support off-channel scanning. A pattern detector has a list of channels
99 * radar pulses have been reported for in the past.
100 */
101struct channel_detector {
102 struct list_head head;
103 u16 freq;
104 struct pri_detector **detectors;
105};
106
107/* channel_detector_reset() - reset detector lines for a given channel */
108static void channel_detector_reset(struct dfs_pattern_detector *dpd,
109 struct channel_detector *cd)
110{
111 u32 i;
112 if (cd == NULL)
113 return;
114 for (i = 0; i < dpd->num_radar_types; i++)
115 cd->detectors[i]->reset(cd->detectors[i], dpd->last_pulse_ts);
116}
117
118/* channel_detector_exit() - destructor */
119static void channel_detector_exit(struct dfs_pattern_detector *dpd,
120 struct channel_detector *cd)
121{
122 u32 i;
123 if (cd == NULL)
124 return;
125 list_del(&cd->head);
126 for (i = 0; i < dpd->num_radar_types; i++) {
127 struct pri_detector *de = cd->detectors[i];
128 if (de != NULL)
129 de->exit(de);
130 }
131 kfree(cd->detectors);
132 kfree(cd);
133}
134
135static struct channel_detector *
136channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
137{
138 u32 sz, i;
139 struct channel_detector *cd;
140
141 cd = kmalloc(sizeof(*cd), GFP_KERNEL);
142 if (cd == NULL)
143 goto fail;
144
145 INIT_LIST_HEAD(&cd->head);
146 cd->freq = freq;
147 sz = sizeof(cd->detectors) * dpd->num_radar_types;
148 cd->detectors = kzalloc(sz, GFP_KERNEL);
149 if (cd->detectors == NULL)
150 goto fail;
151
152 for (i = 0; i < dpd->num_radar_types; i++) {
153 const struct radar_detector_specs *rs = &dpd->radar_spec[i];
154 struct pri_detector *de = pri_detector_init(rs);
155 if (de == NULL)
156 goto fail;
157 cd->detectors[i] = de;
158 }
159 list_add(&cd->head, &dpd->channel_detectors);
160 return cd;
161
162fail:
163 pr_err("failed to allocate channel_detector for freq=%d\n", freq);
164 channel_detector_exit(dpd, cd);
165 return NULL;
166}
167
168/**
169 * channel_detector_get() - get channel detector for given frequency
170 * @param dpd instance pointer
171 * @param freq frequency in MHz
172 * @return pointer to channel detector on success, NULL otherwise
173 *
174 * Return existing channel detector for the given frequency or return a
175 * newly create one.
176 */
177static struct channel_detector *
178channel_detector_get(struct dfs_pattern_detector *dpd, u16 freq)
179{
180 struct channel_detector *cd;
181 list_for_each_entry(cd, &dpd->channel_detectors, head) {
182 if (cd->freq == freq)
183 return cd;
184 }
185 return channel_detector_create(dpd, freq);
186}
187
188/*
189 * DFS Pattern Detector
190 */
191
192/* dpd_reset(): reset all channel detectors */
193static void dpd_reset(struct dfs_pattern_detector *dpd)
194{
195 struct channel_detector *cd;
196 if (!list_empty(&dpd->channel_detectors))
197 list_for_each_entry(cd, &dpd->channel_detectors, head)
198 channel_detector_reset(dpd, cd);
199
200}
201static void dpd_exit(struct dfs_pattern_detector *dpd)
202{
203 struct channel_detector *cd, *cd0;
204 if (!list_empty(&dpd->channel_detectors))
205 list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
206 channel_detector_exit(dpd, cd);
207 kfree(dpd);
208}
209
210static bool
211dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
212{
213 u32 i;
214 bool ts_wraparound;
215 struct channel_detector *cd;
216
217 if (dpd->region == NL80211_DFS_UNSET) {
218 /*
219 * pulses received for a non-supported or un-initialized
220 * domain are treated as detected radars
221 */
222 return true;
223 }
224
225 cd = channel_detector_get(dpd, event->freq);
226 if (cd == NULL)
227 return false;
228
229 ts_wraparound = (event->ts < dpd->last_pulse_ts);
230 dpd->last_pulse_ts = event->ts;
231 if (ts_wraparound) {
232 /*
233 * reset detector on time stamp wraparound
234 * with monotonic time stamps, this should never happen
235 */
236 pr_warn("DFS: time stamp wraparound detected, resetting\n");
237 dpd_reset(dpd);
238 }
239 /* do type individual pattern matching */
240 for (i = 0; i < dpd->num_radar_types; i++) {
241 if (cd->detectors[i]->add_pulse(cd->detectors[i], event) != 0) {
242 channel_detector_reset(dpd, cd);
243 return true;
244 }
245 }
246 return false;
247}
248
249static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
250 enum nl80211_dfs_regions region)
251{
252 const struct radar_types *rt;
253 struct channel_detector *cd, *cd0;
254
255 if (dpd->region == region)
256 return true;
257
258 dpd->region = NL80211_DFS_UNSET;
259
260 rt = get_dfs_domain_radar_types(region);
261 if (rt == NULL)
262 return false;
263
264 /* delete all channel detectors for previous DFS domain */
265 if (!list_empty(&dpd->channel_detectors))
266 list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
267 channel_detector_exit(dpd, cd);
268 dpd->radar_spec = rt->radar_types;
269 dpd->num_radar_types = rt->num_radar_types;
270
271 dpd->region = region;
272 return true;
273}
274
275static struct dfs_pattern_detector default_dpd = {
276 .exit = dpd_exit,
277 .set_domain = dpd_set_domain,
278 .add_pulse = dpd_add_pulse,
279 .region = NL80211_DFS_UNSET,
280};
281
282struct dfs_pattern_detector *
283dfs_pattern_detector_init(enum nl80211_dfs_regions region)
284{
285 struct dfs_pattern_detector *dpd;
286 dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
287 if (dpd == NULL) {
288 pr_err("allocation of dfs_pattern_detector failed\n");
289 return NULL;
290 }
291 *dpd = default_dpd;
292 INIT_LIST_HEAD(&dpd->channel_detectors);
293
294 if (dpd->set_domain(dpd, region))
295 return dpd;
296
297 pr_err("Could not set DFS domain to %d. ", region);
298 return NULL;
299}
300EXPORT_SYMBOL(dfs_pattern_detector_init);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
new file mode 100644
index 000000000000..fd0328a30995
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (c) 2012 Neratec Solutions AG
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef DFS_PATTERN_DETECTOR_H
18#define DFS_PATTERN_DETECTOR_H
19
20#include <linux/types.h>
21#include <linux/list.h>
22#include <linux/nl80211.h>
23
/**
 * struct pulse_event - describing pulses reported by PHY
 * @ts: pulse time stamp in us
 * @freq: channel frequency in MHz
 * @width: pulse duration in us
 * @rssi: RSSI of the radar event
 */
struct pulse_event {
	u64 ts;
	u16 freq;
	u8 width;
	u8 rssi;
};
37
/**
 * struct radar_detector_specs - detector specs for a radar pattern type
 * @type_id: pattern type, as defined by regulatory
 * @width_min: minimum radar pulse width in [us]
 * @width_max: maximum radar pulse width in [us]
 * @pri_min: minimum pulse repetition interval in [us] (including tolerance)
 * @pri_max: maximum pulse repetition interval in [us] (including tolerance)
 * @num_pri: maximum number of different pri for this type
 * @ppb: pulses per bursts for this type
 * @ppb_thresh: number of pulses required to trigger detection
 * @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
 */
struct radar_detector_specs {
	u8 type_id;
	u8 width_min;
	u8 width_max;
	u16 pri_min;
	u16 pri_max;
	u8 num_pri;
	u8 ppb;
	u8 ppb_thresh;
	u8 max_pri_tolerance;
};
61
/**
 * struct dfs_pattern_detector - DFS pattern detector
 * @exit(): destructor
 * @set_domain(): set DFS domain, resets detector lines upon domain changes
 * @add_pulse(): add radar pulse to detector, returns true on detection
 * @region: active DFS region, NL80211_DFS_UNSET until set
 * @num_radar_types: number of different radar types
 * @last_pulse_ts: time stamp of last valid pulse in usecs
 * @radar_spec: array of radar detection specs for the active region
 * @channel_detectors: list connecting channel_detector elements
 */
struct dfs_pattern_detector {
	void (*exit)(struct dfs_pattern_detector *dpd);
	bool (*set_domain)(struct dfs_pattern_detector *dpd,
			   enum nl80211_dfs_regions region);
	bool (*add_pulse)(struct dfs_pattern_detector *dpd,
			  struct pulse_event *pe);

	enum nl80211_dfs_regions region;
	u8 num_radar_types;
	u64 last_pulse_ts;

	const struct radar_detector_specs *radar_spec;
	struct list_head channel_detectors;
};
87
/**
 * dfs_pattern_detector_init() - constructor for pattern detector class
 * @region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
 *
 * Returns an instance pointer on success, NULL otherwise
 */
#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
extern struct dfs_pattern_detector *
dfs_pattern_detector_init(enum nl80211_dfs_regions region);
#else
/* DFS support compiled out: callers always get "no detector available" */
static inline struct dfs_pattern_detector *
dfs_pattern_detector_init(enum nl80211_dfs_regions region)
{
	return NULL;
}
#endif /* CONFIG_ATH9K_DFS_CERTIFIED */
103
104#endif /* DFS_PATTERN_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
new file mode 100644
index 000000000000..91b8dceeadb1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
@@ -0,0 +1,452 @@
1/*
2 * Copyright (c) 2012 Neratec Solutions AG
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19
20#include "ath9k.h"
21#include "dfs_pattern_detector.h"
22#include "dfs_pri_detector.h"
23#include "dfs_debug.h"
24
/**
 * struct pri_sequence - sequence of pulses matching one PRI
 * @head: list_head
 * @pri: pulse repetition interval (PRI) in usecs
 * @dur: duration of sequence in usecs ((ppb - 1) PRIs + 2 * tolerance,
 *	see pseq_handler_create_sequences())
 * @count: number of pulses in this sequence
 * @count_falses: number of not matching pulses in this sequence
 * @first_ts: time stamp of first pulse in usecs
 * @last_ts: time stamp of last pulse in usecs
 * @deadline_ts: deadline when this sequence becomes invalid (first_ts + dur)
 */
struct pri_sequence {
	struct list_head head;
	u32 pri;
	u32 dur;
	u32 count;
	u32 count_falses;
	u64 first_ts;
	u64 last_ts;
	u64 deadline_ts;
};
46
/**
 * struct pulse_elem - elements in pulse queue
 * @head: list_head (queued newest-first, see pulse_queue_enqueue())
 * @ts: time stamp in usecs
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};
55
/**
 * pde_get_multiple() - number of multiples of @fraction matched by @val
 *
 * Returns f >= 1 when abs(val - f * fraction) <= tolerance,
 * 0 when no such factor exists or @fraction is zero.
 */
static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
{
	u32 quot, rem, diff;

	if (fraction == 0)
		return 0;

	/* trivial case: val is within tolerance of fraction itself */
	diff = (val > fraction) ? (val - fraction) : (fraction - val);
	if (diff <= tolerance)
		return 1;

	quot = val / fraction;
	rem = val % fraction;

	if (rem <= tolerance)
		/* val lies just above a multiple of fraction */
		return quot;
	if (fraction - rem <= tolerance)
		/* val lies just below the next multiple */
		return quot + 1;

	/* no multiple within tolerance */
	return 0;
}
87
88/**
89 * DOC: Singleton Pulse and Sequence Pools
90 *
91 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
92 * reduce the number of dynamic allocations. They are shared between all
93 * instances and grow up to the peak number of simultaneously used objects.
94 *
95 * Memory is freed after all references to the pools are released.
96 */
97static u32 singleton_pool_references;
98static LIST_HEAD(pulse_pool);
99static LIST_HEAD(pseq_pool);
100static DEFINE_SPINLOCK(pool_lock);
101
/* take a reference on the shared singleton pools (see DOC above) */
static void pool_register_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references++;
	DFS_POOL_STAT_INC(pool_reference);
	spin_unlock_bh(&pool_lock);
}
109
/*
 * Drop a reference on the shared singleton pools; the last user to
 * leave frees all pooled pulse and sequence objects.
 */
static void pool_deregister_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references--;
	DFS_POOL_STAT_DEC(pool_reference);
	if (singleton_pool_references == 0) {
		/* free singleton pools with no references left */
		struct pri_sequence *ps, *ps0;
		struct pulse_elem *p, *p0;

		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
			list_del(&p->head);
			DFS_POOL_STAT_DEC(pulse_allocated);
			kfree(p);
		}
		list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
			list_del(&ps->head);
			DFS_POOL_STAT_DEC(pseq_allocated);
			kfree(ps);
		}
	}
	spin_unlock_bh(&pool_lock);
}
133
/* return a pulse element to the shared pool for reuse */
static void pool_put_pulse_elem(struct pulse_elem *pe)
{
	spin_lock_bh(&pool_lock);
	list_add(&pe->head, &pulse_pool);
	DFS_POOL_STAT_DEC(pulse_used);
	spin_unlock_bh(&pool_lock);
}
141
/* return a sequence element to the shared pool for reuse */
static void pool_put_pseq_elem(struct pri_sequence *pse)
{
	spin_lock_bh(&pool_lock);
	list_add(&pse->head, &pseq_pool);
	DFS_POOL_STAT_DEC(pseq_used);
	spin_unlock_bh(&pool_lock);
}
149
/* take a recycled sequence element from the pool, NULL when pool is empty */
static struct pri_sequence *pool_get_pseq_elem(void)
{
	struct pri_sequence *pse = NULL;
	spin_lock_bh(&pool_lock);
	if (!list_empty(&pseq_pool)) {
		pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
		list_del(&pse->head);
		DFS_POOL_STAT_INC(pseq_used);
	}
	spin_unlock_bh(&pool_lock);
	return pse;
}
162
/* take a recycled pulse element from the pool, NULL when pool is empty */
static struct pulse_elem *pool_get_pulse_elem(void)
{
	struct pulse_elem *pe = NULL;
	spin_lock_bh(&pool_lock);
	if (!list_empty(&pulse_pool)) {
		pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
		list_del(&pe->head);
		DFS_POOL_STAT_INC(pulse_used);
	}
	spin_unlock_bh(&pool_lock);
	return pe;
}
175
176static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
177{
178 struct list_head *l = &pde->pulses;
179 if (list_empty(l))
180 return NULL;
181 return list_entry(l->prev, struct pulse_elem, head);
182}
183
184static bool pulse_queue_dequeue(struct pri_detector *pde)
185{
186 struct pulse_elem *p = pulse_queue_get_tail(pde);
187 if (p != NULL) {
188 list_del_init(&p->head);
189 pde->count--;
190 /* give it back to pool */
191 pool_put_pulse_elem(p);
192 }
193 return (pde->count > 0);
194}
195
/* remove pulses older than window (window_size back from last_ts) */
static void pulse_queue_check_window(struct pri_detector *pde)
{
	u64 min_valid_ts;
	struct pulse_elem *p;

	/* there is no delta time with less than 2 pulses */
	if (pde->count < 2)
		return;

	/* before last_ts exceeds the window size nothing can be stale */
	if (pde->last_ts <= pde->window_size)
		return;

	min_valid_ts = pde->last_ts - pde->window_size;
	/* queue is ordered newest-first: evict from the tail (oldest) */
	while ((p = pulse_queue_get_tail(pde)) != NULL) {
		if (p->ts >= min_valid_ts)
			return;
		pulse_queue_dequeue(pde);
	}
}
216
/*
 * Add pulse time stamp @ts at the head of the detector's queue.
 *
 * Storage comes from the singleton pool when possible, otherwise it is
 * allocated. Pulses outside the window and beyond max_count are evicted.
 * Returns false only when allocation fails.
 * NOTE(review): kmalloc(GFP_KERNEL) may sleep — confirm this path is
 * never reached from atomic context.
 */
static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
{
	struct pulse_elem *p = pool_get_pulse_elem();
	if (p == NULL) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			DFS_POOL_STAT_INC(pulse_alloc_error);
			return false;
		}
		DFS_POOL_STAT_INC(pulse_allocated);
		DFS_POOL_STAT_INC(pulse_used);
	}
	INIT_LIST_HEAD(&p->head);
	p->ts = ts;
	list_add(&p->head, &pde->pulses);
	pde->count++;
	pde->last_ts = ts;
	pulse_queue_check_window(pde);
	if (pde->count >= pde->max_count)
		pulse_queue_dequeue(pde);
	return true;
}
239
/*
 * Open new candidate pulse sequences for a pulse arriving at @ts.
 *
 * For each queued pulse whose distance to @ts lies within the PRI
 * bounds of the radar spec, a candidate sequence with that PRI is
 * built and matched against the remaining (older) queued pulses.
 * Candidates reaching at least @min_count matching pulses are added
 * to pde->sequences. Returns false on allocation failure.
 */
static bool pseq_handler_create_sequences(struct pri_detector *pde,
					  u64 ts, u32 min_count)
{
	struct pulse_elem *p;
	list_for_each_entry(p, &pde->pulses, head) {
		struct pri_sequence ps, *new_ps;
		struct pulse_elem *p2;
		u32 tmp_false_count;
		u64 min_valid_ts;
		u32 delta_ts = ts - p->ts;

		if (delta_ts < pde->rs->pri_min)
			/* ignore too small pri */
			continue;

		if (delta_ts > pde->rs->pri_max)
			/* stop on too large pri (sorted list) */
			break;

		/* build a new sequence with new potential pri */
		ps.count = 2;
		ps.count_falses = 0;
		ps.first_ts = p->ts;
		ps.last_ts = ts;
		ps.pri = ts - p->ts;
		/* full burst duration: (ppb - 1) PRIs plus tolerance margin */
		ps.dur = ps.pri * (pde->rs->ppb - 1)
				+ 2 * pde->rs->max_pri_tolerance;

		p2 = p;
		tmp_false_count = 0;
		min_valid_ts = ts - ps.dur;
		/* check which past pulses are candidates for new sequence */
		list_for_each_entry_continue(p2, &pde->pulses, head) {
			u32 factor;
			if (p2->ts < min_valid_ts)
				/* stop on crossing window border */
				break;
			/* check if pulse match (multi)PRI */
			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
						  pde->rs->max_pri_tolerance);
			if (factor > 0) {
				ps.count++;
				ps.first_ts = p2->ts;
				/*
				 * on match, add the intermediate falses
				 * and reset counter
				 */
				ps.count_falses += tmp_false_count;
				tmp_false_count = 0;
			} else {
				/* this is a potential false one */
				tmp_false_count++;
			}
		}
		if (ps.count < min_count)
			/* did not reach minimum count, drop sequence */
			continue;

		/* this is a valid one, add it */
		ps.deadline_ts = ps.first_ts + ps.dur;
		new_ps = pool_get_pseq_elem();
		if (new_ps == NULL) {
			new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL);
			if (new_ps == NULL) {
				DFS_POOL_STAT_INC(pseq_alloc_error);
				return false;
			}
			DFS_POOL_STAT_INC(pseq_allocated);
			DFS_POOL_STAT_INC(pseq_used);
		}
		memcpy(new_ps, &ps, sizeof(ps));
		INIT_LIST_HEAD(&new_ps->head);
		list_add(&new_ps->head, &pde->sequences);
	}
	return true;
}
316
/*
 * Check new ts and add to all matching existing sequences.
 *
 * Sequences past their deadline are released back to the pool. For the
 * remaining ones, @ts either extends the sequence (when its distance to
 * the sequence's last pulse is a multiple of the PRI within tolerance)
 * or is counted as a false pulse. Returns the highest pulse count among
 * the updated sequences, 0 when none matched.
 */
static u32
pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
{
	u32 max_count = 0;
	struct pri_sequence *ps, *ps2;
	list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
		u32 delta_ts;
		u32 factor;

		/* first ensure that sequence is within window */
		if (ts > ps->deadline_ts) {
			list_del_init(&ps->head);
			pool_put_pseq_elem(ps);
			continue;
		}

		delta_ts = ts - ps->last_ts;
		factor = pde_get_multiple(delta_ts, ps->pri,
					  pde->rs->max_pri_tolerance);
		if (factor > 0) {
			ps->last_ts = ts;
			ps->count++;

			if (max_count < ps->count)
				max_count = ps->count;
		} else {
			ps->count_falses++;
		}
	}
	return max_count;
}
349
350static struct pri_sequence *
351pseq_handler_check_detection(struct pri_detector *pde)
352{
353 struct pri_sequence *ps;
354
355 if (list_empty(&pde->sequences))
356 return NULL;
357
358 list_for_each_entry(ps, &pde->sequences, head) {
359 /*
360 * we assume to have enough matching confidence if we
361 * 1) have enough pulses
362 * 2) have more matching than false pulses
363 */
364 if ((ps->count >= pde->rs->ppb_thresh) &&
365 (ps->count * pde->rs->num_pri >= ps->count_falses))
366 return ps;
367 }
368 return NULL;
369}
370
371
/* free pulse queue and sequences list and give objects back to pools */
static void pri_detector_reset(struct pri_detector *pde, u64 ts)
{
	struct pri_sequence *ps, *ps0;
	struct pulse_elem *p, *p0;
	list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
		list_del_init(&ps->head);
		pool_put_pseq_elem(ps);
	}
	list_for_each_entry_safe(p, p0, &pde->pulses, head) {
		list_del_init(&p->head);
		pool_put_pulse_elem(p);
	}
	/* restart with @ts as the new reference time stamp */
	pde->count = 0;
	pde->last_ts = ts;
}
388
/* destructor: drain all state, drop the pool reference, free @de */
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
395
/*
 * pri_detector_add_pulse() - feed one pulse into this PRI detector
 *
 * Returns true when the pulse completes a sequence satisfying the
 * detection thresholds of the radar spec (the detector is then reset).
 * Returns false when the pulse lies outside the spec's width range, is
 * too close to the previous pulse, or no detection was triggered.
 */
static bool pri_detector_add_pulse(struct pri_detector *de,
				   struct pulse_event *event)
{
	u32 max_updated_seq;
	struct pri_sequence *ps;
	u64 ts = event->ts;
	const struct radar_detector_specs *rs = de->rs;

	/* ignore pulses not within width range */
	if ((rs->width_min > event->width) || (rs->width_max < event->width))
		return false;

	if ((ts - de->last_ts) < rs->max_pri_tolerance)
		/* if delta to last pulse is too short, don't use this pulse */
		return false;
	de->last_ts = ts;

	max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);

	/* on allocation failure, start over from a clean state */
	if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
		pr_err("failed to create pulse sequences\n");
		pri_detector_reset(de, ts);
		return false;
	}

	ps = pseq_handler_check_detection(de);

	if (ps != NULL) {
		pr_info("DFS: radar found: pri=%d, count=%d, count_false=%d\n",
			ps->pri, ps->count, ps->count_falses);
		pri_detector_reset(de, ts);
		return true;
	}
	/* no detection: keep the pulse for future sequence building */
	pulse_queue_enqueue(de, ts);
	return false;
}
432
/*
 * pri_detector_init() - create a PRI detector element for one radar type
 * @rs: radar detection specs this element matches against
 *
 * Returns NULL on allocation failure. The element takes a reference on
 * the singleton pools which is dropped again in pri_detector_exit().
 */
struct pri_detector *
pri_detector_init(const struct radar_detector_specs *rs)
{
	struct pri_detector *de;
	de = kzalloc(sizeof(*de), GFP_KERNEL);
	if (de == NULL)
		return NULL;
	de->exit = pri_detector_exit;
	de->add_pulse = pri_detector_add_pulse;
	de->reset = pri_detector_reset;

	INIT_LIST_HEAD(&de->sequences);
	INIT_LIST_HEAD(&de->pulses);
	/* window spans the longest possible detection span of this type */
	de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
	de->max_count = rs->ppb * 2;
	de->rs = rs;

	pool_register_ref();
	return de;
}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
new file mode 100644
index 000000000000..81cde9f28e44
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (c) 2012 Neratec Solutions AG
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef DFS_PRI_DETECTOR_H
18#define DFS_PRI_DETECTOR_H
19
20#include <linux/list.h>
21
/**
 * struct pri_detector - PRI detector element for a dedicated radar type
 * @exit(): destructor
 * @add_pulse(): add pulse event, returns true if pattern was detected
 * @reset(): clear states and reset to given time stamp
 * @rs: detector specs for this detector element
 * @last_ts: last pulse time stamp considered for this element in usecs
 * @sequences: list_head holding potential pulse sequences
 * @pulses: list connecting pulse_elem objects
 * @count: number of pulses in queue
 * @max_count: maximum number of pulses to be queued
 * @window_size: window size back from newest pulse time stamp in usecs
 *
 * Instances are created with pri_detector_init() and destroyed through
 * the @exit() callback.
 */
struct pri_detector {
	void (*exit) (struct pri_detector *de);
	bool (*add_pulse)(struct pri_detector *de, struct pulse_event *e);
	void (*reset) (struct pri_detector *de, u64 ts);

/* private: internal use only */
	const struct radar_detector_specs *rs;
	u64 last_ts;
	struct list_head sequences;
	struct list_head pulses;
	u32 count;
	u32 max_count;
	u32 window_size;
};
49
50struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs);
51
52#endif /* DFS_PRI_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index c43523233319..0512397a293c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -16,14 +16,6 @@
16 16
17#include "hw.h" 17#include "hw.h"
18 18
19static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
20{
21 if (fbin == AR5416_BCHAN_UNUSED)
22 return fbin;
23
24 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
25}
26
27void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val) 19void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
28{ 20{
29 REG_WRITE(ah, reg, val); 21 REG_WRITE(ah, reg, val);
@@ -290,6 +282,34 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
290 return twiceMaxEdgePower; 282 return twiceMaxEdgePower;
291} 283}
292 284
285u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
286 u8 antenna_reduction)
287{
288 u16 reduction = antenna_reduction;
289
290 /*
291 * Reduce scaled Power by number of chains active
292 * to get the per chain tx power level.
293 */
294 switch (ar5416_get_ntxchains(ah->txchainmask)) {
295 case 1:
296 break;
297 case 2:
298 reduction += POWER_CORRECTION_FOR_TWO_CHAIN;
299 break;
300 case 3:
301 reduction += POWER_CORRECTION_FOR_THREE_CHAIN;
302 break;
303 }
304
305 if (power_limit > reduction)
306 power_limit -= reduction;
307 else
308 power_limit = 0;
309
310 return power_limit;
311}
312
293void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah) 313void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
294{ 314{
295 struct ath_common *common = ath9k_hw_common(ah); 315 struct ath_common *common = ath9k_hw_common(ah);
@@ -299,10 +319,10 @@ void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
299 case 1: 319 case 1:
300 break; 320 break;
301 case 2: 321 case 2:
302 regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN; 322 regulatory->max_power_level += POWER_CORRECTION_FOR_TWO_CHAIN;
303 break; 323 break;
304 case 3: 324 case 3:
305 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; 325 regulatory->max_power_level += POWER_CORRECTION_FOR_THREE_CHAIN;
306 break; 326 break;
307 default: 327 default:
308 ath_dbg(common, EEPROM, "Invalid chainmask configuration\n"); 328 ath_dbg(common, EEPROM, "Invalid chainmask configuration\n");
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 5ff7ab965120..33acb920ed3f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -79,8 +79,8 @@
79#define SUB_NUM_CTL_MODES_AT_5G_40 2 79#define SUB_NUM_CTL_MODES_AT_5G_40 2
80#define SUB_NUM_CTL_MODES_AT_2G_40 3 80#define SUB_NUM_CTL_MODES_AT_2G_40 3
81 81
82#define INCREASE_MAXPOW_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ 82#define POWER_CORRECTION_FOR_TWO_CHAIN 6 /* 10*log10(2)*2 */
83#define INCREASE_MAXPOW_BY_THREE_CHAIN 10 /* 10*log10(3)*2 */ 83#define POWER_CORRECTION_FOR_THREE_CHAIN 10 /* 10*log10(3)*2 */
84 84
85/* 85/*
86 * For AR9285 and later chipsets, the following bits are not being programmed 86 * For AR9285 and later chipsets, the following bits are not being programmed
@@ -686,6 +686,8 @@ void ath9k_hw_get_target_powers(struct ath_hw *ah,
686 u16 numRates, bool isHt40Target); 686 u16 numRates, bool isHt40Target);
687u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, 687u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
688 bool is2GHz, int num_band_edges); 688 bool is2GHz, int num_band_edges);
689u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
690 u8 antenna_reduction);
689void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah); 691void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah);
690int ath9k_hw_eeprom_init(struct ath_hw *ah); 692int ath9k_hw_eeprom_init(struct ath_hw *ah);
691 693
@@ -697,6 +699,14 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
697 u16 *pPdGainBoundaries, u8 *pPDADCValues, 699 u16 *pPdGainBoundaries, u8 *pPDADCValues,
698 u16 numXpdGains); 700 u16 numXpdGains);
699 701
702static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
703{
704 if (fbin == AR5416_BCHAN_UNUSED)
705 return fbin;
706
707 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
708}
709
700#define ar5416_get_ntxchains(_txchainmask) \ 710#define ar5416_get_ntxchains(_txchainmask) \
701 (((_txchainmask >> 2) & 1) + \ 711 (((_txchainmask >> 2) & 1) + \
702 ((_txchainmask >> 1) & 1) + (_txchainmask & 1)) 712 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index f272236d8053..aa614767adff 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -564,9 +564,6 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
564 (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \ 564 (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
565 ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL)) 565 ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))
566 566
567#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
568#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
569
570 u16 twiceMaxEdgePower; 567 u16 twiceMaxEdgePower;
571 int i; 568 int i;
572 struct cal_ctl_data_ar9287 *rep; 569 struct cal_ctl_data_ar9287 *rep;
@@ -591,29 +588,8 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
591 tx_chainmask = ah->txchainmask; 588 tx_chainmask = ah->txchainmask;
592 589
593 ath9k_hw_get_channel_centers(ah, chan, &centers); 590 ath9k_hw_get_channel_centers(ah, chan, &centers);
594 scaledPower = powerLimit - antenna_reduction; 591 scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
595 592 antenna_reduction);
596 /*
597 * Reduce scaled Power by number of chains active
598 * to get the per chain tx power level.
599 */
600 switch (ar5416_get_ntxchains(tx_chainmask)) {
601 case 1:
602 break;
603 case 2:
604 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
605 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
606 else
607 scaledPower = 0;
608 break;
609 case 3:
610 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
611 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
612 else
613 scaledPower = 0;
614 break;
615 }
616 scaledPower = max((u16)0, scaledPower);
617 593
618 /* 594 /*
619 * Get TX power from EEPROM. 595 * Get TX power from EEPROM.
@@ -786,8 +762,6 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
786 762
787#undef CMP_CTL 763#undef CMP_CTL
788#undef CMP_NO_CTL 764#undef CMP_NO_CTL
789#undef REDUCE_SCALED_POWER_BY_TWO_CHAIN
790#undef REDUCE_SCALED_POWER_BY_THREE_CHAIN
791} 765}
792 766
793static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah, 767static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
@@ -824,6 +798,8 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
824 regulatory->max_power_level = ratesArray[i]; 798 regulatory->max_power_level = ratesArray[i];
825 } 799 }
826 800
801 ath9k_hw_update_regulatory_maxpower(ah);
802
827 if (test) 803 if (test)
828 return; 804 return;
829 805
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 619b95d764ff..b5fba8b18b8b 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -991,9 +991,6 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
991 u16 antenna_reduction, 991 u16 antenna_reduction,
992 u16 powerLimit) 992 u16 powerLimit)
993{ 993{
994#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
995#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
996
997 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; 994 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
998 u16 twiceMaxEdgePower; 995 u16 twiceMaxEdgePower;
999 int i; 996 int i;
@@ -1027,24 +1024,8 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
1027 1024
1028 ath9k_hw_get_channel_centers(ah, chan, &centers); 1025 ath9k_hw_get_channel_centers(ah, chan, &centers);
1029 1026
1030 scaledPower = powerLimit - antenna_reduction; 1027 scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
1031 1028 antenna_reduction);
1032 switch (ar5416_get_ntxchains(tx_chainmask)) {
1033 case 1:
1034 break;
1035 case 2:
1036 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
1037 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
1038 else
1039 scaledPower = 0;
1040 break;
1041 case 3:
1042 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
1043 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
1044 else
1045 scaledPower = 0;
1046 break;
1047 }
1048 1029
1049 if (IS_CHAN_2GHZ(chan)) { 1030 if (IS_CHAN_2GHZ(chan)) {
1050 numCtlModes = ARRAY_SIZE(ctlModesFor11g) - 1031 numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
@@ -1263,20 +1244,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1263 regulatory->max_power_level = ratesArray[i]; 1244 regulatory->max_power_level = ratesArray[i];
1264 } 1245 }
1265 1246
1266 switch(ar5416_get_ntxchains(ah->txchainmask)) { 1247 ath9k_hw_update_regulatory_maxpower(ah);
1267 case 1:
1268 break;
1269 case 2:
1270 regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
1271 break;
1272 case 3:
1273 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
1274 break;
1275 default:
1276 ath_dbg(ath9k_hw_common(ah), EEPROM,
1277 "Invalid chainmask configuration\n");
1278 break;
1279 }
1280 1248
1281 if (test) 1249 if (test)
1282 return; 1250 return;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index fbe23de1297f..281a9af0f1b6 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -41,6 +41,9 @@ void ath_init_leds(struct ath_softc *sc)
41{ 41{
42 int ret; 42 int ret;
43 43
44 if (AR_SREV_9100(sc->sc_ah))
45 return;
46
44 if (sc->sc_ah->led_pin < 0) { 47 if (sc->sc_ah->led_pin < 0) {
45 if (AR_SREV_9287(sc->sc_ah)) 48 if (AR_SREV_9287(sc->sc_ah))
46 sc->sc_ah->led_pin = ATH_LED_PIN_9287; 49 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
@@ -362,7 +365,7 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
362 ath9k_hw_btcoex_disable(ah); 365 ath9k_hw_btcoex_disable(ah);
363 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) 366 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
364 ath9k_btcoex_timer_pause(sc); 367 ath9k_btcoex_timer_pause(sc);
365 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI) 368 if (AR_SREV_9462(ah))
366 ath_mci_flush_profile(&sc->btcoex.mci); 369 ath_mci_flush_profile(&sc->btcoex.mci);
367 } 370 }
368} 371}
@@ -373,7 +376,7 @@ void ath9k_deinit_btcoex(struct ath_softc *sc)
373 ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE) 376 ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
374 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer); 377 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
375 378
376 if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI) 379 if (AR_SREV_9462(sc->sc_ah))
377 ath_mci_cleanup(sc); 380 ath_mci_cleanup(sc);
378} 381}
379 382
@@ -399,17 +402,16 @@ int ath9k_init_btcoex(struct ath_softc *sc)
399 txq = sc->tx.txq_map[WME_AC_BE]; 402 txq = sc->tx.txq_map[WME_AC_BE];
400 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); 403 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
401 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 404 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
402 break; 405 if (AR_SREV_9462(ah)) {
403 case ATH_BTCOEX_CFG_MCI: 406 sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
404 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 407 INIT_LIST_HEAD(&sc->btcoex.mci.info);
405 sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
406 INIT_LIST_HEAD(&sc->btcoex.mci.info);
407 408
408 r = ath_mci_setup(sc); 409 r = ath_mci_setup(sc);
409 if (r) 410 if (r)
410 return r; 411 return r;
411 412
412 ath9k_hw_btcoex_init_mci(ah); 413 ath9k_hw_btcoex_init_mci(ah);
414 }
413 415
414 break; 416 break;
415 default: 417 default:
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 424aabb2c730..f67cd952e741 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -53,6 +53,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
53 .driver_info = AR9280_USB }, /* SMC Networks */ 53 .driver_info = AR9280_USB }, /* SMC Networks */
54 { USB_DEVICE(0x0411, 0x017f), 54 { USB_DEVICE(0x0411, 0x017f),
55 .driver_info = AR9280_USB }, /* Sony UWA-BR100 */ 55 .driver_info = AR9280_USB }, /* Sony UWA-BR100 */
56 { USB_DEVICE(0x04da, 0x3904),
57 .driver_info = AR9280_USB },
56 58
57 { USB_DEVICE(0x0cf3, 0x20ff), 59 { USB_DEVICE(0x0cf3, 0x20ff),
58 .driver_info = STORAGE_DEVICE }, 60 .driver_info = STORAGE_DEVICE },
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index de5ee15ee639..25213d521bc2 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include "htc.h" 19#include "htc.h"
18 20
19MODULE_AUTHOR("Atheros Communications"); 21MODULE_AUTHOR("Atheros Communications");
@@ -711,7 +713,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
711 713
712 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 714 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
713 715
714 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 716 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
717 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
715 718
716 hw->queues = 4; 719 hw->queues = 4;
717 hw->channel_change_time = 5000; 720 hw->channel_change_time = 5000;
@@ -966,9 +969,7 @@ int ath9k_htc_resume(struct htc_target *htc_handle)
966static int __init ath9k_htc_init(void) 969static int __init ath9k_htc_init(void)
967{ 970{
968 if (ath9k_hif_usb_init() < 0) { 971 if (ath9k_hif_usb_init() < 0) {
969 printk(KERN_ERR 972 pr_err("No USB devices found, driver not installed\n");
970 "ath9k_htc: No USB devices found,"
971 " driver not installed.\n");
972 return -ENODEV; 973 return -ENODEV;
973 } 974 }
974 975
@@ -979,6 +980,6 @@ module_init(ath9k_htc_init);
979static void __exit ath9k_htc_exit(void) 980static void __exit ath9k_htc_exit(void)
980{ 981{
981 ath9k_hif_usb_exit(); 982 ath9k_hif_usb_exit();
982 printk(KERN_INFO "ath9k_htc: Driver unloaded\n"); 983 pr_info("Driver unloaded\n");
983} 984}
984module_exit(ath9k_htc_exit); 985module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index c25226a32ddc..4a9570dfba72 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include "htc.h" 19#include "htc.h"
18 20
19static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, 21static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
@@ -461,7 +463,7 @@ int ath9k_htc_hw_init(struct htc_target *target,
461 char *product, u32 drv_info) 463 char *product, u32 drv_info)
462{ 464{
463 if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) { 465 if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) {
464 printk(KERN_ERR "Failed to initialize the device\n"); 466 pr_err("Failed to initialize the device\n");
465 return -ENODEV; 467 return -ENODEV;
466 } 468 }
467 469
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 6c69e4e8b1cb..f84477c5ebb1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -24,6 +24,8 @@
24#include "rc.h" 24#include "rc.h"
25#include "ar9003_mac.h" 25#include "ar9003_mac.h"
26#include "ar9003_mci.h" 26#include "ar9003_mci.h"
27#include "debug.h"
28#include "ath9k.h"
27 29
28static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 30static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
29 31
@@ -83,6 +85,53 @@ static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
83/* Helper Functions */ 85/* Helper Functions */
84/********************/ 86/********************/
85 87
88#ifdef CONFIG_ATH9K_DEBUGFS
89
90void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
91{
92 struct ath_softc *sc = common->priv;
93 if (sync_cause)
94 sc->debug.stats.istats.sync_cause_all++;
95 if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
96 sc->debug.stats.istats.sync_rtc_irq++;
97 if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
98 sc->debug.stats.istats.sync_mac_irq++;
99 if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
100 sc->debug.stats.istats.eeprom_illegal_access++;
101 if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
102 sc->debug.stats.istats.apb_timeout++;
103 if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
104 sc->debug.stats.istats.pci_mode_conflict++;
105 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
106 sc->debug.stats.istats.host1_fatal++;
107 if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
108 sc->debug.stats.istats.host1_perr++;
109 if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
110 sc->debug.stats.istats.trcv_fifo_perr++;
111 if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
112 sc->debug.stats.istats.radm_cpl_ep++;
113 if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
114 sc->debug.stats.istats.radm_cpl_dllp_abort++;
115 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
116 sc->debug.stats.istats.radm_cpl_tlp_abort++;
117 if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
118 sc->debug.stats.istats.radm_cpl_ecrc_err++;
119 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
120 sc->debug.stats.istats.radm_cpl_timeout++;
121 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
122 sc->debug.stats.istats.local_timeout++;
123 if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
124 sc->debug.stats.istats.pm_access++;
125 if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
126 sc->debug.stats.istats.mac_awake++;
127 if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
128 sc->debug.stats.istats.mac_asleep++;
129 if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
130 sc->debug.stats.istats.mac_sleep_access++;
131}
132#endif
133
134
86static void ath9k_hw_set_clockrate(struct ath_hw *ah) 135static void ath9k_hw_set_clockrate(struct ath_hw *ah)
87{ 136{
88 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 137 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
@@ -142,6 +191,22 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
142} 191}
143EXPORT_SYMBOL(ath9k_hw_wait); 192EXPORT_SYMBOL(ath9k_hw_wait);
144 193
194void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
195 int hw_delay)
196{
197 if (IS_CHAN_B(chan))
198 hw_delay = (4 * hw_delay) / 22;
199 else
200 hw_delay /= 10;
201
202 if (IS_CHAN_HALF_RATE(chan))
203 hw_delay *= 2;
204 else if (IS_CHAN_QUARTER_RATE(chan))
205 hw_delay *= 4;
206
207 udelay(hw_delay + BASE_ACTIVATE_DELAY);
208}
209
145void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array, 210void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
146 int column, unsigned int *writecnt) 211 int column, unsigned int *writecnt)
147{ 212{
@@ -388,8 +453,8 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
388{ 453{
389 int i; 454 int i;
390 455
391 ah->config.dma_beacon_response_time = 2; 456 ah->config.dma_beacon_response_time = 1;
392 ah->config.sw_beacon_response_time = 10; 457 ah->config.sw_beacon_response_time = 6;
393 ah->config.additional_swba_backoff = 0; 458 ah->config.additional_swba_backoff = 0;
394 ah->config.ack_6mb = 0x0; 459 ah->config.ack_6mb = 0x0;
395 ah->config.cwm_ignore_extcca = 0; 460 ah->config.cwm_ignore_extcca = 0;
@@ -445,7 +510,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
445 AR_STA_ID1_MCAST_KSRCH; 510 AR_STA_ID1_MCAST_KSRCH;
446 if (AR_SREV_9100(ah)) 511 if (AR_SREV_9100(ah))
447 ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX; 512 ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
448 ah->enable_32kHz_clock = DONT_USE_32KHZ;
449 ah->slottime = ATH9K_SLOT_TIME_9; 513 ah->slottime = ATH9K_SLOT_TIME_9;
450 ah->globaltxtimeout = (u32) -1; 514 ah->globaltxtimeout = (u32) -1;
451 ah->power_mode = ATH9K_PM_UNDEFINED; 515 ah->power_mode = ATH9K_PM_UNDEFINED;
@@ -972,7 +1036,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
972 struct ath_common *common = ath9k_hw_common(ah); 1036 struct ath_common *common = ath9k_hw_common(ah);
973 struct ieee80211_conf *conf = &common->hw->conf; 1037 struct ieee80211_conf *conf = &common->hw->conf;
974 const struct ath9k_channel *chan = ah->curchan; 1038 const struct ath9k_channel *chan = ah->curchan;
975 int acktimeout, ctstimeout; 1039 int acktimeout, ctstimeout, ack_offset = 0;
976 int slottime; 1040 int slottime;
977 int sifstime; 1041 int sifstime;
978 int rx_lat = 0, tx_lat = 0, eifs = 0; 1042 int rx_lat = 0, tx_lat = 0, eifs = 0;
@@ -993,6 +1057,11 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
993 rx_lat = 37; 1057 rx_lat = 37;
994 tx_lat = 54; 1058 tx_lat = 54;
995 1059
1060 if (IS_CHAN_5GHZ(chan))
1061 sifstime = 16;
1062 else
1063 sifstime = 10;
1064
996 if (IS_CHAN_HALF_RATE(chan)) { 1065 if (IS_CHAN_HALF_RATE(chan)) {
997 eifs = 175; 1066 eifs = 175;
998 rx_lat *= 2; 1067 rx_lat *= 2;
@@ -1000,8 +1069,9 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1000 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1069 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
1001 tx_lat += 11; 1070 tx_lat += 11;
1002 1071
1072 sifstime *= 2;
1073 ack_offset = 16;
1003 slottime = 13; 1074 slottime = 13;
1004 sifstime = 32;
1005 } else if (IS_CHAN_QUARTER_RATE(chan)) { 1075 } else if (IS_CHAN_QUARTER_RATE(chan)) {
1006 eifs = 340; 1076 eifs = 340;
1007 rx_lat = (rx_lat * 4) - 1; 1077 rx_lat = (rx_lat * 4) - 1;
@@ -1009,8 +1079,9 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1009 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1079 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
1010 tx_lat += 22; 1080 tx_lat += 22;
1011 1081
1082 sifstime *= 4;
1083 ack_offset = 32;
1012 slottime = 21; 1084 slottime = 21;
1013 sifstime = 64;
1014 } else { 1085 } else {
1015 if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) { 1086 if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
1016 eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO; 1087 eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO;
@@ -1024,14 +1095,10 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1024 tx_lat = MS(reg, AR_USEC_TX_LAT); 1095 tx_lat = MS(reg, AR_USEC_TX_LAT);
1025 1096
1026 slottime = ah->slottime; 1097 slottime = ah->slottime;
1027 if (IS_CHAN_5GHZ(chan))
1028 sifstime = 16;
1029 else
1030 sifstime = 10;
1031 } 1098 }
1032 1099
1033 /* As defined by IEEE 802.11-2007 17.3.8.6 */ 1100 /* As defined by IEEE 802.11-2007 17.3.8.6 */
1034 acktimeout = slottime + sifstime + 3 * ah->coverage_class; 1101 acktimeout = slottime + sifstime + 3 * ah->coverage_class + ack_offset;
1035 ctstimeout = acktimeout; 1102 ctstimeout = acktimeout;
1036 1103
1037 /* 1104 /*
@@ -1041,7 +1108,8 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1041 * BA frames in some implementations, but it has been found to fix ACK 1108 * BA frames in some implementations, but it has been found to fix ACK
1042 * timeout issues in other cases as well. 1109 * timeout issues in other cases as well.
1043 */ 1110 */
1044 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) { 1111 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ &&
1112 !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
1045 acktimeout += 64 - sifstime - ah->slottime; 1113 acktimeout += 64 - sifstime - ah->slottime;
1046 ctstimeout += 48 - sifstime - ah->slottime; 1114 ctstimeout += 48 - sifstime - ah->slottime;
1047 } 1115 }
@@ -1454,7 +1522,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1454 return false; 1522 return false;
1455 } 1523 }
1456 ath9k_hw_set_clockrate(ah); 1524 ath9k_hw_set_clockrate(ah);
1457 ath9k_hw_apply_txpower(ah, chan); 1525 ath9k_hw_apply_txpower(ah, chan, false);
1458 ath9k_hw_rfbus_done(ah); 1526 ath9k_hw_rfbus_done(ah);
1459 1527
1460 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1528 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
@@ -1491,11 +1559,84 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
1491 } 1559 }
1492} 1560}
1493 1561
1562static bool ath9k_hw_check_dcs(u32 dma_dbg, u32 num_dcu_states,
1563 int *hang_state, int *hang_pos)
1564{
1565 static u32 dcu_chain_state[] = {5, 6, 9}; /* DCU chain stuck states */
1566 u32 chain_state, dcs_pos, i;
1567
1568 for (dcs_pos = 0; dcs_pos < num_dcu_states; dcs_pos++) {
1569 chain_state = (dma_dbg >> (5 * dcs_pos)) & 0x1f;
1570 for (i = 0; i < 3; i++) {
1571 if (chain_state == dcu_chain_state[i]) {
1572 *hang_state = chain_state;
1573 *hang_pos = dcs_pos;
1574 return true;
1575 }
1576 }
1577 }
1578 return false;
1579}
1580
1581#define DCU_COMPLETE_STATE 1
1582#define DCU_COMPLETE_STATE_MASK 0x3
1583#define NUM_STATUS_READS 50
1584static bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
1585{
1586 u32 chain_state, comp_state, dcs_reg = AR_DMADBG_4;
1587 u32 i, hang_pos, hang_state, num_state = 6;
1588
1589 comp_state = REG_READ(ah, AR_DMADBG_6);
1590
1591 if ((comp_state & DCU_COMPLETE_STATE_MASK) != DCU_COMPLETE_STATE) {
1592 ath_dbg(ath9k_hw_common(ah), RESET,
1593 "MAC Hang signature not found at DCU complete\n");
1594 return false;
1595 }
1596
1597 chain_state = REG_READ(ah, dcs_reg);
1598 if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
1599 goto hang_check_iter;
1600
1601 dcs_reg = AR_DMADBG_5;
1602 num_state = 4;
1603 chain_state = REG_READ(ah, dcs_reg);
1604 if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
1605 goto hang_check_iter;
1606
1607 ath_dbg(ath9k_hw_common(ah), RESET,
1608 "MAC Hang signature 1 not found\n");
1609 return false;
1610
1611hang_check_iter:
1612 ath_dbg(ath9k_hw_common(ah), RESET,
1613 "DCU registers: chain %08x complete %08x Hang: state %d pos %d\n",
1614 chain_state, comp_state, hang_state, hang_pos);
1615
1616 for (i = 0; i < NUM_STATUS_READS; i++) {
1617 chain_state = REG_READ(ah, dcs_reg);
1618 chain_state = (chain_state >> (5 * hang_pos)) & 0x1f;
1619 comp_state = REG_READ(ah, AR_DMADBG_6);
1620
1621 if (((comp_state & DCU_COMPLETE_STATE_MASK) !=
1622 DCU_COMPLETE_STATE) ||
1623 (chain_state != hang_state))
1624 return false;
1625 }
1626
1627 ath_dbg(ath9k_hw_common(ah), RESET, "MAC Hang signature 1 found\n");
1628
1629 return true;
1630}
1631
1494bool ath9k_hw_check_alive(struct ath_hw *ah) 1632bool ath9k_hw_check_alive(struct ath_hw *ah)
1495{ 1633{
1496 int count = 50; 1634 int count = 50;
1497 u32 reg; 1635 u32 reg;
1498 1636
1637 if (AR_SREV_9300(ah))
1638 return !ath9k_hw_detect_mac_hang(ah);
1639
1499 if (AR_SREV_9285_12_OR_LATER(ah)) 1640 if (AR_SREV_9285_12_OR_LATER(ah))
1500 return true; 1641 return true;
1501 1642
@@ -1546,6 +1687,10 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1546 if (chan->channel == ah->curchan->channel) 1687 if (chan->channel == ah->curchan->channel)
1547 goto fail; 1688 goto fail;
1548 1689
1690 if ((ah->curchan->channelFlags | chan->channelFlags) &
1691 (CHANNEL_HALF | CHANNEL_QUARTER))
1692 goto fail;
1693
1549 if ((chan->channelFlags & CHANNEL_ALL) != 1694 if ((chan->channelFlags & CHANNEL_ALL) !=
1550 (ah->curchan->channelFlags & CHANNEL_ALL)) 1695 (ah->curchan->channelFlags & CHANNEL_ALL))
1551 goto fail; 1696 goto fail;
@@ -2652,7 +2797,8 @@ static int get_antenna_gain(struct ath_hw *ah, struct ath9k_channel *chan)
2652 return ah->eep_ops->get_eeprom(ah, gain_param); 2797 return ah->eep_ops->get_eeprom(ah, gain_param);
2653} 2798}
2654 2799
2655void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan) 2800void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
2801 bool test)
2656{ 2802{
2657 struct ath_regulatory *reg = ath9k_hw_regulatory(ah); 2803 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
2658 struct ieee80211_channel *channel; 2804 struct ieee80211_channel *channel;
@@ -2673,7 +2819,7 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan)
2673 2819
2674 ah->eep_ops->set_txpower(ah, chan, 2820 ah->eep_ops->set_txpower(ah, chan,
2675 ath9k_regd_get_ctl(reg, chan), 2821 ath9k_regd_get_ctl(reg, chan),
2676 ant_reduction, new_pwr, false); 2822 ant_reduction, new_pwr, test);
2677} 2823}
2678 2824
2679void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test) 2825void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
@@ -2686,7 +2832,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
2686 if (test) 2832 if (test)
2687 channel->max_power = MAX_RATE_POWER / 2; 2833 channel->max_power = MAX_RATE_POWER / 2;
2688 2834
2689 ath9k_hw_apply_txpower(ah, chan); 2835 ath9k_hw_apply_txpower(ah, chan, test);
2690 2836
2691 if (test) 2837 if (test)
2692 channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2); 2838 channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index aa1680a0c7fd..828b9bbc456d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -708,7 +708,6 @@ struct ath_hw {
708 struct ar5416Stats stats; 708 struct ar5416Stats stats;
709 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; 709 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
710 710
711 int16_t curchan_rad_index;
712 enum ath9k_int imask; 711 enum ath9k_int imask;
713 u32 imrs2_reg; 712 u32 imrs2_reg;
714 u32 txok_interrupt_mask; 713 u32 txok_interrupt_mask;
@@ -762,11 +761,6 @@ struct ath_hw {
762 761
763 u32 sta_id1_defaults; 762 u32 sta_id1_defaults;
764 u32 misc_mode; 763 u32 misc_mode;
765 enum {
766 AUTO_32KHZ,
767 USE_32KHZ,
768 DONT_USE_32KHZ,
769 } enable_32kHz_clock;
770 764
771 /* Private to hardware code */ 765 /* Private to hardware code */
772 struct ath_hw_private_ops private_ops; 766 struct ath_hw_private_ops private_ops;
@@ -783,7 +777,6 @@ struct ath_hw {
783 u32 *analogBank7Data; 777 u32 *analogBank7Data;
784 u32 *bank6Temp; 778 u32 *bank6Temp;
785 779
786 u8 txpower_limit;
787 int coverage_class; 780 int coverage_class;
788 u32 slottime; 781 u32 slottime;
789 u32 globaltxtimeout; 782 u32 globaltxtimeout;
@@ -848,7 +841,6 @@ struct ath_hw {
848 struct ath_gen_timer_table hw_gen_timers; 841 struct ath_gen_timer_table hw_gen_timers;
849 842
850 struct ar9003_txs *ts_ring; 843 struct ar9003_txs *ts_ring;
851 void *ts_start;
852 u32 ts_paddr_start; 844 u32 ts_paddr_start;
853 u32 ts_paddr_end; 845 u32 ts_paddr_end;
854 u16 ts_tail; 846 u16 ts_tail;
@@ -915,7 +907,6 @@ static inline u8 get_streams(int mask)
915} 907}
916 908
917/* Initialization, Detach, Reset */ 909/* Initialization, Detach, Reset */
918const char *ath9k_hw_probe(u16 vendorid, u16 devid);
919void ath9k_hw_deinit(struct ath_hw *ah); 910void ath9k_hw_deinit(struct ath_hw *ah);
920int ath9k_hw_init(struct ath_hw *ah); 911int ath9k_hw_init(struct ath_hw *ah);
921int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 912int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
@@ -932,6 +923,8 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
932void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); 923void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
933 924
934/* General Operation */ 925/* General Operation */
926void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
927 int hw_delay);
935bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); 928bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
936void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array, 929void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
937 int column, unsigned int *writecnt); 930 int column, unsigned int *writecnt);
@@ -965,6 +958,13 @@ bool ath9k_hw_check_alive(struct ath_hw *ah);
965 958
966bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode); 959bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
967 960
961#ifdef CONFIG_ATH9K_DEBUGFS
962void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause);
963#else
964static inline void ath9k_debug_sync_cause(struct ath_common *common,
965 u32 sync_cause) {}
966#endif
967
968/* Generic hw timer primitives */ 968/* Generic hw timer primitives */
969struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, 969struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
970 void (*trigger)(void *), 970 void (*trigger)(void *),
@@ -985,7 +985,8 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
985/* PHY */ 985/* PHY */
986void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled, 986void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
987 u32 *coef_mantissa, u32 *coef_exponent); 987 u32 *coef_mantissa, u32 *coef_exponent);
988void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan); 988void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
989 bool test);
989 990
990/* 991/*
991 * Code Specific to AR5008, AR9001 or AR9002, 992 * Code Specific to AR5008, AR9001 or AR9002,
@@ -1011,7 +1012,6 @@ int ar9003_paprd_create_curve(struct ath_hw *ah,
1011int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain); 1012int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
1012int ar9003_paprd_init_table(struct ath_hw *ah); 1013int ar9003_paprd_init_table(struct ath_hw *ah);
1013bool ar9003_paprd_is_done(struct ath_hw *ah); 1014bool ar9003_paprd_is_done(struct ath_hw *ah);
1014void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains);
1015 1015
1016/* Hardware family op attach helpers */ 1016/* Hardware family op attach helpers */
1017void ar5008_hw_attach_phy_ops(struct ath_hw *ah); 1017void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index cb006458fc4b..dee9e092449a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
18#include <linux/slab.h> 20#include <linux/slab.h>
19#include <linux/ath9k_platform.h> 21#include <linux/ath9k_platform.h>
@@ -519,6 +521,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
519 atomic_set(&ah->intr_ref_cnt, -1); 521 atomic_set(&ah->intr_ref_cnt, -1);
520 sc->sc_ah = ah; 522 sc->sc_ah = ah;
521 523
524 sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET);
525
522 if (!pdata) { 526 if (!pdata) {
523 ah->ah_flags |= AH_USE_EEPROM; 527 ah->ah_flags |= AH_USE_EEPROM;
524 sc->sc_ah->led_pin = -1; 528 sc->sc_ah->led_pin = -1;
@@ -642,6 +646,24 @@ void ath9k_reload_chainmask_settings(struct ath_softc *sc)
642 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap); 646 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
643} 647}
644 648
649static const struct ieee80211_iface_limit if_limits[] = {
650 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
651 BIT(NL80211_IFTYPE_P2P_CLIENT) |
652 BIT(NL80211_IFTYPE_WDS) },
653 { .max = 8, .types =
654#ifdef CONFIG_MAC80211_MESH
655 BIT(NL80211_IFTYPE_MESH_POINT) |
656#endif
657 BIT(NL80211_IFTYPE_AP) |
658 BIT(NL80211_IFTYPE_P2P_GO) },
659};
660
661static const struct ieee80211_iface_combination if_comb = {
662 .limits = if_limits,
663 .n_limits = ARRAY_SIZE(if_limits),
664 .max_interfaces = 2048,
665 .num_different_channels = 1,
666};
645 667
646void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 668void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
647{ 669{
@@ -671,11 +693,15 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
671 BIT(NL80211_IFTYPE_ADHOC) | 693 BIT(NL80211_IFTYPE_ADHOC) |
672 BIT(NL80211_IFTYPE_MESH_POINT); 694 BIT(NL80211_IFTYPE_MESH_POINT);
673 695
696 hw->wiphy->iface_combinations = &if_comb;
697 hw->wiphy->n_iface_combinations = 1;
698
674 if (AR_SREV_5416(sc->sc_ah)) 699 if (AR_SREV_5416(sc->sc_ah))
675 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 700 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
676 701
677 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 702 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
678 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 703 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
704 hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
679 705
680 hw->queues = 4; 706 hw->queues = 4;
681 hw->max_rates = 4; 707 hw->max_rates = 4;
@@ -779,6 +805,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
779 goto error_world; 805 goto error_world;
780 } 806 }
781 807
808 setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
782 sc->last_rssi = ATH_RSSI_DUMMY_MARKER; 809 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
783 810
784 ath_init_leds(sc); 811 ath_init_leds(sc);
@@ -821,6 +848,8 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
821 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 848 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
822 849
823 ath9k_hw_deinit(sc->sc_ah); 850 ath9k_hw_deinit(sc->sc_ah);
851 if (sc->dfs_detector != NULL)
852 sc->dfs_detector->exit(sc->dfs_detector);
824 853
825 kfree(sc->sc_ah); 854 kfree(sc->sc_ah);
826 sc->sc_ah = NULL; 855 sc->sc_ah = NULL;
@@ -866,17 +895,14 @@ static int __init ath9k_init(void)
866 /* Register rate control algorithm */ 895 /* Register rate control algorithm */
867 error = ath_rate_control_register(); 896 error = ath_rate_control_register();
868 if (error != 0) { 897 if (error != 0) {
869 printk(KERN_ERR 898 pr_err("Unable to register rate control algorithm: %d\n",
870 "ath9k: Unable to register rate control " 899 error);
871 "algorithm: %d\n",
872 error);
873 goto err_out; 900 goto err_out;
874 } 901 }
875 902
876 error = ath_pci_init(); 903 error = ath_pci_init();
877 if (error < 0) { 904 if (error < 0) {
878 printk(KERN_ERR 905 pr_err("No PCI devices found, driver not installed\n");
879 "ath9k: No PCI devices found, driver not installed.\n");
880 error = -ENODEV; 906 error = -ENODEV;
881 goto err_rate_unregister; 907 goto err_rate_unregister;
882 } 908 }
@@ -905,6 +931,6 @@ static void __exit ath9k_exit(void)
905 ath_ahb_exit(); 931 ath_ahb_exit();
906 ath_pci_exit(); 932 ath_pci_exit();
907 ath_rate_control_unregister(); 933 ath_rate_control_unregister();
908 printk(KERN_INFO "%s: Driver unloaded\n", dev_info); 934 pr_info("%s: Driver unloaded\n", dev_info);
909} 935}
910module_exit(ath9k_exit); 936module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index f7bd2532269c..04ef775ccee1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -133,8 +133,16 @@ EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
133 133
134void ath9k_hw_abort_tx_dma(struct ath_hw *ah) 134void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
135{ 135{
136 int maxdelay = 1000;
136 int i, q; 137 int i, q;
137 138
139 if (ah->curchan) {
140 if (IS_CHAN_HALF_RATE(ah->curchan))
141 maxdelay *= 2;
142 else if (IS_CHAN_QUARTER_RATE(ah->curchan))
143 maxdelay *= 4;
144 }
145
138 REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M); 146 REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
139 147
140 REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF); 148 REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
@@ -142,7 +150,7 @@ void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
142 REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF); 150 REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
143 151
144 for (q = 0; q < AR_NUM_QCU; q++) { 152 for (q = 0; q < AR_NUM_QCU; q++) {
145 for (i = 0; i < 1000; i++) { 153 for (i = 0; i < maxdelay; i++) {
146 if (i) 154 if (i)
147 udelay(5); 155 udelay(5);
148 156
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 2504ab005589..dfa78e8b6470 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -113,21 +113,25 @@ void ath9k_ps_restore(struct ath_softc *sc)
113 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 113 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
114 enum ath9k_power_mode mode; 114 enum ath9k_power_mode mode;
115 unsigned long flags; 115 unsigned long flags;
116 bool reset;
116 117
117 spin_lock_irqsave(&sc->sc_pm_lock, flags); 118 spin_lock_irqsave(&sc->sc_pm_lock, flags);
118 if (--sc->ps_usecount != 0) 119 if (--sc->ps_usecount != 0)
119 goto unlock; 120 goto unlock;
120 121
121 if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK)) 122 if (sc->ps_idle) {
123 ath9k_hw_setrxabort(sc->sc_ah, 1);
124 ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
122 mode = ATH9K_PM_FULL_SLEEP; 125 mode = ATH9K_PM_FULL_SLEEP;
123 else if (sc->ps_enabled && 126 } else if (sc->ps_enabled &&
124 !(sc->ps_flags & (PS_WAIT_FOR_BEACON | 127 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
125 PS_WAIT_FOR_CAB | 128 PS_WAIT_FOR_CAB |
126 PS_WAIT_FOR_PSPOLL_DATA | 129 PS_WAIT_FOR_PSPOLL_DATA |
127 PS_WAIT_FOR_TX_ACK))) 130 PS_WAIT_FOR_TX_ACK))) {
128 mode = ATH9K_PM_NETWORK_SLEEP; 131 mode = ATH9K_PM_NETWORK_SLEEP;
129 else 132 } else {
130 goto unlock; 133 goto unlock;
134 }
131 135
132 spin_lock(&common->cc_lock); 136 spin_lock(&common->cc_lock);
133 ath_hw_cycle_counters_update(common); 137 ath_hw_cycle_counters_update(common);
@@ -241,6 +245,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
241 245
242 sc->hw_busy_count = 0; 246 sc->hw_busy_count = 0;
243 del_timer_sync(&common->ani.timer); 247 del_timer_sync(&common->ani.timer);
248 del_timer_sync(&sc->rx_poll_timer);
244 249
245 ath9k_debug_samp_bb_mac(sc); 250 ath9k_debug_samp_bb_mac(sc);
246 ath9k_hw_disable_interrupts(ah); 251 ath9k_hw_disable_interrupts(ah);
@@ -282,6 +287,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
282 287
283 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 288 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
284 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2); 289 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
290 ath_start_rx_poll(sc, 3);
285 if (!common->disable_ani) 291 if (!common->disable_ani)
286 ath_start_ani(common); 292 ath_start_ani(common);
287 } 293 }
@@ -690,17 +696,6 @@ void ath9k_tasklet(unsigned long data)
690 goto out; 696 goto out;
691 } 697 }
692 698
693 /*
694 * Only run the baseband hang check if beacons stop working in AP or
695 * IBSS mode, because it has a high false positive rate. For station
696 * mode it should not be necessary, since the upper layers will detect
697 * this through a beacon miss automatically and the following channel
698 * change will trigger a hardware reset anyway
699 */
700 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
701 !ath9k_hw_check_alive(ah))
702 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
703
704 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) { 699 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
705 /* 700 /*
706 * TSF sync does not look correct; remain awake to sync with 701 * TSF sync does not look correct; remain awake to sync with
@@ -912,10 +907,19 @@ void ath_hw_check(struct work_struct *work)
912 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 907 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
913 unsigned long flags; 908 unsigned long flags;
914 int busy; 909 int busy;
910 u8 is_alive, nbeacon = 1;
915 911
916 ath9k_ps_wakeup(sc); 912 ath9k_ps_wakeup(sc);
917 if (ath9k_hw_check_alive(sc->sc_ah)) 913 is_alive = ath9k_hw_check_alive(sc->sc_ah);
914
915 if (is_alive && !AR_SREV_9300(sc->sc_ah))
918 goto out; 916 goto out;
917 else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
918 ath_dbg(common, RESET,
919 "DCU stuck is detected. Schedule chip reset\n");
920 RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
921 goto sched_reset;
922 }
919 923
920 spin_lock_irqsave(&common->cc_lock, flags); 924 spin_lock_irqsave(&common->cc_lock, flags);
921 busy = ath_update_survey_stats(sc); 925 busy = ath_update_survey_stats(sc);
@@ -926,12 +930,18 @@ void ath_hw_check(struct work_struct *work)
926 if (busy >= 99) { 930 if (busy >= 99) {
927 if (++sc->hw_busy_count >= 3) { 931 if (++sc->hw_busy_count >= 3) {
928 RESET_STAT_INC(sc, RESET_TYPE_BB_HANG); 932 RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
929 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 933 goto sched_reset;
930 } 934 }
931 935 } else if (busy >= 0) {
932 } else if (busy >= 0)
933 sc->hw_busy_count = 0; 936 sc->hw_busy_count = 0;
937 nbeacon = 3;
938 }
939
940 ath_start_rx_poll(sc, nbeacon);
941 goto out;
934 942
943sched_reset:
944 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
935out: 945out:
936 ath9k_ps_restore(sc); 946 ath9k_ps_restore(sc);
937} 947}
@@ -1094,14 +1104,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1094 } 1104 }
1095 } 1105 }
1096 1106
1097 /* 1107 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) {
1098 * Cannot tx while the hardware is in full sleep, it first needs a full
1099 * chip reset to recover from that
1100 */
1101 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
1102 goto exit;
1103
1104 if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
1105 /* 1108 /*
1106 * We are using PS-Poll and mac80211 can request TX while in 1109 * We are using PS-Poll and mac80211 can request TX while in
1107 * power save mode. Need to wake up hardware for the TX to be 1110 * power save mode. Need to wake up hardware for the TX to be
@@ -1120,12 +1123,21 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1120 } 1123 }
1121 /* 1124 /*
1122 * The actual restore operation will happen only after 1125 * The actual restore operation will happen only after
1123 * the sc_flags bit is cleared. We are just dropping 1126 * the ps_flags bit is cleared. We are just dropping
1124 * the ps_usecount here. 1127 * the ps_usecount here.
1125 */ 1128 */
1126 ath9k_ps_restore(sc); 1129 ath9k_ps_restore(sc);
1127 } 1130 }
1128 1131
1132 /*
1133 * Cannot tx while the hardware is in full sleep, it first needs a full
1134 * chip reset to recover from that
1135 */
1136 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) {
1137 ath_err(common, "TX while HW is in FULL_SLEEP mode\n");
1138 goto exit;
1139 }
1140
1129 memset(&txctl, 0, sizeof(struct ath_tx_control)); 1141 memset(&txctl, 0, sizeof(struct ath_tx_control));
1130 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; 1142 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
1131 1143
@@ -1133,6 +1145,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1133 1145
1134 if (ath_tx_start(hw, skb, &txctl) != 0) { 1146 if (ath_tx_start(hw, skb, &txctl) != 0) {
1135 ath_dbg(common, XMIT, "TX failed\n"); 1147 ath_dbg(common, XMIT, "TX failed\n");
1148 TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
1136 goto exit; 1149 goto exit;
1137 } 1150 }
1138 1151
@@ -1151,6 +1164,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1151 mutex_lock(&sc->mutex); 1164 mutex_lock(&sc->mutex);
1152 1165
1153 ath_cancel_work(sc); 1166 ath_cancel_work(sc);
1167 del_timer_sync(&sc->rx_poll_timer);
1154 1168
1155 if (sc->sc_flags & SC_OP_INVALID) { 1169 if (sc->sc_flags & SC_OP_INVALID) {
1156 ath_dbg(common, ANY, "Device not present\n"); 1170 ath_dbg(common, ANY, "Device not present\n");
@@ -1237,7 +1251,6 @@ static void ath9k_reclaim_beacon(struct ath_softc *sc,
1237 ath9k_set_beaconing_status(sc, false); 1251 ath9k_set_beaconing_status(sc, false);
1238 ath_beacon_return(sc, avp); 1252 ath_beacon_return(sc, avp);
1239 ath9k_set_beaconing_status(sc, true); 1253 ath9k_set_beaconing_status(sc, true);
1240 sc->sc_flags &= ~SC_OP_BEACONS;
1241} 1254}
1242 1255
1243static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 1256static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
@@ -1368,21 +1381,31 @@ static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
1368 ath9k_calculate_summary_state(hw, vif); 1381 ath9k_calculate_summary_state(hw, vif);
1369 1382
1370 if (ath9k_uses_beacons(vif->type)) { 1383 if (ath9k_uses_beacons(vif->type)) {
1371 int error; 1384 /* Reserve a beacon slot for the vif */
1372 /* This may fail because upper levels do not have beacons
1373 * properly configured yet. That's OK, we assume it
1374 * will be properly configured and then we will be notified
1375 * in the info_changed method and set up beacons properly
1376 * there.
1377 */
1378 ath9k_set_beaconing_status(sc, false); 1385 ath9k_set_beaconing_status(sc, false);
1379 error = ath_beacon_alloc(sc, vif); 1386 ath_beacon_alloc(sc, vif);
1380 if (!error)
1381 ath_beacon_config(sc, vif);
1382 ath9k_set_beaconing_status(sc, true); 1387 ath9k_set_beaconing_status(sc, true);
1383 } 1388 }
1384} 1389}
1385 1390
1391void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
1392{
1393 if (!AR_SREV_9300(sc->sc_ah))
1394 return;
1395
1396 if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF))
1397 return;
1398
1399 mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
1400 (nbeacon * sc->cur_beacon_conf.beacon_interval));
1401}
1402
1403void ath_rx_poll(unsigned long data)
1404{
1405 struct ath_softc *sc = (struct ath_softc *)data;
1406
1407 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
1408}
1386 1409
1387static int ath9k_add_interface(struct ieee80211_hw *hw, 1410static int ath9k_add_interface(struct ieee80211_hw *hw,
1388 struct ieee80211_vif *vif) 1411 struct ieee80211_vif *vif)
@@ -1511,6 +1534,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1511static void ath9k_enable_ps(struct ath_softc *sc) 1534static void ath9k_enable_ps(struct ath_softc *sc)
1512{ 1535{
1513 struct ath_hw *ah = sc->sc_ah; 1536 struct ath_hw *ah = sc->sc_ah;
1537 struct ath_common *common = ath9k_hw_common(ah);
1514 1538
1515 sc->ps_enabled = true; 1539 sc->ps_enabled = true;
1516 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 1540 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
@@ -1520,11 +1544,13 @@ static void ath9k_enable_ps(struct ath_softc *sc)
1520 } 1544 }
1521 ath9k_hw_setrxabort(ah, 1); 1545 ath9k_hw_setrxabort(ah, 1);
1522 } 1546 }
1547 ath_dbg(common, PS, "PowerSave enabled\n");
1523} 1548}
1524 1549
1525static void ath9k_disable_ps(struct ath_softc *sc) 1550static void ath9k_disable_ps(struct ath_softc *sc)
1526{ 1551{
1527 struct ath_hw *ah = sc->sc_ah; 1552 struct ath_hw *ah = sc->sc_ah;
1553 struct ath_common *common = ath9k_hw_common(ah);
1528 1554
1529 sc->ps_enabled = false; 1555 sc->ps_enabled = false;
1530 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE); 1556 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
@@ -1539,7 +1565,7 @@ static void ath9k_disable_ps(struct ath_softc *sc)
1539 ath9k_hw_set_interrupts(ah); 1565 ath9k_hw_set_interrupts(ah);
1540 } 1566 }
1541 } 1567 }
1542 1568 ath_dbg(common, PS, "PowerSave disabled\n");
1543} 1569}
1544 1570
1545static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 1571static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1548,6 +1574,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1548 struct ath_hw *ah = sc->sc_ah; 1574 struct ath_hw *ah = sc->sc_ah;
1549 struct ath_common *common = ath9k_hw_common(ah); 1575 struct ath_common *common = ath9k_hw_common(ah);
1550 struct ieee80211_conf *conf = &hw->conf; 1576 struct ieee80211_conf *conf = &hw->conf;
1577 bool reset_channel = false;
1551 1578
1552 ath9k_ps_wakeup(sc); 1579 ath9k_ps_wakeup(sc);
1553 mutex_lock(&sc->mutex); 1580 mutex_lock(&sc->mutex);
@@ -1556,6 +1583,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1556 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1583 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1557 if (sc->ps_idle) 1584 if (sc->ps_idle)
1558 ath_cancel_work(sc); 1585 ath_cancel_work(sc);
1586 else
1587 /*
1588 * The chip needs a reset to properly wake up from
1589 * full sleep
1590 */
1591 reset_channel = ah->chip_fullsleep;
1559 } 1592 }
1560 1593
1561 /* 1594 /*
@@ -1584,7 +1617,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1584 } 1617 }
1585 } 1618 }
1586 1619
1587 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1620 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
1588 struct ieee80211_channel *curchan = hw->conf.channel; 1621 struct ieee80211_channel *curchan = hw->conf.channel;
1589 int pos = curchan->hw_value; 1622 int pos = curchan->hw_value;
1590 int old_pos = -1; 1623 int old_pos = -1;
@@ -1904,6 +1937,8 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1904 sc->last_rssi = ATH_RSSI_DUMMY_MARKER; 1937 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
1905 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 1938 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
1906 1939
1940 ath_start_rx_poll(sc, 3);
1941
1907 if (!common->disable_ani) { 1942 if (!common->disable_ani) {
1908 sc->sc_flags |= SC_OP_ANI_RUN; 1943 sc->sc_flags |= SC_OP_ANI_RUN;
1909 ath_start_ani(common); 1944 ath_start_ani(common);
@@ -1943,6 +1978,7 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
1943 /* Stop ANI */ 1978 /* Stop ANI */
1944 sc->sc_flags &= ~SC_OP_ANI_RUN; 1979 sc->sc_flags &= ~SC_OP_ANI_RUN;
1945 del_timer_sync(&common->ani.timer); 1980 del_timer_sync(&common->ani.timer);
1981 del_timer_sync(&sc->rx_poll_timer);
1946 memset(&sc->caldata, 0, sizeof(sc->caldata)); 1982 memset(&sc->caldata, 0, sizeof(sc->caldata));
1947 } 1983 }
1948} 1984}
@@ -1957,7 +1993,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1957 struct ath_common *common = ath9k_hw_common(ah); 1993 struct ath_common *common = ath9k_hw_common(ah);
1958 struct ath_vif *avp = (void *)vif->drv_priv; 1994 struct ath_vif *avp = (void *)vif->drv_priv;
1959 int slottime; 1995 int slottime;
1960 int error;
1961 1996
1962 ath9k_ps_wakeup(sc); 1997 ath9k_ps_wakeup(sc);
1963 mutex_lock(&sc->mutex); 1998 mutex_lock(&sc->mutex);
@@ -1986,16 +2021,29 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1986 } else { 2021 } else {
1987 sc->sc_flags &= ~SC_OP_ANI_RUN; 2022 sc->sc_flags &= ~SC_OP_ANI_RUN;
1988 del_timer_sync(&common->ani.timer); 2023 del_timer_sync(&common->ani.timer);
2024 del_timer_sync(&sc->rx_poll_timer);
1989 } 2025 }
1990 } 2026 }
1991 2027
1992 /* Enable transmission of beacons (AP, IBSS, MESH) */ 2028 /*
1993 if ((changed & BSS_CHANGED_BEACON) || 2029 * In case of AP mode, the HW TSF has to be reset
1994 ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) { 2030 * when the beacon interval changes.
2031 */
2032 if ((changed & BSS_CHANGED_BEACON_INT) &&
2033 (vif->type == NL80211_IFTYPE_AP))
2034 sc->sc_flags |= SC_OP_TSF_RESET;
2035
2036 /* Configure beaconing (AP, IBSS, MESH) */
2037 if (ath9k_uses_beacons(vif->type) &&
2038 ((changed & BSS_CHANGED_BEACON) ||
2039 (changed & BSS_CHANGED_BEACON_ENABLED) ||
2040 (changed & BSS_CHANGED_BEACON_INT))) {
1995 ath9k_set_beaconing_status(sc, false); 2041 ath9k_set_beaconing_status(sc, false);
1996 error = ath_beacon_alloc(sc, vif); 2042 if (bss_conf->enable_beacon)
1997 if (!error) 2043 ath_beacon_alloc(sc, vif);
1998 ath_beacon_config(sc, vif); 2044 else
2045 avp->is_bslot_active = false;
2046 ath_beacon_config(sc, vif);
1999 ath9k_set_beaconing_status(sc, true); 2047 ath9k_set_beaconing_status(sc, true);
2000 } 2048 }
2001 2049
@@ -2018,30 +2066,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2018 } 2066 }
2019 } 2067 }
2020 2068
2021 /* Disable transmission of beacons */
2022 if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
2023 !bss_conf->enable_beacon) {
2024 ath9k_set_beaconing_status(sc, false);
2025 avp->is_bslot_active = false;
2026 ath9k_set_beaconing_status(sc, true);
2027 }
2028
2029 if (changed & BSS_CHANGED_BEACON_INT) {
2030 /*
2031 * In case of AP mode, the HW TSF has to be reset
2032 * when the beacon interval changes.
2033 */
2034 if (vif->type == NL80211_IFTYPE_AP) {
2035 sc->sc_flags |= SC_OP_TSF_RESET;
2036 ath9k_set_beaconing_status(sc, false);
2037 error = ath_beacon_alloc(sc, vif);
2038 if (!error)
2039 ath_beacon_config(sc, vif);
2040 ath9k_set_beaconing_status(sc, true);
2041 } else
2042 ath_beacon_config(sc, vif);
2043 }
2044
2045 mutex_unlock(&sc->mutex); 2069 mutex_unlock(&sc->mutex);
2046 ath9k_ps_restore(sc); 2070 ath9k_ps_restore(sc);
2047} 2071}
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 77dc327def8d..a856b51255f4 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/nl80211.h> 19#include <linux/nl80211.h>
18#include <linux/pci.h> 20#include <linux/pci.h>
19#include <linux/pci-aspm.h> 21#include <linux/pci-aspm.h>
@@ -171,14 +173,13 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
171 173
172 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 174 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
173 if (ret) { 175 if (ret) {
174 printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); 176 pr_err("32-bit DMA not available\n");
175 goto err_dma; 177 goto err_dma;
176 } 178 }
177 179
178 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 180 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
179 if (ret) { 181 if (ret) {
180 printk(KERN_ERR "ath9k: 32-bit DMA consistent " 182 pr_err("32-bit DMA consistent DMA enable failed\n");
181 "DMA enable failed\n");
182 goto err_dma; 183 goto err_dma;
183 } 184 }
184 185
@@ -224,7 +225,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
224 225
225 mem = pci_iomap(pdev, 0, 0); 226 mem = pci_iomap(pdev, 0, 0);
226 if (!mem) { 227 if (!mem) {
227 printk(KERN_ERR "PCI memory map error\n") ; 228 pr_err("PCI memory map error\n") ;
228 ret = -EIO; 229 ret = -EIO;
229 goto err_iomap; 230 goto err_iomap;
230 } 231 }
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 08bb45532701..92a6c0a87f89 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1436,7 +1436,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1436 1436
1437static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, 1437static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1438 struct ieee80211_sta *sta, void *priv_sta, 1438 struct ieee80211_sta *sta, void *priv_sta,
1439 u32 changed, enum nl80211_channel_type oper_chan_type) 1439 u32 changed)
1440{ 1440{
1441 struct ath_softc *sc = priv; 1441 struct ath_softc *sc = priv;
1442 struct ath_rate_priv *ath_rc_priv = priv_sta; 1442 struct ath_rate_priv *ath_rc_priv = priv_sta;
@@ -1447,12 +1447,11 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1447 1447
1448 /* FIXME: Handle AP mode later when we support CWM */ 1448 /* FIXME: Handle AP mode later when we support CWM */
1449 1449
1450 if (changed & IEEE80211_RC_HT_CHANGED) { 1450 if (changed & IEEE80211_RC_BW_CHANGED) {
1451 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) 1451 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
1452 return; 1452 return;
1453 1453
1454 if (oper_chan_type == NL80211_CHAN_HT40MINUS || 1454 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
1455 oper_chan_type == NL80211_CHAN_HT40PLUS)
1456 oper_cw40 = true; 1455 oper_cw40 = true;
1457 1456
1458 if (oper_cw40) 1457 if (oper_cw40)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1c4583c7ff7c..e1fcc68124dc 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -812,6 +812,7 @@ static bool ath9k_rx_accept(struct ath_common *common,
812 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && 812 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
813 test_bit(rx_stats->rs_keyix, common->tkip_keymap); 813 test_bit(rx_stats->rs_keyix, common->tkip_keymap);
814 strip_mic = is_valid_tkip && ieee80211_is_data(fc) && 814 strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
815 ieee80211_has_protected(fc) &&
815 !(rx_stats->rs_status & 816 !(rx_stats->rs_status &
816 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC | 817 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
817 ATH9K_RXERR_KEYMISS)); 818 ATH9K_RXERR_KEYMISS));
@@ -824,15 +825,20 @@ static bool ath9k_rx_accept(struct ath_common *common,
824 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID) 825 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
825 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; 826 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
826 827
827 if (!rx_stats->rs_datalen) 828 if (!rx_stats->rs_datalen) {
829 RX_STAT_INC(rx_len_err);
828 return false; 830 return false;
831 }
832
829 /* 833 /*
830 * rs_status follows rs_datalen so if rs_datalen is too large 834 * rs_status follows rs_datalen so if rs_datalen is too large
831 * we can take a hint that hardware corrupted it, so ignore 835 * we can take a hint that hardware corrupted it, so ignore
832 * those frames. 836 * those frames.
833 */ 837 */
834 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) 838 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
839 RX_STAT_INC(rx_len_err);
835 return false; 840 return false;
841 }
836 842
837 /* Only use error bits from the last fragment */ 843 /* Only use error bits from the last fragment */
838 if (rx_stats->rs_more) 844 if (rx_stats->rs_more)
@@ -902,6 +908,7 @@ static int ath9k_process_rate(struct ath_common *common,
902 struct ieee80211_supported_band *sband; 908 struct ieee80211_supported_band *sband;
903 enum ieee80211_band band; 909 enum ieee80211_band band;
904 unsigned int i = 0; 910 unsigned int i = 0;
911 struct ath_softc __maybe_unused *sc = common->priv;
905 912
906 band = hw->conf.channel->band; 913 band = hw->conf.channel->band;
907 sband = hw->wiphy->bands[band]; 914 sband = hw->wiphy->bands[band];
@@ -936,7 +943,7 @@ static int ath9k_process_rate(struct ath_common *common,
936 ath_dbg(common, ANY, 943 ath_dbg(common, ANY,
937 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", 944 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
938 rx_stats->rs_rate); 945 rx_stats->rs_rate);
939 946 RX_STAT_INC(rx_rate_err);
940 return -EINVAL; 947 return -EINVAL;
941} 948}
942 949
@@ -1823,10 +1830,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1823 1830
1824 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); 1831 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1825 rxs = IEEE80211_SKB_RXCB(hdr_skb); 1832 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1826 if (ieee80211_is_beacon(hdr->frame_control) && 1833 if (ieee80211_is_beacon(hdr->frame_control)) {
1827 !is_zero_ether_addr(common->curbssid) && 1834 RX_STAT_INC(rx_beacons);
1828 !compare_ether_addr(hdr->addr3, common->curbssid)) 1835 if (!is_zero_ether_addr(common->curbssid) &&
1829 rs.is_mybeacon = true; 1836 ether_addr_equal(hdr->addr3, common->curbssid))
1837 rs.is_mybeacon = true;
1838 else
1839 rs.is_mybeacon = false;
1840 }
1830 else 1841 else
1831 rs.is_mybeacon = false; 1842 rs.is_mybeacon = false;
1832 1843
@@ -1836,8 +1847,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1836 * If we're asked to flush receive queue, directly 1847 * If we're asked to flush receive queue, directly
1837 * chain it back at the queue without processing it. 1848 * chain it back at the queue without processing it.
1838 */ 1849 */
1839 if (sc->sc_flags & SC_OP_RXFLUSH) 1850 if (sc->sc_flags & SC_OP_RXFLUSH) {
1851 RX_STAT_INC(rx_drop_rxflush);
1840 goto requeue_drop_frag; 1852 goto requeue_drop_frag;
1853 }
1841 1854
1842 memset(rxs, 0, sizeof(struct ieee80211_rx_status)); 1855 memset(rxs, 0, sizeof(struct ieee80211_rx_status));
1843 1856
@@ -1855,6 +1868,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1855 if (retval) 1868 if (retval)
1856 goto requeue_drop_frag; 1869 goto requeue_drop_frag;
1857 1870
1871 if (rs.is_mybeacon) {
1872 sc->hw_busy_count = 0;
1873 ath_start_rx_poll(sc, 3);
1874 }
1858 /* Ensure we always have an skb to requeue once we are done 1875 /* Ensure we always have an skb to requeue once we are done
1859 * processing the current buffer's skb */ 1876 * processing the current buffer's skb */
1860 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); 1877 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1863,8 +1880,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1863 * tell hardware it can give us a new frame using the old 1880 * tell hardware it can give us a new frame using the old
1864 * skb and put it at the tail of the sc->rx.rxbuf list for 1881 * skb and put it at the tail of the sc->rx.rxbuf list for
1865 * processing. */ 1882 * processing. */
1866 if (!requeue_skb) 1883 if (!requeue_skb) {
1884 RX_STAT_INC(rx_oom_err);
1867 goto requeue_drop_frag; 1885 goto requeue_drop_frag;
1886 }
1868 1887
1869 /* Unmap the frame */ 1888 /* Unmap the frame */
1870 dma_unmap_single(sc->dev, bf->bf_buf_addr, 1889 dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1895,6 +1914,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1895 } 1914 }
1896 1915
1897 if (rs.rs_more) { 1916 if (rs.rs_more) {
1917 RX_STAT_INC(rx_frags);
1898 /* 1918 /*
1899 * rs_more indicates chained descriptors which can be 1919 * rs_more indicates chained descriptors which can be
1900 * used to link buffers together for a sort of 1920 * used to link buffers together for a sort of
@@ -1904,6 +1924,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1904 /* too many fragments - cannot handle frame */ 1924 /* too many fragments - cannot handle frame */
1905 dev_kfree_skb_any(sc->rx.frag); 1925 dev_kfree_skb_any(sc->rx.frag);
1906 dev_kfree_skb_any(skb); 1926 dev_kfree_skb_any(skb);
1927 RX_STAT_INC(rx_too_many_frags_err);
1907 skb = NULL; 1928 skb = NULL;
1908 } 1929 }
1909 sc->rx.frag = skb; 1930 sc->rx.frag = skb;
@@ -1915,6 +1936,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1915 1936
1916 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { 1937 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
1917 dev_kfree_skb(skb); 1938 dev_kfree_skb(skb);
1939 RX_STAT_INC(rx_oom_err);
1918 goto requeue_drop_frag; 1940 goto requeue_drop_frag;
1919 } 1941 }
1920 1942
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 834e6bc45e8b..23eaa1b26ebe 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1820,6 +1820,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1820 struct ath_frame_info *fi = get_frame_info(skb); 1820 struct ath_frame_info *fi = get_frame_info(skb);
1821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1822 struct ath_buf *bf; 1822 struct ath_buf *bf;
1823 int fragno;
1823 u16 seqno; 1824 u16 seqno;
1824 1825
1825 bf = ath_tx_get_buffer(sc); 1826 bf = ath_tx_get_buffer(sc);
@@ -1831,9 +1832,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1831 ATH_TXBUF_RESET(bf); 1832 ATH_TXBUF_RESET(bf);
1832 1833
1833 if (tid) { 1834 if (tid) {
1835 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1834 seqno = tid->seq_next; 1836 seqno = tid->seq_next;
1835 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1837 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1836 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1838
1839 if (fragno)
1840 hdr->seq_ctrl |= cpu_to_le16(fragno);
1841
1842 if (!ieee80211_has_morefrags(hdr->frame_control))
1843 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1844
1837 bf->bf_state.seqno = seqno; 1845 bf->bf_state.seqno = seqno;
1838 } 1846 }
1839 1847
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
index 885c42778b8b..65919c902f55 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.h
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -114,7 +114,7 @@ __regwrite_out : \
114 114
115#define carl9170_regwrite_result() \ 115#define carl9170_regwrite_result() \
116 __err; \ 116 __err; \
117} while (0); 117} while (0)
118 118
119 119
120#define carl9170_async_regwrite_get_buf() \ 120#define carl9170_async_regwrite_get_buf() \
@@ -126,7 +126,7 @@ do { \
126 __err = -ENOMEM; \ 126 __err = -ENOMEM; \
127 goto __async_regwrite_out; \ 127 goto __async_regwrite_out; \
128 } \ 128 } \
129} while (0); 129} while (0)
130 130
131#define carl9170_async_regwrite_begin(carl) \ 131#define carl9170_async_regwrite_begin(carl) \
132do { \ 132do { \
@@ -169,6 +169,6 @@ __async_regwrite_out: \
169 169
170#define carl9170_async_regwrite_result() \ 170#define carl9170_async_regwrite_result() \
171 __err; \ 171 __err; \
172} while (0); 172} while (0)
173 173
174#endif /* __CMD_H */ 174#endif /* __CMD_H */
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index cffde8d9a521..5c73c03872f3 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -355,6 +355,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
355 355
356 ar->hw->wiphy->interface_modes |= if_comb_types; 356 ar->hw->wiphy->interface_modes |= if_comb_types;
357 357
358 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
359
358#undef SUPPORTED 360#undef SUPPORTED
359 return carl9170_fw_tx_sequence(ar); 361 return carl9170_fw_tx_sequence(ar);
360} 362}
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index dc99030ea8b6..84b22eec7abd 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -538,7 +538,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
538 return; 538 return;
539 539
540 /* and only beacons from the associated BSSID, please */ 540 /* and only beacons from the associated BSSID, please */
541 if (compare_ether_addr(hdr->addr3, ar->common.curbssid) || 541 if (!ether_addr_equal(hdr->addr3, ar->common.curbssid) ||
542 !ar->common.curaid) 542 !ar->common.curaid)
543 return; 543 return;
544 544
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index ea2c737138d3..8e99540cd90e 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/module.h> 20#include <linux/module.h>
19 21
@@ -49,7 +51,7 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
49 if (off != 0) 51 if (off != 0)
50 skb_reserve(skb, common->cachelsz - off); 52 skb_reserve(skb, common->cachelsz - off);
51 } else { 53 } else {
52 printk(KERN_ERR "skbuff alloc of size %u failed\n", len); 54 pr_err("skbuff alloc of size %u failed\n", len);
53 return NULL; 55 return NULL;
54 } 56 }
55 57
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 10dea37431b3..d81698015bf7 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/export.h> 20#include <linux/export.h>
19#include <net/cfg80211.h> 21#include <net/cfg80211.h>
@@ -562,7 +564,7 @@ static int __ath_regd_init(struct ath_regulatory *reg)
562 printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd); 564 printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd);
563 565
564 if (!ath_regd_is_eeprom_valid(reg)) { 566 if (!ath_regd_is_eeprom_valid(reg)) {
565 printk(KERN_ERR "ath: Invalid EEPROM contents\n"); 567 pr_err("Invalid EEPROM contents\n");
566 return -EINVAL; 568 return -EINVAL;
567 } 569 }
568 570
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 6c87a823f5a9..d07c0301da6a 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -3989,8 +3989,7 @@ static int reset_atmel_card(struct net_device *dev)
3989 atmel_copy_to_card(priv->dev, 0x8000, &fw[0x6000], len - 0x6000); 3989 atmel_copy_to_card(priv->dev, 0x8000, &fw[0x6000], len - 0x6000);
3990 } 3990 }
3991 3991
3992 if (fw_entry) 3992 release_firmware(fw_entry);
3993 release_firmware(fw_entry);
3994 } 3993 }
3995 3994
3996 err = atmel_wakeup_firmware(priv); 3995 err = atmel_wakeup_firmware(priv);
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 9ab1192004c0..51e33b53386e 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -74,15 +74,4 @@ static void __devexit atmel_pci_remove(struct pci_dev *pdev)
74 stop_atmel_card(pci_get_drvdata(pdev)); 74 stop_atmel_card(pci_get_drvdata(pdev));
75} 75}
76 76
77static int __init atmel_init_module(void) 77module_pci_driver(atmel_driver);
78{
79 return pci_register_driver(&atmel_driver);
80}
81
82static void __exit atmel_cleanup_module(void)
83{
84 pci_unregister_driver(&atmel_driver);
85}
86
87module_init(atmel_init_module);
88module_exit(atmel_cleanup_module);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c79e6638c88d..617afc8211b2 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4010,6 +4010,20 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
4010 if (modparam_nohwcrypt) 4010 if (modparam_nohwcrypt)
4011 return -ENOSPC; /* User disabled HW-crypto */ 4011 return -ENOSPC; /* User disabled HW-crypto */
4012 4012
4013 if ((vif->type == NL80211_IFTYPE_ADHOC ||
4014 vif->type == NL80211_IFTYPE_MESH_POINT) &&
4015 (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
4016 key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
4017 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
4018 /*
4019 * For now, disable hw crypto for the RSN IBSS group keys. This
4020 * could be optimized in the future, but until that gets
4021 * implemented, use of software crypto for group addressed
4022 * frames is a acceptable to allow RSN IBSS to be used.
4023 */
4024 return -EOPNOTSUPP;
4025 }
4026
4013 mutex_lock(&wl->mutex); 4027 mutex_lock(&wl->mutex);
4014 4028
4015 dev = wl->current_dev; 4029 dev = wl->current_dev;
@@ -4827,8 +4841,14 @@ static int b43_op_start(struct ieee80211_hw *hw)
4827 out_mutex_unlock: 4841 out_mutex_unlock:
4828 mutex_unlock(&wl->mutex); 4842 mutex_unlock(&wl->mutex);
4829 4843
4830 /* reload configuration */ 4844 /*
4831 b43_op_config(hw, ~0); 4845 * Configuration may have been overwritten during initialization.
4846 * Reload the configuration, but only if initialization was
4847 * successful. Reloading the configuration after a failed init
4848 * may hang the system.
4849 */
4850 if (!err)
4851 b43_op_config(hw, ~0);
4832 4852
4833 return err; 4853 return err;
4834} 4854}
@@ -5275,6 +5295,8 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
5275 BIT(NL80211_IFTYPE_WDS) | 5295 BIT(NL80211_IFTYPE_WDS) |
5276 BIT(NL80211_IFTYPE_ADHOC); 5296 BIT(NL80211_IFTYPE_ADHOC);
5277 5297
5298 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
5299
5278 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; 5300 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
5279 wl->mac80211_initially_registered_queues = hw->queues; 5301 wl->mac80211_initially_registered_queues = hw->queues;
5280 hw->max_rates = 2; 5302 hw->max_rates = 2;
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 80b0755ed3af..a54fb2d29089 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -193,7 +193,7 @@ static struct sdio_driver b43_sdio_driver = {
193 .name = "b43-sdio", 193 .name = "b43-sdio",
194 .id_table = b43_sdio_ids, 194 .id_table = b43_sdio_ids,
195 .probe = b43_sdio_probe, 195 .probe = b43_sdio_probe,
196 .remove = b43_sdio_remove, 196 .remove = __devexit_p(b43_sdio_remove),
197}; 197};
198 198
199int b43_sdio_init(void) 199int b43_sdio_init(void)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 2c5367884b3f..b31ccc02fa21 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -290,7 +290,8 @@ int b43_generate_txhdr(struct b43_wldev *dev,
290 txhdr->dur_fb = wlhdr->duration_id; 290 txhdr->dur_fb = wlhdr->duration_id;
291 } else { 291 } else {
292 txhdr->dur_fb = ieee80211_generic_frame_duration( 292 txhdr->dur_fb = ieee80211_generic_frame_duration(
293 dev->wl->hw, info->control.vif, fragment_len, fbrate); 293 dev->wl->hw, info->control.vif, info->band,
294 fragment_len, fbrate);
294 } 295 }
295 296
296 plcp_fragment_len = fragment_len + FCS_LEN; 297 plcp_fragment_len = fragment_len + FCS_LEN;
@@ -378,7 +379,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
378 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 379 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
379 phy_ctl |= B43_TXH_PHY_SHORTPRMBL; 380 phy_ctl |= B43_TXH_PHY_SHORTPRMBL;
380 381
381 switch (b43_ieee80211_antenna_sanitize(dev, info->antenna_sel_tx)) { 382 switch (b43_ieee80211_antenna_sanitize(dev, 0)) {
382 case 0: /* Default */ 383 case 0: /* Default */
383 phy_ctl |= B43_TXH_PHY_ANT01AUTO; 384 phy_ctl |= B43_TXH_PHY_ANT01AUTO;
384 break; 385 break;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index df7e16dfb36c..1be214b815fb 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1056,6 +1056,7 @@ static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
1056 b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value); 1056 b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
1057 dur = ieee80211_generic_frame_duration(dev->wl->hw, 1057 dur = ieee80211_generic_frame_duration(dev->wl->hw,
1058 dev->wl->vif, 1058 dev->wl->vif,
1059 IEEE80211_BAND_2GHZ,
1059 size, 1060 size,
1060 rate); 1061 rate);
1061 /* Write PLCP in two parts and timing for packet transfer */ 1062 /* Write PLCP in two parts and timing for packet transfer */
@@ -1121,6 +1122,7 @@ static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev,
1121 IEEE80211_STYPE_PROBE_RESP); 1122 IEEE80211_STYPE_PROBE_RESP);
1122 dur = ieee80211_generic_frame_duration(dev->wl->hw, 1123 dur = ieee80211_generic_frame_duration(dev->wl->hw,
1123 dev->wl->vif, 1124 dev->wl->vif,
1125 IEEE80211_BAND_2GHZ,
1124 *dest_size, 1126 *dest_size,
1125 rate); 1127 rate);
1126 hdr->duration_id = dur; 1128 hdr->duration_id = dur;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 5188fab0b377..a8012f2749ee 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -228,6 +228,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
228 } else { 228 } else {
229 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, 229 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw,
230 info->control.vif, 230 info->control.vif,
231 info->band,
231 fragment_len, 232 fragment_len,
232 rate_fb); 233 rate_fb);
233 } 234 }
@@ -277,19 +278,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
277 phy_ctl |= B43legacy_TX4_PHY_ENC_OFDM; 278 phy_ctl |= B43legacy_TX4_PHY_ENC_OFDM;
278 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 279 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
279 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL; 280 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL;
280 switch (info->antenna_sel_tx) { 281 phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
281 case 0:
282 phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
283 break;
284 case 1:
285 phy_ctl |= B43legacy_TX4_PHY_ANT0;
286 break;
287 case 2:
288 phy_ctl |= B43legacy_TX4_PHY_ANT1;
289 break;
290 default:
291 B43legacy_BUG_ON(1);
292 }
293 282
294 /* MAC control */ 283 /* MAC control */
295 rates = info->control.rates; 284 rates = info->control.rates;
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index c5104533e24e..b480088b3dbe 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -36,6 +36,15 @@ config BRCMFMAC_SDIO
36 IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to 36 IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
37 use the driver for a SDIO wireless card. 37 use the driver for a SDIO wireless card.
38 38
39config BRCMFMAC_SDIO_OOB
40 bool "Out of band interrupt support for SDIO interface chipset"
41 depends on BRCMFMAC_SDIO
42 ---help---
43 This option enables out-of-band interrupt support for Broadcom
44 SDIO Wifi chipset using fullmac in order to gain better
45 performance and deep sleep wake up capability on certain
46 platforms. Say N if you are unsure.
47
39config BRCMFMAC_USB 48config BRCMFMAC_USB
40 bool "USB bus interface support for FullMAC driver" 49 bool "USB bus interface support for FullMAC driver"
41 depends on USB 50 depends on USB
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e925290b432b..4add7da24681 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -39,37 +39,113 @@
39 39
40#define SDIOH_API_ACCESS_RETRY_LIMIT 2 40#define SDIOH_API_ACCESS_RETRY_LIMIT 2
41 41
42static void brcmf_sdioh_irqhandler(struct sdio_func *func) 42#ifdef CONFIG_BRCMFMAC_SDIO_OOB
43static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id)
43{ 44{
44 struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); 45 struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(dev_id);
45 46
46 brcmf_dbg(TRACE, "***IRQHandler\n"); 47 brcmf_dbg(INTR, "oob intr triggered\n");
47 48
48 sdio_release_host(func); 49 /*
50 * out-of-band interrupt is level-triggered which won't
51 * be cleared until dpc
52 */
53 if (sdiodev->irq_en) {
54 disable_irq_nosync(irq);
55 sdiodev->irq_en = false;
56 }
49 57
50 brcmf_sdbrcm_isr(sdiodev->bus); 58 brcmf_sdbrcm_isr(sdiodev->bus);
51 59
52 sdio_claim_host(func); 60 return IRQ_HANDLED;
61}
62
63int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
64{
65 int ret = 0;
66 u8 data;
67 unsigned long flags;
68
69 brcmf_dbg(TRACE, "Entering\n");
70
71 brcmf_dbg(ERROR, "requesting irq %d\n", sdiodev->irq);
72 ret = request_irq(sdiodev->irq, brcmf_sdio_irqhandler,
73 sdiodev->irq_flags, "brcmf_oob_intr",
74 &sdiodev->func[1]->card->dev);
75 if (ret != 0)
76 return ret;
77 spin_lock_init(&sdiodev->irq_en_lock);
78 spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
79 sdiodev->irq_en = true;
80 spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
81
82 ret = enable_irq_wake(sdiodev->irq);
83 if (ret != 0)
84 return ret;
85 sdiodev->irq_wake = true;
86
87 /* must configure SDIO_CCCR_IENx to enable irq */
88 data = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_0,
89 SDIO_CCCR_IENx, &ret);
90 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
91 brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_IENx,
92 data, &ret);
93
94 /* redirect, configure ane enable io for interrupt signal */
95 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
96 if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH)
97 data |= SDIO_SEPINT_ACT_HI;
98 brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_BRCM_SEPINT,
99 data, &ret);
100
101 return 0;
102}
103
104int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
105{
106 brcmf_dbg(TRACE, "Entering\n");
107
108 brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_BRCM_SEPINT,
109 0, NULL);
110 brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_IENx, 0, NULL);
111
112 if (sdiodev->irq_wake) {
113 disable_irq_wake(sdiodev->irq);
114 sdiodev->irq_wake = false;
115 }
116 free_irq(sdiodev->irq, &sdiodev->func[1]->card->dev);
117 sdiodev->irq_en = false;
118
119 return 0;
120}
121#else /* CONFIG_BRCMFMAC_SDIO_OOB */
122static void brcmf_sdio_irqhandler(struct sdio_func *func)
123{
124 struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
125
126 brcmf_dbg(INTR, "ib intr triggered\n");
127
128 brcmf_sdbrcm_isr(sdiodev->bus);
53} 129}
54 130
55/* dummy handler for SDIO function 2 interrupt */ 131/* dummy handler for SDIO function 2 interrupt */
56static void brcmf_sdioh_dummy_irq_handler(struct sdio_func *func) 132static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
57{ 133{
58} 134}
59 135
60int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev) 136int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
61{ 137{
62 brcmf_dbg(TRACE, "Entering\n"); 138 brcmf_dbg(TRACE, "Entering\n");
63 139
64 sdio_claim_host(sdiodev->func[1]); 140 sdio_claim_host(sdiodev->func[1]);
65 sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler); 141 sdio_claim_irq(sdiodev->func[1], brcmf_sdio_irqhandler);
66 sdio_claim_irq(sdiodev->func[2], brcmf_sdioh_dummy_irq_handler); 142 sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
67 sdio_release_host(sdiodev->func[1]); 143 sdio_release_host(sdiodev->func[1]);
68 144
69 return 0; 145 return 0;
70} 146}
71 147
72int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev) 148int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
73{ 149{
74 brcmf_dbg(TRACE, "Entering\n"); 150 brcmf_dbg(TRACE, "Entering\n");
75 151
@@ -80,6 +156,7 @@ int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev)
80 156
81 return 0; 157 return 0;
82} 158}
159#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
83 160
84u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr, 161u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr,
85 int *err) 162 int *err)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 4688904908ec..dd07d33a927c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -27,6 +27,7 @@
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/sched.h> /* request_irq() */ 28#include <linux/sched.h> /* request_irq() */
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/platform_device.h>
30#include <net/cfg80211.h> 31#include <net/cfg80211.h>
31 32
32#include <defs.h> 33#include <defs.h>
@@ -55,6 +56,15 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
55}; 56};
56MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); 57MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
57 58
59#ifdef CONFIG_BRCMFMAC_SDIO_OOB
60static struct list_head oobirq_lh;
61struct brcmf_sdio_oobirq {
62 unsigned int irq;
63 unsigned long flags;
64 struct list_head list;
65};
66#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
67
58static bool 68static bool
59brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev) 69brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
60{ 70{
@@ -107,10 +117,17 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
107 } 117 }
108 sdio_release_host(sdfunc); 118 sdio_release_host(sdfunc);
109 } 119 }
110 } else if (regaddr == SDIO_CCCR_ABORT) { 120 } else if ((regaddr == SDIO_CCCR_ABORT) ||
121 (regaddr == SDIO_CCCR_IENx)) {
122 sdfunc = kmemdup(sdiodev->func[0], sizeof(struct sdio_func),
123 GFP_KERNEL);
124 if (!sdfunc)
125 return -ENOMEM;
126 sdfunc->num = 0;
111 sdio_claim_host(sdfunc); 127 sdio_claim_host(sdfunc);
112 sdio_writeb(sdfunc, *byte, regaddr, &err_ret); 128 sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
113 sdio_release_host(sdfunc); 129 sdio_release_host(sdfunc);
130 kfree(sdfunc);
114 } else if (regaddr < 0xF0) { 131 } else if (regaddr < 0xF0) {
115 brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr); 132 brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
116 err_ret = -EPERM; 133 err_ret = -EPERM;
@@ -461,12 +478,40 @@ void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
461 478
462} 479}
463 480
481#ifdef CONFIG_BRCMFMAC_SDIO_OOB
482static int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev)
483{
484 struct brcmf_sdio_oobirq *oobirq_entry;
485
486 if (list_empty(&oobirq_lh)) {
487 brcmf_dbg(ERROR, "no valid oob irq resource\n");
488 return -ENXIO;
489 }
490
491 oobirq_entry = list_first_entry(&oobirq_lh, struct brcmf_sdio_oobirq,
492 list);
493
494 sdiodev->irq = oobirq_entry->irq;
495 sdiodev->irq_flags = oobirq_entry->flags;
496 list_del(&oobirq_entry->list);
497 kfree(oobirq_entry);
498
499 return 0;
500}
501#else
502static inline int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev)
503{
504 return 0;
505}
506#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
507
464static int brcmf_ops_sdio_probe(struct sdio_func *func, 508static int brcmf_ops_sdio_probe(struct sdio_func *func,
465 const struct sdio_device_id *id) 509 const struct sdio_device_id *id)
466{ 510{
467 int ret = 0; 511 int ret = 0;
468 struct brcmf_sdio_dev *sdiodev; 512 struct brcmf_sdio_dev *sdiodev;
469 struct brcmf_bus *bus_if; 513 struct brcmf_bus *bus_if;
514
470 brcmf_dbg(TRACE, "Enter\n"); 515 brcmf_dbg(TRACE, "Enter\n");
471 brcmf_dbg(TRACE, "func->class=%x\n", func->class); 516 brcmf_dbg(TRACE, "func->class=%x\n", func->class);
472 brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor); 517 brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@@ -486,7 +531,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
486 kfree(bus_if); 531 kfree(bus_if);
487 return -ENOMEM; 532 return -ENOMEM;
488 } 533 }
489 sdiodev->func[0] = func->card->sdio_func[0]; 534 sdiodev->func[0] = func;
490 sdiodev->func[1] = func; 535 sdiodev->func[1] = func;
491 sdiodev->bus_if = bus_if; 536 sdiodev->bus_if = bus_if;
492 bus_if->bus_priv.sdio = sdiodev; 537 bus_if->bus_priv.sdio = sdiodev;
@@ -505,6 +550,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
505 sdiodev = dev_get_drvdata(&func->card->dev); 550 sdiodev = dev_get_drvdata(&func->card->dev);
506 if ((!sdiodev) || (sdiodev->func[1]->card != func->card)) 551 if ((!sdiodev) || (sdiodev->func[1]->card != func->card))
507 return -ENODEV; 552 return -ENODEV;
553
554 ret = brcmf_sdio_getintrcfg(sdiodev);
555 if (ret)
556 return ret;
508 sdiodev->func[2] = func; 557 sdiodev->func[2] = func;
509 558
510 bus_if = sdiodev->bus_if; 559 bus_if = sdiodev->bus_if;
@@ -597,6 +646,65 @@ static struct sdio_driver brcmf_sdmmc_driver = {
597#endif /* CONFIG_PM_SLEEP */ 646#endif /* CONFIG_PM_SLEEP */
598}; 647};
599 648
649#ifdef CONFIG_BRCMFMAC_SDIO_OOB
650static int brcmf_sdio_pd_probe(struct platform_device *pdev)
651{
652 struct resource *res;
653 struct brcmf_sdio_oobirq *oobirq_entry;
654 int i, ret;
655
656 INIT_LIST_HEAD(&oobirq_lh);
657
658 for (i = 0; ; i++) {
659 res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
660 if (!res)
661 break;
662
663 oobirq_entry = kzalloc(sizeof(struct brcmf_sdio_oobirq),
664 GFP_KERNEL);
665 oobirq_entry->irq = res->start;
666 oobirq_entry->flags = res->flags & IRQF_TRIGGER_MASK;
667 list_add_tail(&oobirq_entry->list, &oobirq_lh);
668 }
669 if (i == 0)
670 return -ENXIO;
671
672 ret = sdio_register_driver(&brcmf_sdmmc_driver);
673
674 if (ret)
675 brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
676
677 return ret;
678}
679
680static struct platform_driver brcmf_sdio_pd = {
681 .probe = brcmf_sdio_pd_probe,
682 .driver = {
683 .name = "brcmf_sdio_pd"
684 }
685};
686
687void brcmf_sdio_exit(void)
688{
689 brcmf_dbg(TRACE, "Enter\n");
690
691 sdio_unregister_driver(&brcmf_sdmmc_driver);
692
693 platform_driver_unregister(&brcmf_sdio_pd);
694}
695
696void brcmf_sdio_init(void)
697{
698 int ret;
699
700 brcmf_dbg(TRACE, "Enter\n");
701
702 ret = platform_driver_register(&brcmf_sdio_pd);
703
704 if (ret)
705 brcmf_dbg(ERROR, "platform_driver_register failed: %d\n", ret);
706}
707#else
600void brcmf_sdio_exit(void) 708void brcmf_sdio_exit(void)
601{ 709{
602 brcmf_dbg(TRACE, "Enter\n"); 710 brcmf_dbg(TRACE, "Enter\n");
@@ -615,3 +723,4 @@ void brcmf_sdio_init(void)
615 if (ret) 723 if (ret)
616 brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret); 724 brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
617} 725}
726#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 07686a748d3c..9f637014486e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -632,7 +632,6 @@ extern const struct bcmevent_name bcmevent_names[];
632extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen, 632extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
633 char *buf, uint len); 633 char *buf, uint len);
634 634
635extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
636extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); 635extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
637 636
638extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len); 637extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index b3e3b7f25d82..a5c15cac5e7d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -421,6 +421,7 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
421 pktbuf->priority = h->priority & BDC_PRIORITY_MASK; 421 pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
422 422
423 skb_pull(pktbuf, BDC_HEADER_LEN); 423 skb_pull(pktbuf, BDC_HEADER_LEN);
424 skb_pull(pktbuf, h->data_offset << 2);
424 425
425 return 0; 426 return 0;
426} 427}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 4187435220f3..236cb9fa460c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -799,7 +799,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
799{ 799{
800 char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for 800 char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
801 "event_msgs" + '\0' + bitvec */ 801 "event_msgs" + '\0' + bitvec */
802 uint up = 0;
803 char buf[128], *ptr; 802 char buf[128], *ptr;
804 u32 dongle_align = drvr->bus_if->align; 803 u32 dongle_align = drvr->bus_if->align;
805 u32 glom = 0; 804 u32 glom = 0;
@@ -853,9 +852,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
853 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, 852 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
854 sizeof(iovbuf)); 853 sizeof(iovbuf));
855 854
856 /* Force STA UP */
857 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up));
858
859 /* Setup event_msgs */ 855 /* Setup event_msgs */
860 brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN, 856 brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
861 iovbuf, sizeof(iovbuf)); 857 iovbuf, sizeof(iovbuf));
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 2a1e5ae0c402..8933f9b31a9a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -799,6 +799,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
799 struct brcmf_bus *bus_if = drvr->bus_if; 799 struct brcmf_bus *bus_if = drvr->bus_if;
800 u32 toe_ol; 800 u32 toe_ol;
801 s32 ret = 0; 801 s32 ret = 0;
802 uint up = 0;
802 803
803 brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx); 804 brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
804 805
@@ -822,6 +823,10 @@ static int brcmf_netdev_open(struct net_device *ndev)
822 drvr->iflist[ifp->idx]->ndev->features &= 823 drvr->iflist[ifp->idx]->ndev->features &=
823 ~NETIF_F_IP_CSUM; 824 ~NETIF_F_IP_CSUM;
824 } 825 }
826
827 /* make sure RF is ready for work */
828 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up));
829
825 /* Allow transmit calls */ 830 /* Allow transmit calls */
826 netif_start_queue(ndev); 831 netif_start_queue(ndev);
827 drvr->bus_if->drvr_up = true; 832 drvr->bus_if->drvr_up = true;
@@ -843,6 +848,63 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
843 .ndo_set_rx_mode = brcmf_netdev_set_multicast_list 848 .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
844}; 849};
845 850
851static int brcmf_net_attach(struct brcmf_if *ifp)
852{
853 struct brcmf_pub *drvr = ifp->drvr;
854 struct net_device *ndev;
855 u8 temp_addr[ETH_ALEN];
856
857 brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
858
859 ndev = drvr->iflist[ifp->idx]->ndev;
860 ndev->netdev_ops = &brcmf_netdev_ops_pri;
861
862 /*
863 * determine mac address to use
864 */
865 if (is_valid_ether_addr(ifp->mac_addr))
866 memcpy(temp_addr, ifp->mac_addr, ETH_ALEN);
867 else
868 memcpy(temp_addr, drvr->mac, ETH_ALEN);
869
870 if (ifp->idx == 1) {
871 brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
872 /* ACCESSPOINT INTERFACE CASE */
873 temp_addr[0] |= 0X02; /* set bit 2 ,
874 - Locally Administered address */
875
876 }
877 ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
878 ndev->ethtool_ops = &brcmf_ethtool_ops;
879
880 drvr->rxsz = ndev->mtu + ndev->hard_header_len +
881 drvr->hdrlen;
882
883 memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
884
885 /* attach to cfg80211 for primary interface */
886 if (!ifp->idx) {
887 drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
888 if (drvr->config == NULL) {
889 brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
890 goto fail;
891 }
892 }
893
894 if (register_netdev(ndev) != 0) {
895 brcmf_dbg(ERROR, "couldn't register the net device\n");
896 goto fail;
897 }
898
899 brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
900
901 return 0;
902
903fail:
904 ndev->netdev_ops = NULL;
905 return -EBADE;
906}
907
846int 908int
847brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr) 909brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
848{ 910{
@@ -882,7 +944,7 @@ brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
882 if (mac_addr != NULL) 944 if (mac_addr != NULL)
883 memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN); 945 memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
884 946
885 if (brcmf_net_attach(drvr, ifp->idx)) { 947 if (brcmf_net_attach(ifp)) {
886 brcmf_dbg(ERROR, "brcmf_net_attach failed"); 948 brcmf_dbg(ERROR, "brcmf_net_attach failed");
887 free_netdev(ifp->ndev); 949 free_netdev(ifp->ndev);
888 drvr->iflist[ifidx] = NULL; 950 drvr->iflist[ifidx] = NULL;
@@ -1016,69 +1078,16 @@ int brcmf_bus_start(struct device *dev)
1016 if (ret < 0) 1078 if (ret < 0)
1017 return ret; 1079 return ret;
1018 1080
1081 /* add primary networking interface */
1082 ret = brcmf_add_if(dev, 0, "wlan%d", drvr->mac);
1083 if (ret < 0)
1084 return ret;
1085
1019 /* signal bus ready */ 1086 /* signal bus ready */
1020 bus_if->state = BRCMF_BUS_DATA; 1087 bus_if->state = BRCMF_BUS_DATA;
1021 return 0; 1088 return 0;
1022} 1089}
1023 1090
1024int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
1025{
1026 struct net_device *ndev;
1027 u8 temp_addr[ETH_ALEN] = {
1028 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
1029
1030 brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
1031
1032 ndev = drvr->iflist[ifidx]->ndev;
1033 ndev->netdev_ops = &brcmf_netdev_ops_pri;
1034
1035 /*
1036 * We have to use the primary MAC for virtual interfaces
1037 */
1038 if (ifidx != 0) {
1039 /* for virtual interfaces use the primary MAC */
1040 memcpy(temp_addr, drvr->mac, ETH_ALEN);
1041
1042 }
1043
1044 if (ifidx == 1) {
1045 brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
1046 /* ACCESSPOINT INTERFACE CASE */
1047 temp_addr[0] |= 0X02; /* set bit 2 ,
1048 - Locally Administered address */
1049
1050 }
1051 ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
1052 ndev->ethtool_ops = &brcmf_ethtool_ops;
1053
1054 drvr->rxsz = ndev->mtu + ndev->hard_header_len +
1055 drvr->hdrlen;
1056
1057 memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
1058
1059 /* attach to cfg80211 for primary interface */
1060 if (!ifidx) {
1061 drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
1062 if (drvr->config == NULL) {
1063 brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
1064 goto fail;
1065 }
1066 }
1067
1068 if (register_netdev(ndev) != 0) {
1069 brcmf_dbg(ERROR, "couldn't register the net device\n");
1070 goto fail;
1071 }
1072
1073 brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
1074
1075 return 0;
1076
1077fail:
1078 ndev->netdev_ops = NULL;
1079 return -EBADE;
1080}
1081
1082static void brcmf_bus_detach(struct brcmf_pub *drvr) 1091static void brcmf_bus_detach(struct brcmf_pub *drvr)
1083{ 1092{
1084 brcmf_dbg(TRACE, "Enter\n"); 1093 brcmf_dbg(TRACE, "Enter\n");
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 2bf5dda29291..149ee67beb2e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -574,6 +574,8 @@ struct brcmf_sdio {
574 574
575 struct task_struct *dpc_tsk; 575 struct task_struct *dpc_tsk;
576 struct completion dpc_wait; 576 struct completion dpc_wait;
577 struct list_head dpc_tsklst;
578 spinlock_t dpc_tl_lock;
577 579
578 struct semaphore sdsem; 580 struct semaphore sdsem;
579 581
@@ -2350,6 +2352,24 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
2350 up(&bus->sdsem); 2352 up(&bus->sdsem);
2351} 2353}
2352 2354
2355#ifdef CONFIG_BRCMFMAC_SDIO_OOB
2356static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2357{
2358 unsigned long flags;
2359
2360 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2361 if (!bus->sdiodev->irq_en && !bus->ipend) {
2362 enable_irq(bus->sdiodev->irq);
2363 bus->sdiodev->irq_en = true;
2364 }
2365 spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2366}
2367#else
2368static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2369{
2370}
2371#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
2372
2353static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) 2373static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2354{ 2374{
2355 u32 intstatus, newstatus = 0; 2375 u32 intstatus, newstatus = 0;
@@ -2507,6 +2527,8 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2507 bus->intstatus = intstatus; 2527 bus->intstatus = intstatus;
2508 2528
2509clkwait: 2529clkwait:
2530 brcmf_sdbrcm_clrintr(bus);
2531
2510 if (data_ok(bus) && bus->ctrl_frame_stat && 2532 if (data_ok(bus) && bus->ctrl_frame_stat &&
2511 (bus->clkstate == CLK_AVAIL)) { 2533 (bus->clkstate == CLK_AVAIL)) {
2512 int ret, i; 2534 int ret, i;
@@ -2594,29 +2616,59 @@ clkwait:
2594 return resched; 2616 return resched;
2595} 2617}
2596 2618
2619static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
2620{
2621 struct list_head *new_hd;
2622 unsigned long flags;
2623
2624 if (in_interrupt())
2625 new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
2626 else
2627 new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
2628 if (new_hd == NULL)
2629 return;
2630
2631 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2632 list_add_tail(new_hd, &bus->dpc_tsklst);
2633 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2634}
2635
2597static int brcmf_sdbrcm_dpc_thread(void *data) 2636static int brcmf_sdbrcm_dpc_thread(void *data)
2598{ 2637{
2599 struct brcmf_sdio *bus = (struct brcmf_sdio *) data; 2638 struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
2639 struct list_head *cur_hd, *tmp_hd;
2640 unsigned long flags;
2600 2641
2601 allow_signal(SIGTERM); 2642 allow_signal(SIGTERM);
2602 /* Run until signal received */ 2643 /* Run until signal received */
2603 while (1) { 2644 while (1) {
2604 if (kthread_should_stop()) 2645 if (kthread_should_stop())
2605 break; 2646 break;
2606 if (!wait_for_completion_interruptible(&bus->dpc_wait)) { 2647
2607 /* Call bus dpc unless it indicated down 2648 if (list_empty(&bus->dpc_tsklst))
2608 (then clean stop) */ 2649 if (wait_for_completion_interruptible(&bus->dpc_wait))
2609 if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) { 2650 break;
2610 if (brcmf_sdbrcm_dpc(bus)) 2651
2611 complete(&bus->dpc_wait); 2652 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2612 } else { 2653 list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
2654 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2655
2656 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
2613 /* after stopping the bus, exit thread */ 2657 /* after stopping the bus, exit thread */
2614 brcmf_sdbrcm_bus_stop(bus->sdiodev->dev); 2658 brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
2615 bus->dpc_tsk = NULL; 2659 bus->dpc_tsk = NULL;
2660 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2616 break; 2661 break;
2617 } 2662 }
2618 } else 2663
2619 break; 2664 if (brcmf_sdbrcm_dpc(bus))
2665 brcmf_sdbrcm_adddpctsk(bus);
2666
2667 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2668 list_del(cur_hd);
2669 kfree(cur_hd);
2670 }
2671 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2620 } 2672 }
2621 return 0; 2673 return 0;
2622} 2674}
@@ -2669,8 +2721,10 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2669 /* Schedule DPC if needed to send queued packet(s) */ 2721 /* Schedule DPC if needed to send queued packet(s) */
2670 if (!bus->dpc_sched) { 2722 if (!bus->dpc_sched) {
2671 bus->dpc_sched = true; 2723 bus->dpc_sched = true;
2672 if (bus->dpc_tsk) 2724 if (bus->dpc_tsk) {
2725 brcmf_sdbrcm_adddpctsk(bus);
2673 complete(&bus->dpc_wait); 2726 complete(&bus->dpc_wait);
2727 }
2674 } 2728 }
2675 2729
2676 return ret; 2730 return ret;
@@ -3474,8 +3528,14 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
3474 brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, 3528 brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
3475 SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); 3529 SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
3476 3530
3531 if (ret == 0) {
3532 ret = brcmf_sdio_intr_register(bus->sdiodev);
3533 if (ret != 0)
3534 brcmf_dbg(ERROR, "intr register failed:%d\n", ret);
3535 }
3536
3477 /* If we didn't come up, turn off backplane clock */ 3537 /* If we didn't come up, turn off backplane clock */
3478 if (!ret) 3538 if (bus_if->state != BRCMF_BUS_DATA)
3479 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); 3539 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3480 3540
3481exit: 3541exit:
@@ -3514,8 +3574,10 @@ void brcmf_sdbrcm_isr(void *arg)
3514 brcmf_dbg(ERROR, "isr w/o interrupt configured!\n"); 3574 brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
3515 3575
3516 bus->dpc_sched = true; 3576 bus->dpc_sched = true;
3517 if (bus->dpc_tsk) 3577 if (bus->dpc_tsk) {
3578 brcmf_sdbrcm_adddpctsk(bus);
3518 complete(&bus->dpc_wait); 3579 complete(&bus->dpc_wait);
3580 }
3519} 3581}
3520 3582
3521static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) 3583static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
@@ -3559,8 +3621,10 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3559 bus->ipend = true; 3621 bus->ipend = true;
3560 3622
3561 bus->dpc_sched = true; 3623 bus->dpc_sched = true;
3562 if (bus->dpc_tsk) 3624 if (bus->dpc_tsk) {
3625 brcmf_sdbrcm_adddpctsk(bus);
3563 complete(&bus->dpc_wait); 3626 complete(&bus->dpc_wait);
3627 }
3564 } 3628 }
3565 } 3629 }
3566 3630
@@ -3829,7 +3893,7 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
3829 3893
3830 if (bus) { 3894 if (bus) {
3831 /* De-register interrupt handler */ 3895 /* De-register interrupt handler */
3832 brcmf_sdcard_intr_dereg(bus->sdiodev); 3896 brcmf_sdio_intr_unregister(bus->sdiodev);
3833 3897
3834 if (bus->sdiodev->bus_if->drvr) { 3898 if (bus->sdiodev->bus_if->drvr) {
3835 brcmf_detach(bus->sdiodev->dev); 3899 brcmf_detach(bus->sdiodev->dev);
@@ -3897,6 +3961,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3897 } 3961 }
3898 /* Initialize DPC thread */ 3962 /* Initialize DPC thread */
3899 init_completion(&bus->dpc_wait); 3963 init_completion(&bus->dpc_wait);
3964 INIT_LIST_HEAD(&bus->dpc_tsklst);
3965 spin_lock_init(&bus->dpc_tl_lock);
3900 bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread, 3966 bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
3901 bus, "brcmf_dpc"); 3967 bus, "brcmf_dpc");
3902 if (IS_ERR(bus->dpc_tsk)) { 3968 if (IS_ERR(bus->dpc_tsk)) {
@@ -3928,15 +3994,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3928 goto fail; 3994 goto fail;
3929 } 3995 }
3930 3996
3931 /* Register interrupt callback, but mask it (not operational yet). */
3932 brcmf_dbg(INTR, "disable SDIO interrupts (not interested yet)\n");
3933 ret = brcmf_sdcard_intr_reg(bus->sdiodev);
3934 if (ret != 0) {
3935 brcmf_dbg(ERROR, "FAILED: sdcard_intr_reg returned %d\n", ret);
3936 goto fail;
3937 }
3938 brcmf_dbg(INTR, "registered SDIO interrupt function ok\n");
3939
3940 brcmf_dbg(INFO, "completed!!\n"); 3997 brcmf_dbg(INFO, "completed!!\n");
3941 3998
3942 /* if firmware path present try to download and bring up bus */ 3999 /* if firmware path present try to download and bring up bus */
@@ -3948,12 +4005,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3948 } 4005 }
3949 } 4006 }
3950 4007
3951 /* add interface and open for business */
3952 if (brcmf_add_if(bus->sdiodev->dev, 0, "wlan%d", NULL)) {
3953 brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
3954 goto fail;
3955 }
3956
3957 return bus; 4008 return bus;
3958 4009
3959fail: 4010fail:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 0281d207d998..7010eaf71f99 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -43,6 +43,13 @@
43/* as of sdiod rev 0, supports 3 functions */ 43/* as of sdiod rev 0, supports 3 functions */
44#define SBSDIO_NUM_FUNCTION 3 44#define SBSDIO_NUM_FUNCTION 3
45 45
46/* function 0 vendor specific CCCR registers */
47#define SDIO_CCCR_BRCM_SEPINT 0xf2
48
49#define SDIO_SEPINT_MASK 0x01
50#define SDIO_SEPINT_OE 0x02
51#define SDIO_SEPINT_ACT_HI 0x04
52
46/* function 1 miscellaneous registers */ 53/* function 1 miscellaneous registers */
47 54
48/* sprom command and status */ 55/* sprom command and status */
@@ -144,13 +151,18 @@ struct brcmf_sdio_dev {
144 wait_queue_head_t request_buffer_wait; 151 wait_queue_head_t request_buffer_wait;
145 struct device *dev; 152 struct device *dev;
146 struct brcmf_bus *bus_if; 153 struct brcmf_bus *bus_if;
154#ifdef CONFIG_BRCMFMAC_SDIO_OOB
155 unsigned int irq; /* oob interrupt number */
156 unsigned long irq_flags; /* board specific oob flags */
157 bool irq_en; /* irq enable flags */
158 spinlock_t irq_en_lock;
159 bool irq_wake; /* irq wake enable flags */
160#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
147}; 161};
148 162
149/* Register/deregister device interrupt handler. */ 163/* Register/deregister interrupt handler. */
150extern int 164extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
151brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev); 165extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
152
153extern int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev);
154 166
155/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface). 167/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
156 * fn: function number 168 * fn: function number
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 82364223e817..1d67ecf681b7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1383,14 +1383,6 @@ static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
1383 goto fail; 1383 goto fail;
1384 } 1384 }
1385 1385
1386 /* add interface and open for business */
1387 ret = brcmf_add_if(dev, 0, "wlan%d", NULL);
1388 if (ret) {
1389 brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
1390 brcmf_detach(dev);
1391 goto fail;
1392 }
1393
1394 return 0; 1386 return 0;
1395fail: 1387fail:
1396 /* Release resources in reverse order */ 1388 /* Release resources in reverse order */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 55e9f45fce22..0efe88e25a9a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -628,6 +628,40 @@ brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode,
628 return false; 628 return false;
629} 629}
630 630
631/*
632 * Indicates whether the country provided is valid to pass
633 * to cfg80211 or not.
634 *
635 * returns true if valid; false if not.
636 */
637static bool brcms_c_country_valid(const char *ccode)
638{
639 /*
640 * only allow ascii alpha uppercase for the first 2
641 * chars.
642 */
643 if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A &&
644 (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A &&
645 ccode[2] == '\0'))
646 return false;
647
648 /*
649 * do not match ISO 3166-1 user assigned country codes
650 * that may be in the driver table
651 */
652 if (!strcmp("AA", ccode) || /* AA */
653 !strcmp("ZZ", ccode) || /* ZZ */
654 ccode[0] == 'X' || /* XA - XZ */
655 (ccode[0] == 'Q' && /* QM - QZ */
656 (ccode[1] >= 'M' && ccode[1] <= 'Z')))
657 return false;
658
659 if (!strcmp("NA", ccode))
660 return false;
661
662 return true;
663}
664
631/* Lookup a country info structure from a null terminated country 665/* Lookup a country info structure from a null terminated country
632 * abbreviation and regrev directly with no translation. 666 * abbreviation and regrev directly with no translation.
633 */ 667 */
@@ -1089,7 +1123,7 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
1089 1123
1090 /* store the country code for passing up as a regulatory hint */ 1124 /* store the country code for passing up as a regulatory hint */
1091 ccode = getvar(wlc->hw->sih, BRCMS_SROM_CCODE); 1125 ccode = getvar(wlc->hw->sih, BRCMS_SROM_CCODE);
1092 if (ccode) 1126 if (ccode && brcms_c_country_valid(ccode))
1093 strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1); 1127 strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
1094 1128
1095 /* 1129 /*
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index 1948cb2771e9..3f659e09f1cc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -733,7 +733,7 @@ struct cck_phy_hdr {
733 do { \ 733 do { \
734 plcp[1] = len & 0xff; \ 734 plcp[1] = len & 0xff; \
735 plcp[2] = ((len >> 8) & 0xff); \ 735 plcp[2] = ((len >> 8) & 0xff); \
736 } while (0); 736 } while (0)
737 737
738#define BRCMS_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU) 738#define BRCMS_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU)
739#define BRCMS_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU) 739#define BRCMS_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 569ab8abd2a1..aa15558f75c8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1069,11 +1069,7 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
1069 wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status" 1069 wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
1070 "%d\n", __func__, err); 1070 "%d\n", __func__, err);
1071 1071
1072 if (wl->pub->srom_ccode[0]) 1072 if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode))
1073 err = brcms_set_hint(wl, wl->pub->srom_ccode);
1074 else
1075 err = brcms_set_hint(wl, "US");
1076 if (err)
1077 wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n", 1073 wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n",
1078 __func__, err); 1074 __func__, err);
1079 1075
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 231ddf4a674f..b4d92792c502 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -847,8 +847,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
847 */ 847 */
848 if (!(txs->status & TX_STATUS_AMPDU) 848 if (!(txs->status & TX_STATUS_AMPDU)
849 && (txs->status & TX_STATUS_INTERMEDIATE)) { 849 && (txs->status & TX_STATUS_INTERMEDIATE)) {
850 wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n", 850 BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
851 __func__);
852 return false; 851 return false;
853 } 852 }
854 853
@@ -7614,6 +7613,7 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7614{ 7613{
7615 int len_mpdu; 7614 int len_mpdu;
7616 struct ieee80211_rx_status rx_status; 7615 struct ieee80211_rx_status rx_status;
7616 struct ieee80211_hdr *hdr;
7617 7617
7618 memset(&rx_status, 0, sizeof(rx_status)); 7618 memset(&rx_status, 0, sizeof(rx_status));
7619 prep_mac80211_status(wlc, rxh, p, &rx_status); 7619 prep_mac80211_status(wlc, rxh, p, &rx_status);
@@ -7623,6 +7623,13 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7623 skb_pull(p, D11_PHY_HDR_LEN); 7623 skb_pull(p, D11_PHY_HDR_LEN);
7624 __skb_trim(p, len_mpdu); 7624 __skb_trim(p, len_mpdu);
7625 7625
7626 /* unmute transmit */
7627 if (wlc->hw->suspended_fifos) {
7628 hdr = (struct ieee80211_hdr *)p->data;
7629 if (ieee80211_is_beacon(hdr->frame_control))
7630 brcms_b_mute(wlc->hw, false);
7631 }
7632
7626 memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status)); 7633 memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
7627 ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p); 7634 ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
7628} 7635}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index ce8562aa5db0..0fce56235f38 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -207,8 +207,7 @@ static const iqcal_gain_params_lcnphy *tbl_iqcal_gainparams_lcnphy[1] = {
207}; 207};
208 208
209static const u16 iqcal_gainparams_numgains_lcnphy[1] = { 209static const u16 iqcal_gainparams_numgains_lcnphy[1] = {
210 sizeof(tbl_iqcal_gainparams_lcnphy_2G) / 210 ARRAY_SIZE(tbl_iqcal_gainparams_lcnphy_2G),
211 sizeof(*tbl_iqcal_gainparams_lcnphy_2G),
212}; 211};
213 212
214static const struct lcnphy_sfo_cfg lcnphy_sfo_cfg[] = { 213static const struct lcnphy_sfo_cfg lcnphy_sfo_cfg[] = {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 39095741fd05..812b6e38526e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -16353,11 +16353,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16353 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, 16353 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
16354 rfseq_rx2tx_events_rev3_ipa, 16354 rfseq_rx2tx_events_rev3_ipa,
16355 rfseq_rx2tx_dlys_rev3_ipa, 16355 rfseq_rx2tx_dlys_rev3_ipa,
16356 sizeof 16356 ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa));
16357 (rfseq_rx2tx_events_rev3_ipa) /
16358 sizeof
16359 (rfseq_rx2tx_events_rev3_ipa
16360 [0]));
16361 16357
16362 mod_phy_reg(pi, 0x299, (0x3 << 14), (0x1 << 14)); 16358 mod_phy_reg(pi, 0x299, (0x3 << 14), (0x1 << 14));
16363 mod_phy_reg(pi, 0x29d, (0x3 << 14), (0x1 << 14)); 16359 mod_phy_reg(pi, 0x29d, (0x3 << 14), (0x1 << 14));
@@ -16858,18 +16854,13 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16858 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, 16854 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX,
16859 rfseq_tx2rx_events_rev3, 16855 rfseq_tx2rx_events_rev3,
16860 rfseq_tx2rx_dlys_rev3, 16856 rfseq_tx2rx_dlys_rev3,
16861 sizeof(rfseq_tx2rx_events_rev3) / 16857 ARRAY_SIZE(rfseq_tx2rx_events_rev3));
16862 sizeof(rfseq_tx2rx_events_rev3[0]));
16863 16858
16864 if (PHY_IPA(pi)) 16859 if (PHY_IPA(pi))
16865 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, 16860 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
16866 rfseq_rx2tx_events_rev3_ipa, 16861 rfseq_rx2tx_events_rev3_ipa,
16867 rfseq_rx2tx_dlys_rev3_ipa, 16862 rfseq_rx2tx_dlys_rev3_ipa,
16868 sizeof 16863 ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa));
16869 (rfseq_rx2tx_events_rev3_ipa) /
16870 sizeof
16871 (rfseq_rx2tx_events_rev3_ipa
16872 [0]));
16873 16864
16874 if ((pi->sh->hw_phyrxchain != 0x3) && 16865 if ((pi->sh->hw_phyrxchain != 0x3) &&
16875 (pi->sh->hw_phyrxchain != pi->sh->hw_phytxchain)) { 16866 (pi->sh->hw_phyrxchain != pi->sh->hw_phytxchain)) {
@@ -16885,8 +16876,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16885 pi, NPHY_RFSEQ_RX2TX, 16876 pi, NPHY_RFSEQ_RX2TX,
16886 rfseq_rx2tx_events_rev3, 16877 rfseq_rx2tx_events_rev3,
16887 rfseq_rx2tx_dlys_rev3, 16878 rfseq_rx2tx_dlys_rev3,
16888 sizeof(rfseq_rx2tx_events_rev3) / 16879 ARRAY_SIZE(rfseq_rx2tx_events_rev3));
16889 sizeof(rfseq_rx2tx_events_rev3[0]));
16890 } 16880 }
16891 16881
16892 if (CHSPEC_IS2G(pi->radio_chanspec)) 16882 if (CHSPEC_IS2G(pi->radio_chanspec))
@@ -17209,13 +17199,11 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
17209 17199
17210 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events, 17200 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events,
17211 rfseq_rx2tx_dlys, 17201 rfseq_rx2tx_dlys,
17212 sizeof(rfseq_rx2tx_events) / 17202 ARRAY_SIZE(rfseq_rx2tx_events));
17213 sizeof(rfseq_rx2tx_events[0]));
17214 17203
17215 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, rfseq_tx2rx_events, 17204 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, rfseq_tx2rx_events,
17216 rfseq_tx2rx_dlys, 17205 rfseq_tx2rx_dlys,
17217 sizeof(rfseq_tx2rx_events) / 17206 ARRAY_SIZE(rfseq_tx2rx_events));
17218 sizeof(rfseq_tx2rx_events[0]));
17219 17207
17220 wlc_phy_workarounds_nphy_gainctrl(pi); 17208 wlc_phy_workarounds_nphy_gainctrl(pi);
17221 17209
@@ -19357,8 +19345,7 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
19357 } 19345 }
19358 19346
19359 if (isAdjustNoiseVar) { 19347 if (isAdjustNoiseVar) {
19360 numTonesAdjust = sizeof(nphy_adj_tone_id_buf) / 19348 numTonesAdjust = ARRAY_SIZE(nphy_adj_tone_id_buf);
19361 sizeof(nphy_adj_tone_id_buf[0]);
19362 19349
19363 wlc_phy_adjust_min_noisevar_nphy( 19350 wlc_phy_adjust_min_noisevar_nphy(
19364 pi, 19351 pi,
@@ -25204,32 +25191,26 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
25204 25191
25205 phy_a15 = pad_gain_codes_used_2057rev5; 25192 phy_a15 = pad_gain_codes_used_2057rev5;
25206 phy_a13 = 25193 phy_a13 =
25207 sizeof(pad_gain_codes_used_2057rev5) / 25194 ARRAY_SIZE(pad_gain_codes_used_2057rev5) - 1;
25208 sizeof(pad_gain_codes_used_2057rev5
25209 [0]) - 1;
25210 25195
25211 } else if ((pi->pubpi.radiorev == 7) 25196 } else if ((pi->pubpi.radiorev == 7)
25212 || (pi->pubpi.radiorev == 8)) { 25197 || (pi->pubpi.radiorev == 8)) {
25213 25198
25214 phy_a15 = pad_gain_codes_used_2057rev7; 25199 phy_a15 = pad_gain_codes_used_2057rev7;
25215 phy_a13 = 25200 phy_a13 =
25216 sizeof(pad_gain_codes_used_2057rev7) / 25201 ARRAY_SIZE(pad_gain_codes_used_2057rev7) - 1;
25217 sizeof(pad_gain_codes_used_2057rev7
25218 [0]) - 1;
25219 25202
25220 } else { 25203 } else {
25221 25204
25222 phy_a15 = pad_all_gain_codes_2057; 25205 phy_a15 = pad_all_gain_codes_2057;
25223 phy_a13 = sizeof(pad_all_gain_codes_2057) / 25206 phy_a13 = ARRAY_SIZE(pad_all_gain_codes_2057) -
25224 sizeof(pad_all_gain_codes_2057[0]) -
25225 1; 25207 1;
25226 } 25208 }
25227 25209
25228 } else { 25210 } else {
25229 25211
25230 phy_a15 = pga_all_gain_codes_2057; 25212 phy_a15 = pga_all_gain_codes_2057;
25231 phy_a13 = sizeof(pga_all_gain_codes_2057) / 25213 phy_a13 = ARRAY_SIZE(pga_all_gain_codes_2057) - 1;
25232 sizeof(pga_all_gain_codes_2057[0]) - 1;
25233 } 25214 }
25234 25215
25235 phy_a14 = 0; 25216 phy_a14 = 0;
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 5fb17d53c9b2..333193f20e1c 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -17,17 +17,7 @@
17#ifndef _BRCM_HW_IDS_H_ 17#ifndef _BRCM_HW_IDS_H_
18#define _BRCM_HW_IDS_H_ 18#define _BRCM_HW_IDS_H_
19 19
20#define BCM4325_D11DUAL_ID 0x431b 20#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
21#define BCM4325_D11G_ID 0x431c
22#define BCM4325_D11A_ID 0x431d
23
24#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */
25#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */
26#define BCM4329_D11NDUAL_ID 0x432e
27
28#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */
29#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */
30#define BCM4319_D11N5G_ID 0x4339 /* 4319 802.11n 5G device */
31 21
32#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */ 22#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */
33#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db */ 23#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db */
@@ -37,23 +27,15 @@
37#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */ 27#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */
38#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */ 28#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */
39 29
40#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */ 30/* Chipcommon Core Chip IDs */
41 31#define BCM4313_CHIP_ID 0x4313
42/* Chip IDs */ 32#define BCM43224_CHIP_ID 43224
43#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */ 33#define BCM43225_CHIP_ID 43225
44#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */ 34#define BCM43235_CHIP_ID 43235
45 35#define BCM43236_CHIP_ID 43236
46#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */ 36#define BCM43238_CHIP_ID 43238
47#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */ 37#define BCM4329_CHIP_ID 0x4329
48#define BCM43421_CHIP_ID 43421 /* 43421 chipcommon chipid */ 38#define BCM4330_CHIP_ID 0x4330
49#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */ 39#define BCM4331_CHIP_ID 0x4331
50#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */
51#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */
52#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */
53#define BCM4325_CHIP_ID 0x4325 /* 4325 chipcommon chipid */
54#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */
55#define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */
56#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */
57#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */
58 40
59#endif /* _BRCM_HW_IDS_H_ */ 41#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index bfa0d54221e8..627bc12074c7 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -244,8 +244,7 @@ u16 hostap_tx_callback_register(local_info_t *local,
244 unsigned long flags; 244 unsigned long flags;
245 struct hostap_tx_callback_info *entry; 245 struct hostap_tx_callback_info *entry;
246 246
247 entry = kmalloc(sizeof(*entry), 247 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
248 GFP_ATOMIC);
249 if (entry == NULL) 248 if (entry == NULL)
250 return 0; 249 return 0;
251 250
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 972a9c3af39e..05ca3402dca7 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -457,18 +457,4 @@ static struct pci_driver prism2_pci_driver = {
457#endif /* CONFIG_PM */ 457#endif /* CONFIG_PM */
458}; 458};
459 459
460 460module_pci_driver(prism2_pci_driver);
461static int __init init_prism2_pci(void)
462{
463 return pci_register_driver(&prism2_pci_driver);
464}
465
466
467static void __exit exit_prism2_pci(void)
468{
469 pci_unregister_driver(&prism2_pci_driver);
470}
471
472
473module_init(init_prism2_pci);
474module_exit(exit_prism2_pci);
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 33e79037770b..c3d067ee4db9 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -616,18 +616,4 @@ static struct pci_driver prism2_plx_driver = {
616 .remove = prism2_plx_remove, 616 .remove = prism2_plx_remove,
617}; 617};
618 618
619 619module_pci_driver(prism2_plx_driver);
620static int __init init_prism2_plx(void)
621{
622 return pci_register_driver(&prism2_plx_driver);
623}
624
625
626static void __exit exit_prism2_plx(void)
627{
628 pci_unregister_driver(&prism2_plx_driver);
629}
630
631
632module_init(init_prism2_plx);
633module_exit(exit_prism2_plx);
diff --git a/drivers/net/wireless/ipw2x00/ipw.h b/drivers/net/wireless/ipw2x00/ipw.h
new file mode 100644
index 000000000000..4007bf5ed6f3
--- /dev/null
+++ b/drivers/net/wireless/ipw2x00/ipw.h
@@ -0,0 +1,23 @@
1/*
2 * Intel Pro/Wireless 2100, 2200BG, 2915ABG network connection driver
3 *
4 * Copyright 2012 Stanislav Yakovlev <stas.yakovlev@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __IPW_H__
12#define __IPW_H__
13
14#include <linux/ieee80211.h>
15
16static const u32 ipw_cipher_suites[] = {
17 WLAN_CIPHER_SUITE_WEP40,
18 WLAN_CIPHER_SUITE_WEP104,
19 WLAN_CIPHER_SUITE_TKIP,
20 WLAN_CIPHER_SUITE_CCMP,
21};
22
23#endif
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index f0551f807f69..9cfae0c08707 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -166,6 +166,7 @@ that only one external action is invoked at a time.
166#include <net/lib80211.h> 166#include <net/lib80211.h>
167 167
168#include "ipw2100.h" 168#include "ipw2100.h"
169#include "ipw.h"
169 170
170#define IPW2100_VERSION "git-1.2.2" 171#define IPW2100_VERSION "git-1.2.2"
171 172
@@ -343,38 +344,50 @@ static struct iw_handler_def ipw2100_wx_handler_def;
343 344
344static inline void read_register(struct net_device *dev, u32 reg, u32 * val) 345static inline void read_register(struct net_device *dev, u32 reg, u32 * val)
345{ 346{
346 *val = readl((void __iomem *)(dev->base_addr + reg)); 347 struct ipw2100_priv *priv = libipw_priv(dev);
348
349 *val = ioread32(priv->ioaddr + reg);
347 IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val); 350 IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val);
348} 351}
349 352
350static inline void write_register(struct net_device *dev, u32 reg, u32 val) 353static inline void write_register(struct net_device *dev, u32 reg, u32 val)
351{ 354{
352 writel(val, (void __iomem *)(dev->base_addr + reg)); 355 struct ipw2100_priv *priv = libipw_priv(dev);
356
357 iowrite32(val, priv->ioaddr + reg);
353 IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val); 358 IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val);
354} 359}
355 360
356static inline void read_register_word(struct net_device *dev, u32 reg, 361static inline void read_register_word(struct net_device *dev, u32 reg,
357 u16 * val) 362 u16 * val)
358{ 363{
359 *val = readw((void __iomem *)(dev->base_addr + reg)); 364 struct ipw2100_priv *priv = libipw_priv(dev);
365
366 *val = ioread16(priv->ioaddr + reg);
360 IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val); 367 IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val);
361} 368}
362 369
363static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val) 370static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val)
364{ 371{
365 *val = readb((void __iomem *)(dev->base_addr + reg)); 372 struct ipw2100_priv *priv = libipw_priv(dev);
373
374 *val = ioread8(priv->ioaddr + reg);
366 IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val); 375 IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
367} 376}
368 377
369static inline void write_register_word(struct net_device *dev, u32 reg, u16 val) 378static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
370{ 379{
371 writew(val, (void __iomem *)(dev->base_addr + reg)); 380 struct ipw2100_priv *priv = libipw_priv(dev);
381
382 iowrite16(val, priv->ioaddr + reg);
372 IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val); 383 IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
373} 384}
374 385
375static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val) 386static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
376{ 387{
377 writeb(val, (void __iomem *)(dev->base_addr + reg)); 388 struct ipw2100_priv *priv = libipw_priv(dev);
389
390 iowrite8(val, priv->ioaddr + reg);
378 IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val); 391 IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val);
379} 392}
380 393
@@ -506,13 +519,13 @@ static void read_nic_memory(struct net_device *dev, u32 addr, u32 len,
506 read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf); 519 read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf);
507} 520}
508 521
509static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev) 522static bool ipw2100_hw_is_adapter_in_system(struct net_device *dev)
510{ 523{
511 return (dev->base_addr && 524 u32 dbg;
512 (readl 525
513 ((void __iomem *)(dev->base_addr + 526 read_register(dev, IPW_REG_DOA_DEBUG_AREA_START, &dbg);
514 IPW_REG_DOA_DEBUG_AREA_START)) 527
515 == IPW_DATA_DOA_DEBUG_VALUE)); 528 return dbg == IPW_DATA_DOA_DEBUG_VALUE;
516} 529}
517 530
518static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord, 531static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord,
@@ -1946,11 +1959,12 @@ static int ipw2100_wdev_init(struct net_device *dev)
1946 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; 1959 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
1947 } 1960 }
1948 1961
1962 wdev->wiphy->cipher_suites = ipw_cipher_suites;
1963 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
1964
1949 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 1965 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
1950 if (wiphy_register(wdev->wiphy)) { 1966 if (wiphy_register(wdev->wiphy))
1951 ipw2100_down(priv);
1952 return -EIO; 1967 return -EIO;
1953 }
1954 return 0; 1968 return 0;
1955} 1969}
1956 1970
@@ -3773,7 +3787,7 @@ IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
3773 IPW2100_ORD(COUNTRY_CODE, 3787 IPW2100_ORD(COUNTRY_CODE,
3774 "IEEE country code as recv'd from beacon"), 3788 "IEEE country code as recv'd from beacon"),
3775 IPW2100_ORD(COUNTRY_CHANNELS, 3789 IPW2100_ORD(COUNTRY_CHANNELS,
3776 "channels suported by country"), 3790 "channels supported by country"),
3777 IPW2100_ORD(RESET_CNT, "adapter resets (warm)"), 3791 IPW2100_ORD(RESET_CNT, "adapter resets (warm)"),
3778 IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"), 3792 IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"),
3779 IPW2100_ORD(ANTENNA_DIVERSITY, 3793 IPW2100_ORD(ANTENNA_DIVERSITY,
@@ -4062,7 +4076,7 @@ static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
4062 ipw2100_firmware.version = 0; 4076 ipw2100_firmware.version = 0;
4063#endif 4077#endif
4064 4078
4065 printk(KERN_INFO "%s: Reseting on mode change.\n", priv->net_dev->name); 4079 printk(KERN_INFO "%s: Resetting on mode change.\n", priv->net_dev->name);
4066 priv->reset_backoff = 0; 4080 priv->reset_backoff = 0;
4067 schedule_reset(priv); 4081 schedule_reset(priv);
4068 4082
@@ -6082,9 +6096,7 @@ static const struct net_device_ops ipw2100_netdev_ops = {
6082/* Look into using netdev destructor to shutdown libipw? */ 6096/* Look into using netdev destructor to shutdown libipw? */
6083 6097
6084static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, 6098static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6085 void __iomem * base_addr, 6099 void __iomem * ioaddr)
6086 unsigned long mem_start,
6087 unsigned long mem_len)
6088{ 6100{
6089 struct ipw2100_priv *priv; 6101 struct ipw2100_priv *priv;
6090 struct net_device *dev; 6102 struct net_device *dev;
@@ -6096,6 +6108,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6096 priv->ieee = netdev_priv(dev); 6108 priv->ieee = netdev_priv(dev);
6097 priv->pci_dev = pci_dev; 6109 priv->pci_dev = pci_dev;
6098 priv->net_dev = dev; 6110 priv->net_dev = dev;
6111 priv->ioaddr = ioaddr;
6099 6112
6100 priv->ieee->hard_start_xmit = ipw2100_tx; 6113 priv->ieee->hard_start_xmit = ipw2100_tx;
6101 priv->ieee->set_security = shim__set_security; 6114 priv->ieee->set_security = shim__set_security;
@@ -6111,10 +6124,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6111 dev->watchdog_timeo = 3 * HZ; 6124 dev->watchdog_timeo = 3 * HZ;
6112 dev->irq = 0; 6125 dev->irq = 0;
6113 6126
6114 dev->base_addr = (unsigned long)base_addr;
6115 dev->mem_start = mem_start;
6116 dev->mem_end = dev->mem_start + mem_len - 1;
6117
6118 /* NOTE: We don't use the wireless_handlers hook 6127 /* NOTE: We don't use the wireless_handlers hook
6119 * in dev as the system will start throwing WX requests 6128 * in dev as the system will start throwing WX requests
6120 * to us before we're actually initialized and it just 6129 * to us before we're actually initialized and it just
@@ -6215,8 +6224,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6215static int ipw2100_pci_init_one(struct pci_dev *pci_dev, 6224static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6216 const struct pci_device_id *ent) 6225 const struct pci_device_id *ent)
6217{ 6226{
6218 unsigned long mem_start, mem_len, mem_flags; 6227 void __iomem *ioaddr;
6219 void __iomem *base_addr = NULL;
6220 struct net_device *dev = NULL; 6228 struct net_device *dev = NULL;
6221 struct ipw2100_priv *priv = NULL; 6229 struct ipw2100_priv *priv = NULL;
6222 int err = 0; 6230 int err = 0;
@@ -6225,18 +6233,14 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6225 6233
6226 IPW_DEBUG_INFO("enter\n"); 6234 IPW_DEBUG_INFO("enter\n");
6227 6235
6228 mem_start = pci_resource_start(pci_dev, 0); 6236 if (!(pci_resource_flags(pci_dev, 0) & IORESOURCE_MEM)) {
6229 mem_len = pci_resource_len(pci_dev, 0);
6230 mem_flags = pci_resource_flags(pci_dev, 0);
6231
6232 if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) {
6233 IPW_DEBUG_INFO("weird - resource type is not memory\n"); 6237 IPW_DEBUG_INFO("weird - resource type is not memory\n");
6234 err = -ENODEV; 6238 err = -ENODEV;
6235 goto fail; 6239 goto out;
6236 } 6240 }
6237 6241
6238 base_addr = ioremap_nocache(mem_start, mem_len); 6242 ioaddr = pci_iomap(pci_dev, 0, 0);
6239 if (!base_addr) { 6243 if (!ioaddr) {
6240 printk(KERN_WARNING DRV_NAME 6244 printk(KERN_WARNING DRV_NAME
6241 "Error calling ioremap_nocache.\n"); 6245 "Error calling ioremap_nocache.\n");
6242 err = -EIO; 6246 err = -EIO;
@@ -6244,7 +6248,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6244 } 6248 }
6245 6249
6246 /* allocate and initialize our net_device */ 6250 /* allocate and initialize our net_device */
6247 dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len); 6251 dev = ipw2100_alloc_device(pci_dev, ioaddr);
6248 if (!dev) { 6252 if (!dev) {
6249 printk(KERN_WARNING DRV_NAME 6253 printk(KERN_WARNING DRV_NAME
6250 "Error calling ipw2100_alloc_device.\n"); 6254 "Error calling ipw2100_alloc_device.\n");
@@ -6325,6 +6329,11 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6325 printk(KERN_INFO DRV_NAME 6329 printk(KERN_INFO DRV_NAME
6326 ": Detected Intel PRO/Wireless 2100 Network Connection\n"); 6330 ": Detected Intel PRO/Wireless 2100 Network Connection\n");
6327 6331
6332 err = ipw2100_wdev_init(dev);
6333 if (err)
6334 goto fail;
6335 registered = 1;
6336
6328 /* Bring up the interface. Pre 0.46, after we registered the 6337 /* Bring up the interface. Pre 0.46, after we registered the
6329 * network device we would call ipw2100_up. This introduced a race 6338 * network device we would call ipw2100_up. This introduced a race
6330 * condition with newer hotplug configurations (network was coming 6339 * condition with newer hotplug configurations (network was coming
@@ -6341,11 +6350,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6341 "Error calling register_netdev.\n"); 6350 "Error calling register_netdev.\n");
6342 goto fail; 6351 goto fail;
6343 } 6352 }
6344 registered = 1; 6353 registered = 2;
6345
6346 err = ipw2100_wdev_init(dev);
6347 if (err)
6348 goto fail;
6349 6354
6350 mutex_lock(&priv->action_mutex); 6355 mutex_lock(&priv->action_mutex);
6351 6356
@@ -6379,18 +6384,21 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6379 priv->status |= STATUS_INITIALIZED; 6384 priv->status |= STATUS_INITIALIZED;
6380 6385
6381 mutex_unlock(&priv->action_mutex); 6386 mutex_unlock(&priv->action_mutex);
6382 6387out:
6383 return 0; 6388 return err;
6384 6389
6385 fail_unlock: 6390 fail_unlock:
6386 mutex_unlock(&priv->action_mutex); 6391 mutex_unlock(&priv->action_mutex);
6387 wiphy_unregister(priv->ieee->wdev.wiphy);
6388 kfree(priv->ieee->bg_band.channels);
6389 fail: 6392 fail:
6390 if (dev) { 6393 if (dev) {
6391 if (registered) 6394 if (registered >= 2)
6392 unregister_netdev(dev); 6395 unregister_netdev(dev);
6393 6396
6397 if (registered) {
6398 wiphy_unregister(priv->ieee->wdev.wiphy);
6399 kfree(priv->ieee->bg_band.channels);
6400 }
6401
6394 ipw2100_hw_stop_adapter(priv); 6402 ipw2100_hw_stop_adapter(priv);
6395 6403
6396 ipw2100_disable_interrupts(priv); 6404 ipw2100_disable_interrupts(priv);
@@ -6409,63 +6417,56 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6409 pci_set_drvdata(pci_dev, NULL); 6417 pci_set_drvdata(pci_dev, NULL);
6410 } 6418 }
6411 6419
6412 if (base_addr) 6420 pci_iounmap(pci_dev, ioaddr);
6413 iounmap(base_addr);
6414 6421
6415 pci_release_regions(pci_dev); 6422 pci_release_regions(pci_dev);
6416 pci_disable_device(pci_dev); 6423 pci_disable_device(pci_dev);
6417 6424 goto out;
6418 return err;
6419} 6425}
6420 6426
6421static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) 6427static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6422{ 6428{
6423 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); 6429 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
6424 struct net_device *dev; 6430 struct net_device *dev = priv->net_dev;
6425 6431
6426 if (priv) { 6432 mutex_lock(&priv->action_mutex);
6427 mutex_lock(&priv->action_mutex);
6428 6433
6429 priv->status &= ~STATUS_INITIALIZED; 6434 priv->status &= ~STATUS_INITIALIZED;
6430 6435
6431 dev = priv->net_dev; 6436 sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
6432 sysfs_remove_group(&pci_dev->dev.kobj,
6433 &ipw2100_attribute_group);
6434 6437
6435#ifdef CONFIG_PM 6438#ifdef CONFIG_PM
6436 if (ipw2100_firmware.version) 6439 if (ipw2100_firmware.version)
6437 ipw2100_release_firmware(priv, &ipw2100_firmware); 6440 ipw2100_release_firmware(priv, &ipw2100_firmware);
6438#endif 6441#endif
6439 /* Take down the hardware */ 6442 /* Take down the hardware */
6440 ipw2100_down(priv); 6443 ipw2100_down(priv);
6441 6444
6442 /* Release the mutex so that the network subsystem can 6445 /* Release the mutex so that the network subsystem can
6443 * complete any needed calls into the driver... */ 6446 * complete any needed calls into the driver... */
6444 mutex_unlock(&priv->action_mutex); 6447 mutex_unlock(&priv->action_mutex);
6445 6448
6446 /* Unregister the device first - this results in close() 6449 /* Unregister the device first - this results in close()
6447 * being called if the device is open. If we free storage 6450 * being called if the device is open. If we free storage
6448 * first, then close() will crash. */ 6451 * first, then close() will crash.
6449 unregister_netdev(dev); 6452 * FIXME: remove the comment above. */
6453 unregister_netdev(dev);
6450 6454
6451 ipw2100_kill_works(priv); 6455 ipw2100_kill_works(priv);
6452 6456
6453 ipw2100_queues_free(priv); 6457 ipw2100_queues_free(priv);
6454 6458
6455 /* Free potential debugging firmware snapshot */ 6459 /* Free potential debugging firmware snapshot */
6456 ipw2100_snapshot_free(priv); 6460 ipw2100_snapshot_free(priv);
6457 6461
6458 if (dev->irq) 6462 free_irq(dev->irq, priv);
6459 free_irq(dev->irq, priv);
6460 6463
6461 if (dev->base_addr) 6464 pci_iounmap(pci_dev, priv->ioaddr);
6462 iounmap((void __iomem *)dev->base_addr);
6463 6465
6464 /* wiphy_unregister needs to be here, before free_libipw */ 6466 /* wiphy_unregister needs to be here, before free_libipw */
6465 wiphy_unregister(priv->ieee->wdev.wiphy); 6467 wiphy_unregister(priv->ieee->wdev.wiphy);
6466 kfree(priv->ieee->bg_band.channels); 6468 kfree(priv->ieee->bg_band.channels);
6467 free_libipw(dev, 0); 6469 free_libipw(dev, 0);
6468 }
6469 6470
6470 pci_release_regions(pci_dev); 6471 pci_release_regions(pci_dev);
6471 pci_disable_device(pci_dev); 6472 pci_disable_device(pci_dev);
@@ -8508,8 +8509,7 @@ static void ipw2100_release_firmware(struct ipw2100_priv *priv,
8508 struct ipw2100_fw *fw) 8509 struct ipw2100_fw *fw)
8509{ 8510{
8510 fw->version = 0; 8511 fw->version = 0;
8511 if (fw->fw_entry) 8512 release_firmware(fw->fw_entry);
8512 release_firmware(fw->fw_entry);
8513 fw->fw_entry = NULL; 8513 fw->fw_entry = NULL;
8514} 8514}
8515 8515
@@ -8609,7 +8609,7 @@ static int ipw2100_ucode_download(struct ipw2100_priv *priv,
8609 struct net_device *dev = priv->net_dev; 8609 struct net_device *dev = priv->net_dev;
8610 const unsigned char *microcode_data = fw->uc.data; 8610 const unsigned char *microcode_data = fw->uc.data;
8611 unsigned int microcode_data_left = fw->uc.size; 8611 unsigned int microcode_data_left = fw->uc.size;
8612 void __iomem *reg = (void __iomem *)dev->base_addr; 8612 void __iomem *reg = priv->ioaddr;
8613 8613
8614 struct symbol_alive_response response; 8614 struct symbol_alive_response response;
8615 int i, j; 8615 int i, j;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 99cba968aa58..973125242490 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -135,15 +135,6 @@ enum {
135 IPW_HW_STATE_ENABLED = 0 135 IPW_HW_STATE_ENABLED = 0
136}; 136};
137 137
138struct ssid_context {
139 char ssid[IW_ESSID_MAX_SIZE + 1];
140 int ssid_len;
141 unsigned char bssid[ETH_ALEN];
142 int port_type;
143 int channel;
144
145};
146
147extern const char *port_type_str[]; 138extern const char *port_type_str[];
148extern const char *band_str[]; 139extern const char *band_str[];
149 140
@@ -488,6 +479,7 @@ enum {
488#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */ 479#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */
489 480
490struct ipw2100_priv { 481struct ipw2100_priv {
482 void __iomem *ioaddr;
491 483
492 int stop_hang_check; /* Set 1 when shutting down to kill hang_check */ 484 int stop_hang_check; /* Set 1 when shutting down to kill hang_check */
493 int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */ 485 int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 2b022571a859..0036737fe8e3 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <net/cfg80211-wext.h> 35#include <net/cfg80211-wext.h>
36#include "ipw2200.h" 36#include "ipw2200.h"
37#include "ipw.h"
37 38
38 39
39#ifndef KBUILD_EXTMOD 40#ifndef KBUILD_EXTMOD
@@ -2191,6 +2192,7 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2191{ 2192{
2192 int rc = 0; 2193 int rc = 0;
2193 unsigned long flags; 2194 unsigned long flags;
2195 unsigned long now, end;
2194 2196
2195 spin_lock_irqsave(&priv->lock, flags); 2197 spin_lock_irqsave(&priv->lock, flags);
2196 if (priv->status & STATUS_HCMD_ACTIVE) { 2198 if (priv->status & STATUS_HCMD_ACTIVE) {
@@ -2232,10 +2234,20 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2232 } 2234 }
2233 spin_unlock_irqrestore(&priv->lock, flags); 2235 spin_unlock_irqrestore(&priv->lock, flags);
2234 2236
2237 now = jiffies;
2238 end = now + HOST_COMPLETE_TIMEOUT;
2239again:
2235 rc = wait_event_interruptible_timeout(priv->wait_command_queue, 2240 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2236 !(priv-> 2241 !(priv->
2237 status & STATUS_HCMD_ACTIVE), 2242 status & STATUS_HCMD_ACTIVE),
2238 HOST_COMPLETE_TIMEOUT); 2243 end - now);
2244 if (rc < 0) {
2245 now = jiffies;
2246 if (time_before(now, end))
2247 goto again;
2248 rc = 0;
2249 }
2250
2239 if (rc == 0) { 2251 if (rc == 0) {
2240 spin_lock_irqsave(&priv->lock, flags); 2252 spin_lock_irqsave(&priv->lock, flags);
2241 if (priv->status & STATUS_HCMD_ACTIVE) { 2253 if (priv->status & STATUS_HCMD_ACTIVE) {
@@ -3657,8 +3669,7 @@ static int ipw_load(struct ipw_priv *priv)
3657 priv->rxq = NULL; 3669 priv->rxq = NULL;
3658 } 3670 }
3659 ipw_tx_queue_free(priv); 3671 ipw_tx_queue_free(priv);
3660 if (raw) 3672 release_firmware(raw);
3661 release_firmware(raw);
3662#ifdef CONFIG_PM 3673#ifdef CONFIG_PM
3663 fw_loaded = 0; 3674 fw_loaded = 0;
3664 raw = NULL; 3675 raw = NULL;
@@ -7024,7 +7035,7 @@ static int ipw_qos_activate(struct ipw_priv *priv,
7024 cpu_to_le16(burst_duration); 7035 cpu_to_le16(burst_duration);
7025 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7036 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7026 if (type == IEEE_B) { 7037 if (type == IEEE_B) {
7027 IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n", 7038 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
7028 type); 7039 type);
7029 if (priv->qos_data.qos_enable == 0) 7040 if (priv->qos_data.qos_enable == 0)
7030 active_one = &def_parameters_CCK; 7041 active_one = &def_parameters_CCK;
@@ -11432,20 +11443,6 @@ static void ipw_bg_down(struct work_struct *work)
11432 mutex_unlock(&priv->mutex); 11443 mutex_unlock(&priv->mutex);
11433} 11444}
11434 11445
11435/* Called by register_netdev() */
11436static int ipw_net_init(struct net_device *dev)
11437{
11438 int rc = 0;
11439 struct ipw_priv *priv = libipw_priv(dev);
11440
11441 mutex_lock(&priv->mutex);
11442 if (ipw_up(priv))
11443 rc = -EIO;
11444 mutex_unlock(&priv->mutex);
11445
11446 return rc;
11447}
11448
11449static int ipw_wdev_init(struct net_device *dev) 11446static int ipw_wdev_init(struct net_device *dev)
11450{ 11447{
11451 int i, rc = 0; 11448 int i, rc = 0;
@@ -11533,6 +11530,9 @@ static int ipw_wdev_init(struct net_device *dev)
11533 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band; 11530 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11534 } 11531 }
11535 11532
11533 wdev->wiphy->cipher_suites = ipw_cipher_suites;
11534 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11535
11536 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 11536 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11537 11537
11538 /* With that information in place, we can now register the wiphy... */ 11538 /* With that information in place, we can now register the wiphy... */
@@ -11711,7 +11711,6 @@ static void ipw_prom_free(struct ipw_priv *priv)
11711#endif 11711#endif
11712 11712
11713static const struct net_device_ops ipw_netdev_ops = { 11713static const struct net_device_ops ipw_netdev_ops = {
11714 .ndo_init = ipw_net_init,
11715 .ndo_open = ipw_net_open, 11714 .ndo_open = ipw_net_open,
11716 .ndo_stop = ipw_net_stop, 11715 .ndo_stop = ipw_net_stop,
11717 .ndo_set_rx_mode = ipw_net_set_multicast_list, 11716 .ndo_set_rx_mode = ipw_net_set_multicast_list,
@@ -11826,10 +11825,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11826 net_dev->wireless_data = &priv->wireless_data; 11825 net_dev->wireless_data = &priv->wireless_data;
11827 net_dev->wireless_handlers = &ipw_wx_handler_def; 11826 net_dev->wireless_handlers = &ipw_wx_handler_def;
11828 net_dev->ethtool_ops = &ipw_ethtool_ops; 11827 net_dev->ethtool_ops = &ipw_ethtool_ops;
11829 net_dev->irq = pdev->irq;
11830 net_dev->base_addr = (unsigned long)priv->hw_base;
11831 net_dev->mem_start = pci_resource_start(pdev, 0);
11832 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11833 11828
11834 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); 11829 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11835 if (err) { 11830 if (err) {
@@ -11838,17 +11833,24 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11838 goto out_release_irq; 11833 goto out_release_irq;
11839 } 11834 }
11840 11835
11841 mutex_unlock(&priv->mutex); 11836 if (ipw_up(priv)) {
11842 err = register_netdev(net_dev); 11837 mutex_unlock(&priv->mutex);
11843 if (err) { 11838 err = -EIO;
11844 IPW_ERROR("failed to register network device\n");
11845 goto out_remove_sysfs; 11839 goto out_remove_sysfs;
11846 } 11840 }
11847 11841
11842 mutex_unlock(&priv->mutex);
11843
11848 err = ipw_wdev_init(net_dev); 11844 err = ipw_wdev_init(net_dev);
11849 if (err) { 11845 if (err) {
11850 IPW_ERROR("failed to register wireless device\n"); 11846 IPW_ERROR("failed to register wireless device\n");
11851 goto out_unregister_netdev; 11847 goto out_remove_sysfs;
11848 }
11849
11850 err = register_netdev(net_dev);
11851 if (err) {
11852 IPW_ERROR("failed to register network device\n");
11853 goto out_unregister_wiphy;
11852 } 11854 }
11853 11855
11854#ifdef CONFIG_IPW2200_PROMISCUOUS 11856#ifdef CONFIG_IPW2200_PROMISCUOUS
@@ -11857,10 +11859,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11857 if (err) { 11859 if (err) {
11858 IPW_ERROR("Failed to register promiscuous network " 11860 IPW_ERROR("Failed to register promiscuous network "
11859 "device (error %d).\n", err); 11861 "device (error %d).\n", err);
11860 wiphy_unregister(priv->ieee->wdev.wiphy); 11862 unregister_netdev(priv->net_dev);
11861 kfree(priv->ieee->a_band.channels); 11863 goto out_unregister_wiphy;
11862 kfree(priv->ieee->bg_band.channels);
11863 goto out_unregister_netdev;
11864 } 11864 }
11865 } 11865 }
11866#endif 11866#endif
@@ -11872,8 +11872,10 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11872 11872
11873 return 0; 11873 return 0;
11874 11874
11875 out_unregister_netdev: 11875 out_unregister_wiphy:
11876 unregister_netdev(priv->net_dev); 11876 wiphy_unregister(priv->ieee->wdev.wiphy);
11877 kfree(priv->ieee->a_band.channels);
11878 kfree(priv->ieee->bg_band.channels);
11877 out_remove_sysfs: 11879 out_remove_sysfs:
11878 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); 11880 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11879 out_release_irq: 11881 out_release_irq:
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 8874588fb929..0b22fb421735 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -584,61 +584,6 @@ struct libipw_tim_parameters {
584 584
585/*******************************************************/ 585/*******************************************************/
586 586
587enum { /* libipw_basic_report.map */
588 LIBIPW_BASIC_MAP_BSS = (1 << 0),
589 LIBIPW_BASIC_MAP_OFDM = (1 << 1),
590 LIBIPW_BASIC_MAP_UNIDENTIFIED = (1 << 2),
591 LIBIPW_BASIC_MAP_RADAR = (1 << 3),
592 LIBIPW_BASIC_MAP_UNMEASURED = (1 << 4),
593 /* Bits 5-7 are reserved */
594
595};
596struct libipw_basic_report {
597 u8 channel;
598 __le64 start_time;
599 __le16 duration;
600 u8 map;
601} __packed;
602
603enum { /* libipw_measurement_request.mode */
604 /* Bit 0 is reserved */
605 LIBIPW_MEASUREMENT_ENABLE = (1 << 1),
606 LIBIPW_MEASUREMENT_REQUEST = (1 << 2),
607 LIBIPW_MEASUREMENT_REPORT = (1 << 3),
608 /* Bits 4-7 are reserved */
609};
610
611enum {
612 LIBIPW_REPORT_BASIC = 0, /* required */
613 LIBIPW_REPORT_CCA = 1, /* optional */
614 LIBIPW_REPORT_RPI = 2, /* optional */
615 /* 3-255 reserved */
616};
617
618struct libipw_measurement_params {
619 u8 channel;
620 __le64 start_time;
621 __le16 duration;
622} __packed;
623
624struct libipw_measurement_request {
625 struct libipw_info_element ie;
626 u8 token;
627 u8 mode;
628 u8 type;
629 struct libipw_measurement_params params[0];
630} __packed;
631
632struct libipw_measurement_report {
633 struct libipw_info_element ie;
634 u8 token;
635 u8 mode;
636 u8 type;
637 union {
638 struct libipw_basic_report basic[0];
639 } u;
640} __packed;
641
642struct libipw_tpc_report { 587struct libipw_tpc_report {
643 u8 transmit_power; 588 u8 transmit_power;
644 u8 link_margin; 589 u8 link_margin;
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index c4955d25a19a..02e057923236 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -77,8 +77,8 @@ static struct libipw_frag_entry *libipw_frag_cache_find(struct
77 77
78 if (entry->skb != NULL && entry->seq == seq && 78 if (entry->skb != NULL && entry->seq == seq &&
79 (entry->last_frag + 1 == frag || frag == -1) && 79 (entry->last_frag + 1 == frag || frag == -1) &&
80 !compare_ether_addr(entry->src_addr, src) && 80 ether_addr_equal(entry->src_addr, src) &&
81 !compare_ether_addr(entry->dst_addr, dst)) 81 ether_addr_equal(entry->dst_addr, dst))
82 return entry; 82 return entry;
83 } 83 }
84 84
@@ -245,12 +245,12 @@ static int libipw_is_eapol_frame(struct libipw_device *ieee,
245 /* check that the frame is unicast frame to us */ 245 /* check that the frame is unicast frame to us */
246 if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == 246 if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
247 IEEE80211_FCTL_TODS && 247 IEEE80211_FCTL_TODS &&
248 !compare_ether_addr(hdr->addr1, dev->dev_addr) && 248 ether_addr_equal(hdr->addr1, dev->dev_addr) &&
249 !compare_ether_addr(hdr->addr3, dev->dev_addr)) { 249 ether_addr_equal(hdr->addr3, dev->dev_addr)) {
250 /* ToDS frame with own addr BSSID and DA */ 250 /* ToDS frame with own addr BSSID and DA */
251 } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == 251 } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
252 IEEE80211_FCTL_FROMDS && 252 IEEE80211_FCTL_FROMDS &&
253 !compare_ether_addr(hdr->addr1, dev->dev_addr)) { 253 ether_addr_equal(hdr->addr1, dev->dev_addr)) {
254 /* FromDS frame with own addr as DA */ 254 /* FromDS frame with own addr as DA */
255 } else 255 } else
256 return 0; 256 return 0;
@@ -523,8 +523,8 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
523 523
524 if (ieee->iw_mode == IW_MODE_MASTER && !wds && 524 if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
525 (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == 525 (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
526 IEEE80211_FCTL_FROMDS && ieee->stadev 526 IEEE80211_FCTL_FROMDS && ieee->stadev &&
527 && !compare_ether_addr(hdr->addr2, ieee->assoc_ap_addr)) { 527 ether_addr_equal(hdr->addr2, ieee->assoc_ap_addr)) {
528 /* Frame from BSSID of the AP for which we are a client */ 528 /* Frame from BSSID of the AP for which we are a client */
529 skb->dev = dev = ieee->stadev; 529 skb->dev = dev = ieee->stadev;
530 stats = hostap_get_stats(dev); 530 stats = hostap_get_stats(dev);
@@ -1468,7 +1468,7 @@ static inline int is_same_network(struct libipw_network *src,
1468 * as one network */ 1468 * as one network */
1469 return ((src->ssid_len == dst->ssid_len) && 1469 return ((src->ssid_len == dst->ssid_len) &&
1470 (src->channel == dst->channel) && 1470 (src->channel == dst->channel) &&
1471 !compare_ether_addr(src->bssid, dst->bssid) && 1471 ether_addr_equal(src->bssid, dst->bssid) &&
1472 !memcmp(src->ssid, dst->ssid, src->ssid_len)); 1472 !memcmp(src->ssid, dst->ssid, src->ssid_len));
1473} 1473}
1474 1474
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index b25c01be0d90..87e539894330 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -453,10 +453,10 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
453 switch (il->iw_mode) { 453 switch (il->iw_mode) {
454 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */ 454 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
455 /* packets to our IBSS update information */ 455 /* packets to our IBSS update information */
456 return !compare_ether_addr(header->addr3, il->bssid); 456 return ether_addr_equal(header->addr3, il->bssid);
457 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */ 457 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
458 /* packets to our IBSS update information */ 458 /* packets to our IBSS update information */
459 return !compare_ether_addr(header->addr2, il->bssid); 459 return ether_addr_equal(header->addr2, il->bssid);
460 default: 460 default:
461 return 1; 461 return 1;
462 } 462 }
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index c46275a92565..509301a5e7e2 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -2565,7 +2565,7 @@ il4965_find_station(struct il_priv *il, const u8 *addr)
2565 spin_lock_irqsave(&il->sta_lock, flags); 2565 spin_lock_irqsave(&il->sta_lock, flags);
2566 for (i = start; i < il->hw_params.max_stations; i++) 2566 for (i = start; i < il->hw_params.max_stations; i++)
2567 if (il->stations[i].used && 2567 if (il->stations[i].used &&
2568 (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) { 2568 ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
2569 ret = i; 2569 ret = i;
2570 goto out; 2570 goto out;
2571 } 2571 }
@@ -2850,9 +2850,9 @@ void
2850il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, 2850il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2851 struct ieee80211_tx_info *info) 2851 struct ieee80211_tx_info *info)
2852{ 2852{
2853 struct ieee80211_tx_rate *r = &info->control.rates[0]; 2853 struct ieee80211_tx_rate *r = &info->status.rates[0];
2854 2854
2855 info->antenna_sel_tx = 2855 info->status.antenna =
2856 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); 2856 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2857 if (rate_n_flags & RATE_MCS_HT_MSK) 2857 if (rate_n_flags & RATE_MCS_HT_MSK)
2858 r->flags |= IEEE80211_TX_RC_MCS; 2858 r->flags |= IEEE80211_TX_RC_MCS;
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 11ab1247fae1..f3b8e91aa3dc 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -873,7 +873,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
873 tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) || 873 tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
874 tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) || 874 tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
875 tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) || 875 tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
876 tbl_type.ant_type != info->antenna_sel_tx || 876 tbl_type.ant_type != info->status.antenna ||
877 !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS) 877 !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
878 || !!(tx_rate & RATE_MCS_GF_MSK) != 878 || !!(tx_rate & RATE_MCS_GF_MSK) !=
879 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) { 879 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index eaf249452e51..cbf2dc18341f 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1896,8 +1896,8 @@ il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
1896 sta_id = il->hw_params.bcast_id; 1896 sta_id = il->hw_params.bcast_id;
1897 else 1897 else
1898 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) { 1898 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
1899 if (!compare_ether_addr 1899 if (ether_addr_equal(il->stations[i].sta.sta.addr,
1900 (il->stations[i].sta.sta.addr, addr)) { 1900 addr)) {
1901 sta_id = i; 1901 sta_id = i;
1902 break; 1902 break;
1903 } 1903 }
@@ -1926,7 +1926,7 @@ il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
1926 1926
1927 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && 1927 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
1928 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) && 1928 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
1929 !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) { 1929 ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
1930 D_ASSOC("STA %d (%pM) already added, not adding again.\n", 1930 D_ASSOC("STA %d (%pM) already added, not adding again.\n",
1931 sta_id, addr); 1931 sta_id, addr);
1932 return sta_id; 1932 return sta_id;
@@ -3744,10 +3744,10 @@ il_full_rxon_required(struct il_priv *il)
3744 3744
3745 /* These items are only settable from the full RXON command */ 3745 /* These items are only settable from the full RXON command */
3746 CHK(!il_is_associated(il)); 3746 CHK(!il_is_associated(il));
3747 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr)); 3747 CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
3748 CHK(compare_ether_addr(staging->node_addr, active->node_addr)); 3748 CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
3749 CHK(compare_ether_addr 3749 CHK(!ether_addr_equal(staging->wlap_bssid_addr,
3750 (staging->wlap_bssid_addr, active->wlap_bssid_addr)); 3750 active->wlap_bssid_addr));
3751 CHK_NEQ(staging->dev_type, active->dev_type); 3751 CHK_NEQ(staging->dev_type, active->dev_type);
3752 CHK_NEQ(staging->channel, active->channel); 3752 CHK_NEQ(staging->channel, active->channel);
3753 CHK_NEQ(staging->air_propagation, active->air_propagation); 3753 CHK_NEQ(staging->air_propagation, active->air_propagation);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 2fe62730dddd..db6c6e528022 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -113,20 +113,21 @@ config IWLWIFI_DEVICE_TESTMODE
113 generic netlink message via NL80211_TESTMODE channel. 113 generic netlink message via NL80211_TESTMODE channel.
114 114
115config IWLWIFI_P2P 115config IWLWIFI_P2P
116 bool "iwlwifi experimental P2P support" 116 def_bool y
117 depends on IWLWIFI 117 bool "iwlwifi experimental P2P support"
118 help 118 depends on IWLWIFI
119 This option enables experimental P2P support for some devices 119 help
120 based on microcode support. Since P2P support is still under 120 This option enables experimental P2P support for some devices
121 development, this option may even enable it for some devices 121 based on microcode support. Since P2P support is still under
122 now that turn out to not support it in the future due to 122 development, this option may even enable it for some devices
123 microcode restrictions. 123 now that turn out to not support it in the future due to
124 microcode restrictions.
124 125
125 To determine if your microcode supports the experimental P2P 126 To determine if your microcode supports the experimental P2P
126 offered by this option, check if the driver advertises AP 127 offered by this option, check if the driver advertises AP
127 support when it is loaded. 128 support when it is loaded.
128 129
129 Say Y only if you want to experiment with P2P. 130 Say Y only if you want to experiment with P2P.
130 131
131config IWLWIFI_EXPERIMENTAL_MFP 132config IWLWIFI_EXPERIMENTAL_MFP
132 bool "support MFP (802.11w) even if uCode doesn't advertise" 133 bool "support MFP (802.11w) even if uCode doesn't advertise"
@@ -136,3 +137,11 @@ config IWLWIFI_EXPERIMENTAL_MFP
136 even if the microcode doesn't advertise it. 137 even if the microcode doesn't advertise it.
137 138
138 Say Y only if you want to experiment with MFP. 139 Say Y only if you want to experiment with MFP.
140
141config IWLWIFI_UCODE16
142 bool "support uCode 16.0"
143 depends on IWLWIFI
144 help
145 This option enables support for uCode version 16.0.
146
147 Say Y if you want to use 16.0 microcode.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 85d163ed3db1..406f297a9a56 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -5,9 +5,9 @@ iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o iwl-debug.o
5iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o 5iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
6iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o 6iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
7 7
8iwlwifi-objs += iwl-core.o iwl-eeprom.o iwl-power.o 8iwlwifi-objs += iwl-eeprom.o iwl-power.o
9iwlwifi-objs += iwl-scan.o iwl-led.o 9iwlwifi-objs += iwl-scan.o iwl-led.o
10iwlwifi-objs += iwl-agn-rxon.o 10iwlwifi-objs += iwl-agn-rxon.o iwl-agn-devices.o
11iwlwifi-objs += iwl-5000.o 11iwlwifi-objs += iwl-5000.o
12iwlwifi-objs += iwl-6000.o 12iwlwifi-objs += iwl-6000.o
13iwlwifi-objs += iwl-1000.o 13iwlwifi-objs += iwl-1000.o
@@ -17,6 +17,8 @@ iwlwifi-objs += iwl-drv.o
17iwlwifi-objs += iwl-notif-wait.o 17iwlwifi-objs += iwl-notif-wait.o
18iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o 18iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
19 19
20
21iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
20iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 22iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
21iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 23iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
22iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o 24iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 5b0d888f746b..2629a6602dfa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -24,30 +24,16 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/kernel.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/skbuff.h>
32#include <linux/netdevice.h>
33#include <net/mac80211.h>
34#include <linux/etherdevice.h>
35#include <asm/unaligned.h>
36#include <linux/stringify.h> 28#include <linux/stringify.h>
37 29#include "iwl-config.h"
38#include "iwl-eeprom.h"
39#include "iwl-dev.h"
40#include "iwl-core.h"
41#include "iwl-io.h"
42#include "iwl-agn.h"
43#include "iwl-agn-hw.h"
44#include "iwl-shared.h"
45#include "iwl-cfg.h" 30#include "iwl-cfg.h"
46#include "iwl-prph.h" 31#include "iwl-csr.h"
32#include "iwl-agn-hw.h"
47 33
48/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
49#define IWL1000_UCODE_API_MAX 6 35#define IWL1000_UCODE_API_MAX 5
50#define IWL100_UCODE_API_MAX 6 36#define IWL100_UCODE_API_MAX 5
51 37
52/* Oldest version we won't warn about */ 38/* Oldest version we won't warn about */
53#define IWL1000_UCODE_API_OK 5 39#define IWL1000_UCODE_API_OK 5
@@ -57,6 +43,10 @@
57#define IWL1000_UCODE_API_MIN 1 43#define IWL1000_UCODE_API_MIN 1
58#define IWL100_UCODE_API_MIN 5 44#define IWL100_UCODE_API_MIN 5
59 45
46/* EEPROM version */
47#define EEPROM_1000_TX_POWER_VERSION (4)
48#define EEPROM_1000_EEPROM_VERSION (0x15C)
49
60#define IWL1000_FW_PRE "iwlwifi-1000-" 50#define IWL1000_FW_PRE "iwlwifi-1000-"
61#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode" 51#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
62 52
@@ -64,100 +54,8 @@
64#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode" 54#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
65 55
66 56
67/*
68 * For 1000, use advance thermal throttling critical temperature threshold,
69 * but legacy thermal management implementation for now.
70 * This is for the reason of 1000 uCode using advance thermal throttling API
71 * but not implement ct_kill_exit based on ct_kill exit temperature
72 * so the thermal throttling will still based on legacy thermal throttling
73 * management.
74 * The code here need to be modified once 1000 uCode has the advanced thermal
75 * throttling algorithm in place
76 */
77static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
78{
79 /* want Celsius */
80 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
81 hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
82}
83
84/* NIC configuration for 1000 series */
85static void iwl1000_nic_config(struct iwl_priv *priv)
86{
87 /* set CSR_HW_CONFIG_REG for uCode use */
88 iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
89 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
90 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
91
92 /* Setting digital SVR for 1000 card to 1.32V */
93 /* locking is acquired in iwl_set_bits_mask_prph() function */
94 iwl_set_bits_mask_prph(trans(priv), APMG_DIGITAL_SVR_REG,
95 APMG_SVR_DIGITAL_VOLTAGE_1_32,
96 ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
97}
98
99static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
100 .min_nrg_cck = 95,
101 .auto_corr_min_ofdm = 90,
102 .auto_corr_min_ofdm_mrc = 170,
103 .auto_corr_min_ofdm_x1 = 120,
104 .auto_corr_min_ofdm_mrc_x1 = 240,
105
106 .auto_corr_max_ofdm = 120,
107 .auto_corr_max_ofdm_mrc = 210,
108 .auto_corr_max_ofdm_x1 = 155,
109 .auto_corr_max_ofdm_mrc_x1 = 290,
110
111 .auto_corr_min_cck = 125,
112 .auto_corr_max_cck = 200,
113 .auto_corr_min_cck_mrc = 170,
114 .auto_corr_max_cck_mrc = 400,
115 .nrg_th_cck = 95,
116 .nrg_th_ofdm = 95,
117
118 .barker_corr_th_min = 190,
119 .barker_corr_th_min_mrc = 390,
120 .nrg_th_cca = 62,
121};
122
123static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
124{
125 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
126
127 hw_params(priv).tx_chains_num =
128 num_of_ant(hw_params(priv).valid_tx_ant);
129 if (cfg(priv)->rx_with_siso_diversity)
130 hw_params(priv).rx_chains_num = 1;
131 else
132 hw_params(priv).rx_chains_num =
133 num_of_ant(hw_params(priv).valid_rx_ant);
134
135 iwl1000_set_ct_threshold(priv);
136
137 /* Set initial sensitivity parameters */
138 hw_params(priv).sens = &iwl1000_sensitivity;
139}
140
141static struct iwl_lib_ops iwl1000_lib = {
142 .set_hw_params = iwl1000_hw_set_hw_params,
143 .nic_config = iwl1000_nic_config,
144 .eeprom_ops = {
145 .regulatory_bands = {
146 EEPROM_REG_BAND_1_CHANNELS,
147 EEPROM_REG_BAND_2_CHANNELS,
148 EEPROM_REG_BAND_3_CHANNELS,
149 EEPROM_REG_BAND_4_CHANNELS,
150 EEPROM_REG_BAND_5_CHANNELS,
151 EEPROM_REG_BAND_24_HT40_CHANNELS,
152 EEPROM_REGULATORY_BAND_NO_HT40,
153 },
154 },
155 .temperature = iwlagn_temperature,
156};
157
158static const struct iwl_base_params iwl1000_base_params = { 57static const struct iwl_base_params iwl1000_base_params = {
159 .num_of_queues = IWLAGN_NUM_QUEUES, 58 .num_of_queues = IWLAGN_NUM_QUEUES,
160 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
161 .eeprom_size = OTP_LOW_IMAGE_SIZE, 59 .eeprom_size = OTP_LOW_IMAGE_SIZE,
162 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 60 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
163 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 61 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
@@ -166,15 +64,13 @@ static const struct iwl_base_params iwl1000_base_params = {
166 .support_ct_kill_exit = true, 64 .support_ct_kill_exit = true,
167 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 65 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
168 .chain_noise_scale = 1000, 66 .chain_noise_scale = 1000,
169 .wd_timeout = IWL_DEF_WD_TIMEOUT, 67 .wd_timeout = IWL_WATCHHDOG_DISABLED,
170 .max_event_log_size = 128, 68 .max_event_log_size = 128,
171 .wd_disable = true,
172}; 69};
173 70
174static const struct iwl_ht_params iwl1000_ht_params = { 71static const struct iwl_ht_params iwl1000_ht_params = {
175 .ht_greenfield_support = true, 72 .ht_greenfield_support = true,
176 .use_rts_for_aggregation = true, /* use rts/cts protection */ 73 .use_rts_for_aggregation = true, /* use rts/cts protection */
177 .smps_mode = IEEE80211_SMPS_DYNAMIC,
178}; 74};
179 75
180#define IWL_DEVICE_1000 \ 76#define IWL_DEVICE_1000 \
@@ -182,11 +78,11 @@ static const struct iwl_ht_params iwl1000_ht_params = {
182 .ucode_api_max = IWL1000_UCODE_API_MAX, \ 78 .ucode_api_max = IWL1000_UCODE_API_MAX, \
183 .ucode_api_ok = IWL1000_UCODE_API_OK, \ 79 .ucode_api_ok = IWL1000_UCODE_API_OK, \
184 .ucode_api_min = IWL1000_UCODE_API_MIN, \ 80 .ucode_api_min = IWL1000_UCODE_API_MIN, \
81 .device_family = IWL_DEVICE_FAMILY_1000, \
185 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 82 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
186 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 83 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
187 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 84 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
188 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 85 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
189 .lib = &iwl1000_lib, \
190 .base_params = &iwl1000_base_params, \ 86 .base_params = &iwl1000_base_params, \
191 .led_mode = IWL_LED_BLINK 87 .led_mode = IWL_LED_BLINK
192 88
@@ -206,11 +102,11 @@ const struct iwl_cfg iwl1000_bg_cfg = {
206 .ucode_api_max = IWL100_UCODE_API_MAX, \ 102 .ucode_api_max = IWL100_UCODE_API_MAX, \
207 .ucode_api_ok = IWL100_UCODE_API_OK, \ 103 .ucode_api_ok = IWL100_UCODE_API_OK, \
208 .ucode_api_min = IWL100_UCODE_API_MIN, \ 104 .ucode_api_min = IWL100_UCODE_API_MIN, \
105 .device_family = IWL_DEVICE_FAMILY_100, \
209 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 106 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
210 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 107 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
211 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 108 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
212 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 109 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
213 .lib = &iwl1000_lib, \
214 .base_params = &iwl1000_base_params, \ 110 .base_params = &iwl1000_base_params, \
215 .led_mode = IWL_LED_RF_STATE, \ 111 .led_mode = IWL_LED_RF_STATE, \
216 .rx_with_siso_diversity = true 112 .rx_with_siso_diversity = true
@@ -226,5 +122,5 @@ const struct iwl_cfg iwl100_bg_cfg = {
226 IWL_DEVICE_100, 122 IWL_DEVICE_100,
227}; 123};
228 124
229MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); 125MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
230MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX)); 126MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 5635b9e2c69e..7f793417c787 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -24,25 +24,12 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/kernel.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/skbuff.h>
32#include <linux/netdevice.h>
33#include <net/mac80211.h>
34#include <linux/etherdevice.h>
35#include <asm/unaligned.h>
36#include <linux/stringify.h> 28#include <linux/stringify.h>
37 29#include "iwl-config.h"
38#include "iwl-eeprom.h"
39#include "iwl-dev.h"
40#include "iwl-core.h"
41#include "iwl-io.h"
42#include "iwl-agn.h"
43#include "iwl-agn-hw.h"
44#include "iwl-shared.h"
45#include "iwl-cfg.h" 30#include "iwl-cfg.h"
31#include "iwl-agn-hw.h"
32#include "iwl-commands.h" /* needed for BT for now */
46 33
47/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
48#define IWL2030_UCODE_API_MAX 6 35#define IWL2030_UCODE_API_MAX 6
@@ -51,10 +38,10 @@
51#define IWL135_UCODE_API_MAX 6 38#define IWL135_UCODE_API_MAX 6
52 39
53/* Oldest version we won't warn about */ 40/* Oldest version we won't warn about */
54#define IWL2030_UCODE_API_OK 5 41#define IWL2030_UCODE_API_OK 6
55#define IWL2000_UCODE_API_OK 5 42#define IWL2000_UCODE_API_OK 6
56#define IWL105_UCODE_API_OK 5 43#define IWL105_UCODE_API_OK 6
57#define IWL135_UCODE_API_OK 5 44#define IWL135_UCODE_API_OK 6
58 45
59/* Lowest firmware API version supported */ 46/* Lowest firmware API version supported */
60#define IWL2030_UCODE_API_MIN 5 47#define IWL2030_UCODE_API_MIN 5
@@ -62,6 +49,11 @@
62#define IWL105_UCODE_API_MIN 5 49#define IWL105_UCODE_API_MIN 5
63#define IWL135_UCODE_API_MIN 5 50#define IWL135_UCODE_API_MIN 5
64 51
52/* EEPROM version */
53#define EEPROM_2000_TX_POWER_VERSION (6)
54#define EEPROM_2000_EEPROM_VERSION (0x805)
55
56
65#define IWL2030_FW_PRE "iwlwifi-2030-" 57#define IWL2030_FW_PRE "iwlwifi-2030-"
66#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode" 58#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
67 59
@@ -74,105 +66,9 @@
74#define IWL135_FW_PRE "iwlwifi-135-" 66#define IWL135_FW_PRE "iwlwifi-135-"
75#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode" 67#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
76 68
77static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
78{
79 /* want Celsius */
80 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
81 hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
82}
83
84/* NIC configuration for 2000 series */
85static void iwl2000_nic_config(struct iwl_priv *priv)
86{
87 iwl_rf_config(priv);
88
89 if (cfg(priv)->iq_invert)
90 iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
91 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
92}
93
94static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
95 .min_nrg_cck = 97,
96 .auto_corr_min_ofdm = 80,
97 .auto_corr_min_ofdm_mrc = 128,
98 .auto_corr_min_ofdm_x1 = 105,
99 .auto_corr_min_ofdm_mrc_x1 = 192,
100
101 .auto_corr_max_ofdm = 145,
102 .auto_corr_max_ofdm_mrc = 232,
103 .auto_corr_max_ofdm_x1 = 110,
104 .auto_corr_max_ofdm_mrc_x1 = 232,
105
106 .auto_corr_min_cck = 125,
107 .auto_corr_max_cck = 175,
108 .auto_corr_min_cck_mrc = 160,
109 .auto_corr_max_cck_mrc = 310,
110 .nrg_th_cck = 97,
111 .nrg_th_ofdm = 100,
112
113 .barker_corr_th_min = 190,
114 .barker_corr_th_min_mrc = 390,
115 .nrg_th_cca = 62,
116};
117
118static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
119{
120 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
121
122 hw_params(priv).tx_chains_num =
123 num_of_ant(hw_params(priv).valid_tx_ant);
124 if (cfg(priv)->rx_with_siso_diversity)
125 hw_params(priv).rx_chains_num = 1;
126 else
127 hw_params(priv).rx_chains_num =
128 num_of_ant(hw_params(priv).valid_rx_ant);
129
130 iwl2000_set_ct_threshold(priv);
131
132 /* Set initial sensitivity parameters */
133 hw_params(priv).sens = &iwl2000_sensitivity;
134}
135
136static struct iwl_lib_ops iwl2000_lib = {
137 .set_hw_params = iwl2000_hw_set_hw_params,
138 .nic_config = iwl2000_nic_config,
139 .eeprom_ops = {
140 .regulatory_bands = {
141 EEPROM_REG_BAND_1_CHANNELS,
142 EEPROM_REG_BAND_2_CHANNELS,
143 EEPROM_REG_BAND_3_CHANNELS,
144 EEPROM_REG_BAND_4_CHANNELS,
145 EEPROM_REG_BAND_5_CHANNELS,
146 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
147 EEPROM_REGULATORY_BAND_NO_HT40,
148 },
149 .enhanced_txpower = true,
150 },
151 .temperature = iwlagn_temperature,
152};
153
154static struct iwl_lib_ops iwl2030_lib = {
155 .set_hw_params = iwl2000_hw_set_hw_params,
156 .nic_config = iwl2000_nic_config,
157 .eeprom_ops = {
158 .regulatory_bands = {
159 EEPROM_REG_BAND_1_CHANNELS,
160 EEPROM_REG_BAND_2_CHANNELS,
161 EEPROM_REG_BAND_3_CHANNELS,
162 EEPROM_REG_BAND_4_CHANNELS,
163 EEPROM_REG_BAND_5_CHANNELS,
164 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
165 EEPROM_REGULATORY_BAND_NO_HT40,
166 },
167 .enhanced_txpower = true,
168 },
169 .temperature = iwlagn_temperature,
170};
171
172static const struct iwl_base_params iwl2000_base_params = { 69static const struct iwl_base_params iwl2000_base_params = {
173 .eeprom_size = OTP_LOW_IMAGE_SIZE, 70 .eeprom_size = OTP_LOW_IMAGE_SIZE,
174 .num_of_queues = IWLAGN_NUM_QUEUES, 71 .num_of_queues = IWLAGN_NUM_QUEUES,
175 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
176 .pll_cfg_val = 0, 72 .pll_cfg_val = 0,
177 .max_ll_items = OTP_MAX_LL_ITEMS_2x00, 73 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
178 .shadow_ram_support = true, 74 .shadow_ram_support = true,
@@ -191,7 +87,6 @@ static const struct iwl_base_params iwl2000_base_params = {
191static const struct iwl_base_params iwl2030_base_params = { 87static const struct iwl_base_params iwl2030_base_params = {
192 .eeprom_size = OTP_LOW_IMAGE_SIZE, 88 .eeprom_size = OTP_LOW_IMAGE_SIZE,
193 .num_of_queues = IWLAGN_NUM_QUEUES, 89 .num_of_queues = IWLAGN_NUM_QUEUES,
194 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
195 .pll_cfg_val = 0, 90 .pll_cfg_val = 0,
196 .max_ll_items = OTP_MAX_LL_ITEMS_2x00, 91 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
197 .shadow_ram_support = true, 92 .shadow_ram_support = true,
@@ -226,16 +121,15 @@ static const struct iwl_bt_params iwl2030_bt_params = {
226 .ucode_api_max = IWL2000_UCODE_API_MAX, \ 121 .ucode_api_max = IWL2000_UCODE_API_MAX, \
227 .ucode_api_ok = IWL2000_UCODE_API_OK, \ 122 .ucode_api_ok = IWL2000_UCODE_API_OK, \
228 .ucode_api_min = IWL2000_UCODE_API_MIN, \ 123 .ucode_api_min = IWL2000_UCODE_API_MIN, \
124 .device_family = IWL_DEVICE_FAMILY_2000, \
229 .max_inst_size = IWL60_RTC_INST_SIZE, \ 125 .max_inst_size = IWL60_RTC_INST_SIZE, \
230 .max_data_size = IWL60_RTC_DATA_SIZE, \ 126 .max_data_size = IWL60_RTC_DATA_SIZE, \
231 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 127 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
232 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 128 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
233 .lib = &iwl2000_lib, \
234 .base_params = &iwl2000_base_params, \ 129 .base_params = &iwl2000_base_params, \
235 .need_temp_offset_calib = true, \ 130 .need_temp_offset_calib = true, \
236 .temp_offset_v2 = true, \ 131 .temp_offset_v2 = true, \
237 .led_mode = IWL_LED_RF_STATE, \ 132 .led_mode = IWL_LED_RF_STATE
238 .iq_invert = true \
239 133
240const struct iwl_cfg iwl2000_2bgn_cfg = { 134const struct iwl_cfg iwl2000_2bgn_cfg = {
241 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", 135 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -254,18 +148,17 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
254 .ucode_api_max = IWL2030_UCODE_API_MAX, \ 148 .ucode_api_max = IWL2030_UCODE_API_MAX, \
255 .ucode_api_ok = IWL2030_UCODE_API_OK, \ 149 .ucode_api_ok = IWL2030_UCODE_API_OK, \
256 .ucode_api_min = IWL2030_UCODE_API_MIN, \ 150 .ucode_api_min = IWL2030_UCODE_API_MIN, \
151 .device_family = IWL_DEVICE_FAMILY_2030, \
257 .max_inst_size = IWL60_RTC_INST_SIZE, \ 152 .max_inst_size = IWL60_RTC_INST_SIZE, \
258 .max_data_size = IWL60_RTC_DATA_SIZE, \ 153 .max_data_size = IWL60_RTC_DATA_SIZE, \
259 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 154 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
260 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 155 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
261 .lib = &iwl2030_lib, \
262 .base_params = &iwl2030_base_params, \ 156 .base_params = &iwl2030_base_params, \
263 .bt_params = &iwl2030_bt_params, \ 157 .bt_params = &iwl2030_bt_params, \
264 .need_temp_offset_calib = true, \ 158 .need_temp_offset_calib = true, \
265 .temp_offset_v2 = true, \ 159 .temp_offset_v2 = true, \
266 .led_mode = IWL_LED_RF_STATE, \ 160 .led_mode = IWL_LED_RF_STATE, \
267 .adv_pm = true, \ 161 .adv_pm = true
268 .iq_invert = true \
269 162
270const struct iwl_cfg iwl2030_2bgn_cfg = { 163const struct iwl_cfg iwl2030_2bgn_cfg = {
271 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", 164 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -278,18 +171,17 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
278 .ucode_api_max = IWL105_UCODE_API_MAX, \ 171 .ucode_api_max = IWL105_UCODE_API_MAX, \
279 .ucode_api_ok = IWL105_UCODE_API_OK, \ 172 .ucode_api_ok = IWL105_UCODE_API_OK, \
280 .ucode_api_min = IWL105_UCODE_API_MIN, \ 173 .ucode_api_min = IWL105_UCODE_API_MIN, \
174 .device_family = IWL_DEVICE_FAMILY_105, \
281 .max_inst_size = IWL60_RTC_INST_SIZE, \ 175 .max_inst_size = IWL60_RTC_INST_SIZE, \
282 .max_data_size = IWL60_RTC_DATA_SIZE, \ 176 .max_data_size = IWL60_RTC_DATA_SIZE, \
283 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 177 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
284 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 178 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
285 .lib = &iwl2000_lib, \
286 .base_params = &iwl2000_base_params, \ 179 .base_params = &iwl2000_base_params, \
287 .need_temp_offset_calib = true, \ 180 .need_temp_offset_calib = true, \
288 .temp_offset_v2 = true, \ 181 .temp_offset_v2 = true, \
289 .led_mode = IWL_LED_RF_STATE, \ 182 .led_mode = IWL_LED_RF_STATE, \
290 .adv_pm = true, \ 183 .adv_pm = true, \
291 .rx_with_siso_diversity = true, \ 184 .rx_with_siso_diversity = true
292 .iq_invert = true \
293 185
294const struct iwl_cfg iwl105_bgn_cfg = { 186const struct iwl_cfg iwl105_bgn_cfg = {
295 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", 187 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -308,19 +200,18 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
308 .ucode_api_max = IWL135_UCODE_API_MAX, \ 200 .ucode_api_max = IWL135_UCODE_API_MAX, \
309 .ucode_api_ok = IWL135_UCODE_API_OK, \ 201 .ucode_api_ok = IWL135_UCODE_API_OK, \
310 .ucode_api_min = IWL135_UCODE_API_MIN, \ 202 .ucode_api_min = IWL135_UCODE_API_MIN, \
203 .device_family = IWL_DEVICE_FAMILY_135, \
311 .max_inst_size = IWL60_RTC_INST_SIZE, \ 204 .max_inst_size = IWL60_RTC_INST_SIZE, \
312 .max_data_size = IWL60_RTC_DATA_SIZE, \ 205 .max_data_size = IWL60_RTC_DATA_SIZE, \
313 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 206 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
314 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 207 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
315 .lib = &iwl2030_lib, \
316 .base_params = &iwl2030_base_params, \ 208 .base_params = &iwl2030_base_params, \
317 .bt_params = &iwl2030_bt_params, \ 209 .bt_params = &iwl2030_bt_params, \
318 .need_temp_offset_calib = true, \ 210 .need_temp_offset_calib = true, \
319 .temp_offset_v2 = true, \ 211 .temp_offset_v2 = true, \
320 .led_mode = IWL_LED_RF_STATE, \ 212 .led_mode = IWL_LED_RF_STATE, \
321 .adv_pm = true, \ 213 .adv_pm = true, \
322 .rx_with_siso_diversity = true, \ 214 .rx_with_siso_diversity = true
323 .iq_invert = true \
324 215
325const struct iwl_cfg iwl135_bgn_cfg = { 216const struct iwl_cfg iwl135_bgn_cfg = {
326 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", 217 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
@@ -328,7 +219,7 @@ const struct iwl_cfg iwl135_bgn_cfg = {
328 .ht_params = &iwl2000_ht_params, 219 .ht_params = &iwl2000_ht_params,
329}; 220};
330 221
331MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX)); 222MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
332MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX)); 223MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
333MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX)); 224MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
334MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX)); 225MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a805e97b89af..8e26bc825f23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -24,299 +24,47 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/kernel.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/sched.h>
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <net/mac80211.h>
35#include <linux/etherdevice.h>
36#include <asm/unaligned.h>
37#include <linux/stringify.h> 28#include <linux/stringify.h>
38 29#include "iwl-config.h"
39#include "iwl-eeprom.h"
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43#include "iwl-agn.h"
44#include "iwl-agn-hw.h"
45#include "iwl-trans.h"
46#include "iwl-shared.h"
47#include "iwl-cfg.h" 30#include "iwl-cfg.h"
48#include "iwl-prph.h" 31#include "iwl-agn-hw.h"
32#include "iwl-csr.h"
49 33
50/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
51#define IWL5000_UCODE_API_MAX 5 35#define IWL5000_UCODE_API_MAX 5
52#define IWL5150_UCODE_API_MAX 2 36#define IWL5150_UCODE_API_MAX 2
53 37
38/* Oldest version we won't warn about */
39#define IWL5000_UCODE_API_OK 5
40#define IWL5150_UCODE_API_OK 2
41
54/* Lowest firmware API version supported */ 42/* Lowest firmware API version supported */
55#define IWL5000_UCODE_API_MIN 1 43#define IWL5000_UCODE_API_MIN 1
56#define IWL5150_UCODE_API_MIN 1 44#define IWL5150_UCODE_API_MIN 1
57 45
46/* EEPROM versions */
47#define EEPROM_5000_TX_POWER_VERSION (4)
48#define EEPROM_5000_EEPROM_VERSION (0x11A)
49#define EEPROM_5050_TX_POWER_VERSION (4)
50#define EEPROM_5050_EEPROM_VERSION (0x21E)
51
58#define IWL5000_FW_PRE "iwlwifi-5000-" 52#define IWL5000_FW_PRE "iwlwifi-5000-"
59#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode" 53#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
60 54
61#define IWL5150_FW_PRE "iwlwifi-5150-" 55#define IWL5150_FW_PRE "iwlwifi-5150-"
62#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode" 56#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
63 57
64/* NIC configuration for 5000 series */
65static void iwl5000_nic_config(struct iwl_priv *priv)
66{
67 iwl_rf_config(priv);
68
69 /* W/A : NIC is stuck in a reset state after Early PCIe power off
70 * (PCIe power is lost before PERST# is asserted),
71 * causing ME FW to lose ownership and not being able to obtain it back.
72 */
73 iwl_set_bits_mask_prph(trans(priv), APMG_PS_CTRL_REG,
74 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
75 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
76}
77
78static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
79 .min_nrg_cck = 100,
80 .auto_corr_min_ofdm = 90,
81 .auto_corr_min_ofdm_mrc = 170,
82 .auto_corr_min_ofdm_x1 = 105,
83 .auto_corr_min_ofdm_mrc_x1 = 220,
84
85 .auto_corr_max_ofdm = 120,
86 .auto_corr_max_ofdm_mrc = 210,
87 .auto_corr_max_ofdm_x1 = 120,
88 .auto_corr_max_ofdm_mrc_x1 = 240,
89
90 .auto_corr_min_cck = 125,
91 .auto_corr_max_cck = 200,
92 .auto_corr_min_cck_mrc = 200,
93 .auto_corr_max_cck_mrc = 400,
94 .nrg_th_cck = 100,
95 .nrg_th_ofdm = 100,
96
97 .barker_corr_th_min = 190,
98 .barker_corr_th_min_mrc = 390,
99 .nrg_th_cca = 62,
100};
101
102static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
103 .min_nrg_cck = 95,
104 .auto_corr_min_ofdm = 90,
105 .auto_corr_min_ofdm_mrc = 170,
106 .auto_corr_min_ofdm_x1 = 105,
107 .auto_corr_min_ofdm_mrc_x1 = 220,
108
109 .auto_corr_max_ofdm = 120,
110 .auto_corr_max_ofdm_mrc = 210,
111 /* max = min for performance bug in 5150 DSP */
112 .auto_corr_max_ofdm_x1 = 105,
113 .auto_corr_max_ofdm_mrc_x1 = 220,
114
115 .auto_corr_min_cck = 125,
116 .auto_corr_max_cck = 200,
117 .auto_corr_min_cck_mrc = 170,
118 .auto_corr_max_cck_mrc = 400,
119 .nrg_th_cck = 95,
120 .nrg_th_ofdm = 95,
121
122 .barker_corr_th_min = 190,
123 .barker_corr_th_min_mrc = 390,
124 .nrg_th_cca = 62,
125};
126
127#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
128
129static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd)
130{
131 u16 temperature, voltage;
132 __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd,
133 EEPROM_KELVIN_TEMPERATURE);
134
135 temperature = le16_to_cpu(temp_calib[0]);
136 voltage = le16_to_cpu(temp_calib[1]);
137
138 /* offset = temp - volt / coeff */
139 return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
140}
141
142static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
143{
144 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
145 s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
146 iwl_temp_calib_to_offset(priv->shrd);
147
148 hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
149}
150
151static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
152{
153 /* want Celsius */
154 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
155}
156
157static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
158{
159 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
160 BIT(IEEE80211_BAND_5GHZ);
161
162 hw_params(priv).tx_chains_num =
163 num_of_ant(hw_params(priv).valid_tx_ant);
164 hw_params(priv).rx_chains_num =
165 num_of_ant(hw_params(priv).valid_rx_ant);
166
167 iwl5000_set_ct_threshold(priv);
168
169 /* Set initial sensitivity parameters */
170 hw_params(priv).sens = &iwl5000_sensitivity;
171}
172
173static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
174{
175 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
176 BIT(IEEE80211_BAND_5GHZ);
177
178 hw_params(priv).tx_chains_num =
179 num_of_ant(hw_params(priv).valid_tx_ant);
180 hw_params(priv).rx_chains_num =
181 num_of_ant(hw_params(priv).valid_rx_ant);
182
183 iwl5150_set_ct_threshold(priv);
184
185 /* Set initial sensitivity parameters */
186 hw_params(priv).sens = &iwl5150_sensitivity;
187}
188
189static void iwl5150_temperature(struct iwl_priv *priv)
190{
191 u32 vt = 0;
192 s32 offset = iwl_temp_calib_to_offset(priv->shrd);
193
194 vt = le32_to_cpu(priv->statistics.common.temperature);
195 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
196 /* now vt hold the temperature in Kelvin */
197 priv->temperature = KELVIN_TO_CELSIUS(vt);
198 iwl_tt_handler(priv);
199}
200
201static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
202 struct ieee80211_channel_switch *ch_switch)
203{
204 /*
205 * MULTI-FIXME
206 * See iwlagn_mac_channel_switch.
207 */
208 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
209 struct iwl5000_channel_switch_cmd cmd;
210 const struct iwl_channel_info *ch_info;
211 u32 switch_time_in_usec, ucode_switch_time;
212 u16 ch;
213 u32 tsf_low;
214 u8 switch_count;
215 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
216 struct ieee80211_vif *vif = ctx->vif;
217 struct iwl_host_cmd hcmd = {
218 .id = REPLY_CHANNEL_SWITCH,
219 .len = { sizeof(cmd), },
220 .flags = CMD_SYNC,
221 .data = { &cmd, },
222 };
223
224 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
225 ch = ch_switch->channel->hw_value;
226 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
227 ctx->active.channel, ch);
228 cmd.channel = cpu_to_le16(ch);
229 cmd.rxon_flags = ctx->staging.flags;
230 cmd.rxon_filter_flags = ctx->staging.filter_flags;
231 switch_count = ch_switch->count;
232 tsf_low = ch_switch->timestamp & 0x0ffffffff;
233 /*
234 * calculate the ucode channel switch time
235 * adding TSF as one of the factor for when to switch
236 */
237 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
238 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
239 beacon_interval)) {
240 switch_count -= (priv->ucode_beacon_time -
241 tsf_low) / beacon_interval;
242 } else
243 switch_count = 0;
244 }
245 if (switch_count <= 1)
246 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
247 else {
248 switch_time_in_usec =
249 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
250 ucode_switch_time = iwl_usecs_to_beacons(priv,
251 switch_time_in_usec,
252 beacon_interval);
253 cmd.switch_time = iwl_add_beacon_time(priv,
254 priv->ucode_beacon_time,
255 ucode_switch_time,
256 beacon_interval);
257 }
258 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
259 cmd.switch_time);
260 ch_info = iwl_get_channel_info(priv, priv->band, ch);
261 if (ch_info)
262 cmd.expect_beacon = is_channel_radar(ch_info);
263 else {
264 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
265 ctx->active.channel, ch);
266 return -EFAULT;
267 }
268
269 return iwl_dvm_send_cmd(priv, &hcmd);
270}
271
272static struct iwl_lib_ops iwl5000_lib = {
273 .set_hw_params = iwl5000_hw_set_hw_params,
274 .set_channel_switch = iwl5000_hw_channel_switch,
275 .nic_config = iwl5000_nic_config,
276 .eeprom_ops = {
277 .regulatory_bands = {
278 EEPROM_REG_BAND_1_CHANNELS,
279 EEPROM_REG_BAND_2_CHANNELS,
280 EEPROM_REG_BAND_3_CHANNELS,
281 EEPROM_REG_BAND_4_CHANNELS,
282 EEPROM_REG_BAND_5_CHANNELS,
283 EEPROM_REG_BAND_24_HT40_CHANNELS,
284 EEPROM_REG_BAND_52_HT40_CHANNELS
285 },
286 },
287 .temperature = iwlagn_temperature,
288};
289
290static struct iwl_lib_ops iwl5150_lib = {
291 .set_hw_params = iwl5150_hw_set_hw_params,
292 .set_channel_switch = iwl5000_hw_channel_switch,
293 .nic_config = iwl5000_nic_config,
294 .eeprom_ops = {
295 .regulatory_bands = {
296 EEPROM_REG_BAND_1_CHANNELS,
297 EEPROM_REG_BAND_2_CHANNELS,
298 EEPROM_REG_BAND_3_CHANNELS,
299 EEPROM_REG_BAND_4_CHANNELS,
300 EEPROM_REG_BAND_5_CHANNELS,
301 EEPROM_REG_BAND_24_HT40_CHANNELS,
302 EEPROM_REG_BAND_52_HT40_CHANNELS
303 },
304 },
305 .temperature = iwl5150_temperature,
306};
307
308static const struct iwl_base_params iwl5000_base_params = { 58static const struct iwl_base_params iwl5000_base_params = {
309 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, 59 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
310 .num_of_queues = IWLAGN_NUM_QUEUES, 60 .num_of_queues = IWLAGN_NUM_QUEUES,
311 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
312 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 61 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
313 .led_compensation = 51, 62 .led_compensation = 51,
314 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 63 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
315 .chain_noise_scale = 1000, 64 .chain_noise_scale = 1000,
316 .wd_timeout = IWL_LONG_WD_TIMEOUT, 65 .wd_timeout = IWL_WATCHHDOG_DISABLED,
317 .max_event_log_size = 512, 66 .max_event_log_size = 512,
318 .no_idle_support = true, 67 .no_idle_support = true,
319 .wd_disable = true,
320}; 68};
321 69
322static const struct iwl_ht_params iwl5000_ht_params = { 70static const struct iwl_ht_params iwl5000_ht_params = {
@@ -326,12 +74,13 @@ static const struct iwl_ht_params iwl5000_ht_params = {
326#define IWL_DEVICE_5000 \ 74#define IWL_DEVICE_5000 \
327 .fw_name_pre = IWL5000_FW_PRE, \ 75 .fw_name_pre = IWL5000_FW_PRE, \
328 .ucode_api_max = IWL5000_UCODE_API_MAX, \ 76 .ucode_api_max = IWL5000_UCODE_API_MAX, \
77 .ucode_api_ok = IWL5000_UCODE_API_OK, \
329 .ucode_api_min = IWL5000_UCODE_API_MIN, \ 78 .ucode_api_min = IWL5000_UCODE_API_MIN, \
79 .device_family = IWL_DEVICE_FAMILY_5000, \
330 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 80 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
331 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 81 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
332 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \ 82 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
333 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ 83 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
334 .lib = &iwl5000_lib, \
335 .base_params = &iwl5000_base_params, \ 84 .base_params = &iwl5000_base_params, \
336 .led_mode = IWL_LED_BLINK 85 .led_mode = IWL_LED_BLINK
337 86
@@ -371,12 +120,13 @@ const struct iwl_cfg iwl5350_agn_cfg = {
371 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN", 120 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
372 .fw_name_pre = IWL5000_FW_PRE, 121 .fw_name_pre = IWL5000_FW_PRE,
373 .ucode_api_max = IWL5000_UCODE_API_MAX, 122 .ucode_api_max = IWL5000_UCODE_API_MAX,
123 .ucode_api_ok = IWL5000_UCODE_API_OK,
374 .ucode_api_min = IWL5000_UCODE_API_MIN, 124 .ucode_api_min = IWL5000_UCODE_API_MIN,
125 .device_family = IWL_DEVICE_FAMILY_5000,
375 .max_inst_size = IWLAGN_RTC_INST_SIZE, 126 .max_inst_size = IWLAGN_RTC_INST_SIZE,
376 .max_data_size = IWLAGN_RTC_DATA_SIZE, 127 .max_data_size = IWLAGN_RTC_DATA_SIZE,
377 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 128 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
378 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 129 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
379 .lib = &iwl5000_lib,
380 .base_params = &iwl5000_base_params, 130 .base_params = &iwl5000_base_params,
381 .ht_params = &iwl5000_ht_params, 131 .ht_params = &iwl5000_ht_params,
382 .led_mode = IWL_LED_BLINK, 132 .led_mode = IWL_LED_BLINK,
@@ -386,12 +136,13 @@ const struct iwl_cfg iwl5350_agn_cfg = {
386#define IWL_DEVICE_5150 \ 136#define IWL_DEVICE_5150 \
387 .fw_name_pre = IWL5150_FW_PRE, \ 137 .fw_name_pre = IWL5150_FW_PRE, \
388 .ucode_api_max = IWL5150_UCODE_API_MAX, \ 138 .ucode_api_max = IWL5150_UCODE_API_MAX, \
139 .ucode_api_ok = IWL5150_UCODE_API_OK, \
389 .ucode_api_min = IWL5150_UCODE_API_MIN, \ 140 .ucode_api_min = IWL5150_UCODE_API_MIN, \
141 .device_family = IWL_DEVICE_FAMILY_5150, \
390 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 142 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
391 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 143 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
392 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \ 144 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
393 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ 145 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
394 .lib = &iwl5150_lib, \
395 .base_params = &iwl5000_base_params, \ 146 .base_params = &iwl5000_base_params, \
396 .no_xtal_calib = true, \ 147 .no_xtal_calib = true, \
397 .led_mode = IWL_LED_BLINK, \ 148 .led_mode = IWL_LED_BLINK, \
@@ -409,5 +160,5 @@ const struct iwl_cfg iwl5150_abg_cfg = {
409 IWL_DEVICE_5150, 160 IWL_DEVICE_5150,
410}; 161};
411 162
412MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 163MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
413MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); 164MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 64060cd738b5..381b02cf339c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -24,26 +24,12 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/kernel.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/skbuff.h>
32#include <linux/netdevice.h>
33#include <net/mac80211.h>
34#include <linux/etherdevice.h>
35#include <asm/unaligned.h>
36#include <linux/stringify.h> 28#include <linux/stringify.h>
37 29#include "iwl-config.h"
38#include "iwl-eeprom.h"
39#include "iwl-dev.h"
40#include "iwl-core.h"
41#include "iwl-io.h"
42#include "iwl-agn.h"
43#include "iwl-agn-hw.h"
44#include "iwl-trans.h"
45#include "iwl-shared.h"
46#include "iwl-cfg.h" 30#include "iwl-cfg.h"
31#include "iwl-agn-hw.h"
32#include "iwl-commands.h" /* needed for BT for now */
47 33
48/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
49#define IWL6000_UCODE_API_MAX 6 35#define IWL6000_UCODE_API_MAX 6
@@ -53,12 +39,28 @@
53/* Oldest version we won't warn about */ 39/* Oldest version we won't warn about */
54#define IWL6000_UCODE_API_OK 4 40#define IWL6000_UCODE_API_OK 4
55#define IWL6000G2_UCODE_API_OK 5 41#define IWL6000G2_UCODE_API_OK 5
42#define IWL6050_UCODE_API_OK 5
43#define IWL6000G2B_UCODE_API_OK 6
56 44
57/* Lowest firmware API version supported */ 45/* Lowest firmware API version supported */
58#define IWL6000_UCODE_API_MIN 4 46#define IWL6000_UCODE_API_MIN 4
59#define IWL6050_UCODE_API_MIN 4 47#define IWL6050_UCODE_API_MIN 4
60#define IWL6000G2_UCODE_API_MIN 4 48#define IWL6000G2_UCODE_API_MIN 4
61 49
50/* EEPROM versions */
51#define EEPROM_6000_TX_POWER_VERSION (4)
52#define EEPROM_6000_EEPROM_VERSION (0x423)
53#define EEPROM_6050_TX_POWER_VERSION (4)
54#define EEPROM_6050_EEPROM_VERSION (0x532)
55#define EEPROM_6150_TX_POWER_VERSION (6)
56#define EEPROM_6150_EEPROM_VERSION (0x553)
57#define EEPROM_6005_TX_POWER_VERSION (6)
58#define EEPROM_6005_EEPROM_VERSION (0x709)
59#define EEPROM_6030_TX_POWER_VERSION (6)
60#define EEPROM_6030_EEPROM_VERSION (0x709)
61#define EEPROM_6035_TX_POWER_VERSION (6)
62#define EEPROM_6035_EEPROM_VERSION (0x753)
63
62#define IWL6000_FW_PRE "iwlwifi-6000-" 64#define IWL6000_FW_PRE "iwlwifi-6000-"
63#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode" 65#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
64 66
@@ -71,205 +73,9 @@
71#define IWL6030_FW_PRE "iwlwifi-6000g2b-" 73#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
72#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode" 74#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
73 75
74static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
75{
76 /* want Celsius */
77 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
78 hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
79}
80
81static void iwl6050_additional_nic_config(struct iwl_priv *priv)
82{
83 /* Indicate calibration version to uCode. */
84 if (iwl_eeprom_calib_version(priv->shrd) >= 6)
85 iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
86 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
87}
88
89static void iwl6150_additional_nic_config(struct iwl_priv *priv)
90{
91 /* Indicate calibration version to uCode. */
92 if (iwl_eeprom_calib_version(priv->shrd) >= 6)
93 iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
94 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
95 iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
96 CSR_GP_DRIVER_REG_BIT_6050_1x2);
97}
98
99static void iwl6000i_additional_nic_config(struct iwl_priv *priv)
100{
101 /* 2x2 IPA phy type */
102 iwl_write32(trans(priv), CSR_GP_DRIVER_REG,
103 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
104}
105
106/* NIC configuration for 6000 series */
107static void iwl6000_nic_config(struct iwl_priv *priv)
108{
109 iwl_rf_config(priv);
110
111 /* do additional nic configuration if needed */
112 if (cfg(priv)->additional_nic_config)
113 cfg(priv)->additional_nic_config(priv);
114}
115
116static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
117 .min_nrg_cck = 110,
118 .auto_corr_min_ofdm = 80,
119 .auto_corr_min_ofdm_mrc = 128,
120 .auto_corr_min_ofdm_x1 = 105,
121 .auto_corr_min_ofdm_mrc_x1 = 192,
122
123 .auto_corr_max_ofdm = 145,
124 .auto_corr_max_ofdm_mrc = 232,
125 .auto_corr_max_ofdm_x1 = 110,
126 .auto_corr_max_ofdm_mrc_x1 = 232,
127
128 .auto_corr_min_cck = 125,
129 .auto_corr_max_cck = 175,
130 .auto_corr_min_cck_mrc = 160,
131 .auto_corr_max_cck_mrc = 310,
132 .nrg_th_cck = 110,
133 .nrg_th_ofdm = 110,
134
135 .barker_corr_th_min = 190,
136 .barker_corr_th_min_mrc = 336,
137 .nrg_th_cca = 62,
138};
139
140static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
141{
142 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
143 BIT(IEEE80211_BAND_5GHZ);
144
145 hw_params(priv).tx_chains_num =
146 num_of_ant(hw_params(priv).valid_tx_ant);
147 if (cfg(priv)->rx_with_siso_diversity)
148 hw_params(priv).rx_chains_num = 1;
149 else
150 hw_params(priv).rx_chains_num =
151 num_of_ant(hw_params(priv).valid_rx_ant);
152
153 iwl6000_set_ct_threshold(priv);
154
155 /* Set initial sensitivity parameters */
156 hw_params(priv).sens = &iwl6000_sensitivity;
157
158}
159
160static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
161 struct ieee80211_channel_switch *ch_switch)
162{
163 /*
164 * MULTI-FIXME
165 * See iwlagn_mac_channel_switch.
166 */
167 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
168 struct iwl6000_channel_switch_cmd cmd;
169 const struct iwl_channel_info *ch_info;
170 u32 switch_time_in_usec, ucode_switch_time;
171 u16 ch;
172 u32 tsf_low;
173 u8 switch_count;
174 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
175 struct ieee80211_vif *vif = ctx->vif;
176 struct iwl_host_cmd hcmd = {
177 .id = REPLY_CHANNEL_SWITCH,
178 .len = { sizeof(cmd), },
179 .flags = CMD_SYNC,
180 .data = { &cmd, },
181 };
182
183 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
184 ch = ch_switch->channel->hw_value;
185 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
186 ctx->active.channel, ch);
187 cmd.channel = cpu_to_le16(ch);
188 cmd.rxon_flags = ctx->staging.flags;
189 cmd.rxon_filter_flags = ctx->staging.filter_flags;
190 switch_count = ch_switch->count;
191 tsf_low = ch_switch->timestamp & 0x0ffffffff;
192 /*
193 * calculate the ucode channel switch time
194 * adding TSF as one of the factor for when to switch
195 */
196 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
197 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
198 beacon_interval)) {
199 switch_count -= (priv->ucode_beacon_time -
200 tsf_low) / beacon_interval;
201 } else
202 switch_count = 0;
203 }
204 if (switch_count <= 1)
205 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
206 else {
207 switch_time_in_usec =
208 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
209 ucode_switch_time = iwl_usecs_to_beacons(priv,
210 switch_time_in_usec,
211 beacon_interval);
212 cmd.switch_time = iwl_add_beacon_time(priv,
213 priv->ucode_beacon_time,
214 ucode_switch_time,
215 beacon_interval);
216 }
217 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
218 cmd.switch_time);
219 ch_info = iwl_get_channel_info(priv, priv->band, ch);
220 if (ch_info)
221 cmd.expect_beacon = is_channel_radar(ch_info);
222 else {
223 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
224 ctx->active.channel, ch);
225 return -EFAULT;
226 }
227
228 return iwl_dvm_send_cmd(priv, &hcmd);
229}
230
231static struct iwl_lib_ops iwl6000_lib = {
232 .set_hw_params = iwl6000_hw_set_hw_params,
233 .set_channel_switch = iwl6000_hw_channel_switch,
234 .nic_config = iwl6000_nic_config,
235 .eeprom_ops = {
236 .regulatory_bands = {
237 EEPROM_REG_BAND_1_CHANNELS,
238 EEPROM_REG_BAND_2_CHANNELS,
239 EEPROM_REG_BAND_3_CHANNELS,
240 EEPROM_REG_BAND_4_CHANNELS,
241 EEPROM_REG_BAND_5_CHANNELS,
242 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
243 EEPROM_REG_BAND_52_HT40_CHANNELS
244 },
245 .enhanced_txpower = true,
246 },
247 .temperature = iwlagn_temperature,
248};
249
250static struct iwl_lib_ops iwl6030_lib = {
251 .set_hw_params = iwl6000_hw_set_hw_params,
252 .set_channel_switch = iwl6000_hw_channel_switch,
253 .nic_config = iwl6000_nic_config,
254 .eeprom_ops = {
255 .regulatory_bands = {
256 EEPROM_REG_BAND_1_CHANNELS,
257 EEPROM_REG_BAND_2_CHANNELS,
258 EEPROM_REG_BAND_3_CHANNELS,
259 EEPROM_REG_BAND_4_CHANNELS,
260 EEPROM_REG_BAND_5_CHANNELS,
261 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
262 EEPROM_REG_BAND_52_HT40_CHANNELS
263 },
264 .enhanced_txpower = true,
265 },
266 .temperature = iwlagn_temperature,
267};
268
269static const struct iwl_base_params iwl6000_base_params = { 76static const struct iwl_base_params iwl6000_base_params = {
270 .eeprom_size = OTP_LOW_IMAGE_SIZE, 77 .eeprom_size = OTP_LOW_IMAGE_SIZE,
271 .num_of_queues = IWLAGN_NUM_QUEUES, 78 .num_of_queues = IWLAGN_NUM_QUEUES,
272 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
273 .pll_cfg_val = 0, 79 .pll_cfg_val = 0,
274 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 80 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
275 .shadow_ram_support = true, 81 .shadow_ram_support = true,
@@ -286,7 +92,6 @@ static const struct iwl_base_params iwl6000_base_params = {
286static const struct iwl_base_params iwl6050_base_params = { 92static const struct iwl_base_params iwl6050_base_params = {
287 .eeprom_size = OTP_LOW_IMAGE_SIZE, 93 .eeprom_size = OTP_LOW_IMAGE_SIZE,
288 .num_of_queues = IWLAGN_NUM_QUEUES, 94 .num_of_queues = IWLAGN_NUM_QUEUES,
289 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
290 .pll_cfg_val = 0, 95 .pll_cfg_val = 0,
291 .max_ll_items = OTP_MAX_LL_ITEMS_6x50, 96 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
292 .shadow_ram_support = true, 97 .shadow_ram_support = true,
@@ -303,7 +108,6 @@ static const struct iwl_base_params iwl6050_base_params = {
303static const struct iwl_base_params iwl6000_g2_base_params = { 108static const struct iwl_base_params iwl6000_g2_base_params = {
304 .eeprom_size = OTP_LOW_IMAGE_SIZE, 109 .eeprom_size = OTP_LOW_IMAGE_SIZE,
305 .num_of_queues = IWLAGN_NUM_QUEUES, 110 .num_of_queues = IWLAGN_NUM_QUEUES,
306 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
307 .pll_cfg_val = 0, 111 .pll_cfg_val = 0,
308 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 112 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
309 .shadow_ram_support = true, 113 .shadow_ram_support = true,
@@ -336,11 +140,11 @@ static const struct iwl_bt_params iwl6000_bt_params = {
336 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 140 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
337 .ucode_api_ok = IWL6000G2_UCODE_API_OK, \ 141 .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
338 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 142 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
143 .device_family = IWL_DEVICE_FAMILY_6005, \
339 .max_inst_size = IWL60_RTC_INST_SIZE, \ 144 .max_inst_size = IWL60_RTC_INST_SIZE, \
340 .max_data_size = IWL60_RTC_DATA_SIZE, \ 145 .max_data_size = IWL60_RTC_DATA_SIZE, \
341 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \ 146 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
342 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 147 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
343 .lib = &iwl6000_lib, \
344 .base_params = &iwl6000_g2_base_params, \ 148 .base_params = &iwl6000_g2_base_params, \
345 .need_temp_offset_calib = true, \ 149 .need_temp_offset_calib = true, \
346 .led_mode = IWL_LED_RF_STATE 150 .led_mode = IWL_LED_RF_STATE
@@ -388,13 +192,13 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
388#define IWL_DEVICE_6030 \ 192#define IWL_DEVICE_6030 \
389 .fw_name_pre = IWL6030_FW_PRE, \ 193 .fw_name_pre = IWL6030_FW_PRE, \
390 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 194 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
391 .ucode_api_ok = IWL6000G2_UCODE_API_OK, \ 195 .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
392 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 196 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
197 .device_family = IWL_DEVICE_FAMILY_6030, \
393 .max_inst_size = IWL60_RTC_INST_SIZE, \ 198 .max_inst_size = IWL60_RTC_INST_SIZE, \
394 .max_data_size = IWL60_RTC_DATA_SIZE, \ 199 .max_data_size = IWL60_RTC_DATA_SIZE, \
395 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ 200 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
396 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 201 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
397 .lib = &iwl6030_lib, \
398 .base_params = &iwl6000_g2_base_params, \ 202 .base_params = &iwl6000_g2_base_params, \
399 .bt_params = &iwl6000_bt_params, \ 203 .bt_params = &iwl6000_bt_params, \
400 .need_temp_offset_calib = true, \ 204 .need_temp_offset_calib = true, \
@@ -461,14 +265,13 @@ const struct iwl_cfg iwl130_bg_cfg = {
461 .ucode_api_max = IWL6000_UCODE_API_MAX, \ 265 .ucode_api_max = IWL6000_UCODE_API_MAX, \
462 .ucode_api_ok = IWL6000_UCODE_API_OK, \ 266 .ucode_api_ok = IWL6000_UCODE_API_OK, \
463 .ucode_api_min = IWL6000_UCODE_API_MIN, \ 267 .ucode_api_min = IWL6000_UCODE_API_MIN, \
268 .device_family = IWL_DEVICE_FAMILY_6000i, \
464 .max_inst_size = IWL60_RTC_INST_SIZE, \ 269 .max_inst_size = IWL60_RTC_INST_SIZE, \
465 .max_data_size = IWL60_RTC_DATA_SIZE, \ 270 .max_data_size = IWL60_RTC_DATA_SIZE, \
466 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ 271 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
467 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ 272 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
468 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \ 273 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
469 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ 274 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
470 .lib = &iwl6000_lib, \
471 .additional_nic_config = iwl6000i_additional_nic_config,\
472 .base_params = &iwl6000_base_params, \ 275 .base_params = &iwl6000_base_params, \
473 .led_mode = IWL_LED_BLINK 276 .led_mode = IWL_LED_BLINK
474 277
@@ -492,12 +295,11 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
492 .fw_name_pre = IWL6050_FW_PRE, \ 295 .fw_name_pre = IWL6050_FW_PRE, \
493 .ucode_api_max = IWL6050_UCODE_API_MAX, \ 296 .ucode_api_max = IWL6050_UCODE_API_MAX, \
494 .ucode_api_min = IWL6050_UCODE_API_MIN, \ 297 .ucode_api_min = IWL6050_UCODE_API_MIN, \
298 .device_family = IWL_DEVICE_FAMILY_6050, \
495 .max_inst_size = IWL60_RTC_INST_SIZE, \ 299 .max_inst_size = IWL60_RTC_INST_SIZE, \
496 .max_data_size = IWL60_RTC_DATA_SIZE, \ 300 .max_data_size = IWL60_RTC_DATA_SIZE, \
497 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ 301 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
498 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ 302 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
499 .lib = &iwl6000_lib, \
500 .additional_nic_config = iwl6050_additional_nic_config, \
501 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ 303 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
502 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ 304 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
503 .base_params = &iwl6050_base_params, \ 305 .base_params = &iwl6050_base_params, \
@@ -519,10 +321,9 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
519 .fw_name_pre = IWL6050_FW_PRE, \ 321 .fw_name_pre = IWL6050_FW_PRE, \
520 .ucode_api_max = IWL6050_UCODE_API_MAX, \ 322 .ucode_api_max = IWL6050_UCODE_API_MAX, \
521 .ucode_api_min = IWL6050_UCODE_API_MIN, \ 323 .ucode_api_min = IWL6050_UCODE_API_MIN, \
324 .device_family = IWL_DEVICE_FAMILY_6150, \
522 .max_inst_size = IWL60_RTC_INST_SIZE, \ 325 .max_inst_size = IWL60_RTC_INST_SIZE, \
523 .max_data_size = IWL60_RTC_DATA_SIZE, \ 326 .max_data_size = IWL60_RTC_DATA_SIZE, \
524 .lib = &iwl6000_lib, \
525 .additional_nic_config = iwl6150_additional_nic_config, \
526 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ 327 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
527 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ 328 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
528 .base_params = &iwl6050_base_params, \ 329 .base_params = &iwl6050_base_params, \
@@ -546,17 +347,17 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
546 .ucode_api_max = IWL6000_UCODE_API_MAX, 347 .ucode_api_max = IWL6000_UCODE_API_MAX,
547 .ucode_api_ok = IWL6000_UCODE_API_OK, 348 .ucode_api_ok = IWL6000_UCODE_API_OK,
548 .ucode_api_min = IWL6000_UCODE_API_MIN, 349 .ucode_api_min = IWL6000_UCODE_API_MIN,
350 .device_family = IWL_DEVICE_FAMILY_6000,
549 .max_inst_size = IWL60_RTC_INST_SIZE, 351 .max_inst_size = IWL60_RTC_INST_SIZE,
550 .max_data_size = IWL60_RTC_DATA_SIZE, 352 .max_data_size = IWL60_RTC_DATA_SIZE,
551 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 353 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
552 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 354 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
553 .lib = &iwl6000_lib,
554 .base_params = &iwl6000_base_params, 355 .base_params = &iwl6000_base_params,
555 .ht_params = &iwl6000_ht_params, 356 .ht_params = &iwl6000_ht_params,
556 .led_mode = IWL_LED_BLINK, 357 .led_mode = IWL_LED_BLINK,
557}; 358};
558 359
559MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK)); 360MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
560MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 361MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
561MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 362MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
562MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 363MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 84cbe7bb504c..95f27f1a423b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -64,7 +64,6 @@
64#include <net/mac80211.h> 64#include <net/mac80211.h>
65 65
66#include "iwl-dev.h" 66#include "iwl-dev.h"
67#include "iwl-core.h"
68#include "iwl-agn-calib.h" 67#include "iwl-agn-calib.h"
69#include "iwl-trans.h" 68#include "iwl-trans.h"
70#include "iwl-agn.h" 69#include "iwl-agn.h"
@@ -190,7 +189,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
190 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; 189 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
191 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; 190 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
192 struct iwl_sensitivity_data *data = NULL; 191 struct iwl_sensitivity_data *data = NULL;
193 const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens; 192 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
194 193
195 data = &(priv->sensitivity_data); 194 data = &(priv->sensitivity_data);
196 195
@@ -373,7 +372,7 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
373 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; 372 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
374 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; 373 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
375 struct iwl_sensitivity_data *data = NULL; 374 struct iwl_sensitivity_data *data = NULL;
376 const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens; 375 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
377 376
378 data = &(priv->sensitivity_data); 377 data = &(priv->sensitivity_data);
379 378
@@ -521,7 +520,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
521 520
522 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); 521 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
523 522
524 if (cfg(priv)->base_params->hd_v2) { 523 if (priv->cfg->base_params->hd_v2) {
525 cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = 524 cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
526 HD_INA_NON_SQUARE_DET_OFDM_DATA_V2; 525 HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
527 cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = 526 cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
@@ -597,9 +596,9 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
597 int ret = 0; 596 int ret = 0;
598 int i; 597 int i;
599 struct iwl_sensitivity_data *data = NULL; 598 struct iwl_sensitivity_data *data = NULL;
600 const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens; 599 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
601 600
602 if (priv->disable_sens_cal) 601 if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
603 return; 602 return;
604 603
605 IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n"); 604 IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n");
@@ -663,7 +662,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
663 struct statistics_rx_phy *ofdm, *cck; 662 struct statistics_rx_phy *ofdm, *cck;
664 struct statistics_general_data statis; 663 struct statistics_general_data statis;
665 664
666 if (priv->disable_sens_cal) 665 if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
667 return; 666 return;
668 667
669 data = &(priv->sensitivity_data); 668 data = &(priv->sensitivity_data);
@@ -833,28 +832,28 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
833 * To be safe, simply mask out any chains that we know 832 * To be safe, simply mask out any chains that we know
834 * are not on the device. 833 * are not on the device.
835 */ 834 */
836 active_chains &= hw_params(priv).valid_rx_ant; 835 active_chains &= priv->hw_params.valid_rx_ant;
837 836
838 num_tx_chains = 0; 837 num_tx_chains = 0;
839 for (i = 0; i < NUM_RX_CHAINS; i++) { 838 for (i = 0; i < NUM_RX_CHAINS; i++) {
840 /* loops on all the bits of 839 /* loops on all the bits of
841 * priv->hw_setting.valid_tx_ant */ 840 * priv->hw_setting.valid_tx_ant */
842 u8 ant_msk = (1 << i); 841 u8 ant_msk = (1 << i);
843 if (!(hw_params(priv).valid_tx_ant & ant_msk)) 842 if (!(priv->hw_params.valid_tx_ant & ant_msk))
844 continue; 843 continue;
845 844
846 num_tx_chains++; 845 num_tx_chains++;
847 if (data->disconn_array[i] == 0) 846 if (data->disconn_array[i] == 0)
848 /* there is a Tx antenna connected */ 847 /* there is a Tx antenna connected */
849 break; 848 break;
850 if (num_tx_chains == hw_params(priv).tx_chains_num && 849 if (num_tx_chains == priv->hw_params.tx_chains_num &&
851 data->disconn_array[i]) { 850 data->disconn_array[i]) {
852 /* 851 /*
853 * If all chains are disconnected 852 * If all chains are disconnected
854 * connect the first valid tx chain 853 * connect the first valid tx chain
855 */ 854 */
856 first_chain = 855 first_chain =
857 find_first_chain(hw_params(priv).valid_tx_ant); 856 find_first_chain(priv->hw_params.valid_tx_ant);
858 data->disconn_array[first_chain] = 0; 857 data->disconn_array[first_chain] = 0;
859 active_chains |= BIT(first_chain); 858 active_chains |= BIT(first_chain);
860 IWL_DEBUG_CALIB(priv, 859 IWL_DEBUG_CALIB(priv,
@@ -864,13 +863,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
864 } 863 }
865 } 864 }
866 865
867 if (active_chains != hw_params(priv).valid_rx_ant && 866 if (active_chains != priv->hw_params.valid_rx_ant &&
868 active_chains != priv->chain_noise_data.active_chains) 867 active_chains != priv->chain_noise_data.active_chains)
869 IWL_DEBUG_CALIB(priv, 868 IWL_DEBUG_CALIB(priv,
870 "Detected that not all antennas are connected! " 869 "Detected that not all antennas are connected! "
871 "Connected: %#x, valid: %#x.\n", 870 "Connected: %#x, valid: %#x.\n",
872 active_chains, 871 active_chains,
873 hw_params(priv).valid_rx_ant); 872 priv->hw_params.valid_rx_ant);
874 873
875 /* Save for use within RXON, TX, SCAN commands, etc. */ 874 /* Save for use within RXON, TX, SCAN commands, etc. */
876 data->active_chains = active_chains; 875 data->active_chains = active_chains;
@@ -895,7 +894,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
895 continue; 894 continue;
896 } 895 }
897 896
898 delta_g = (cfg(priv)->base_params->chain_noise_scale * 897 delta_g = (priv->cfg->base_params->chain_noise_scale *
899 ((s32)average_noise[default_chain] - 898 ((s32)average_noise[default_chain] -
900 (s32)average_noise[i])) / 1500; 899 (s32)average_noise[i])) / 1500;
901 900
@@ -970,7 +969,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
970 */ 969 */
971 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 970 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
972 971
973 if (priv->disable_chain_noise_cal) 972 if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
974 return; 973 return;
975 974
976 data = &(priv->chain_noise_data); 975 data = &(priv->chain_noise_data);
@@ -1051,11 +1050,11 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1051 return; 1050 return;
1052 1051
1053 /* Analyze signal for disconnected antenna */ 1052 /* Analyze signal for disconnected antenna */
1054 if (cfg(priv)->bt_params && 1053 if (priv->cfg->bt_params &&
1055 cfg(priv)->bt_params->advanced_bt_coexist) { 1054 priv->cfg->bt_params->advanced_bt_coexist) {
1056 /* Disable disconnected antenna algorithm for advanced 1055 /* Disable disconnected antenna algorithm for advanced
1057 bt coex, assuming valid antennas are connected */ 1056 bt coex, assuming valid antennas are connected */
1058 data->active_chains = hw_params(priv).valid_rx_ant; 1057 data->active_chains = priv->hw_params.valid_rx_ant;
1059 for (i = 0; i < NUM_RX_CHAINS; i++) 1058 for (i = 0; i < NUM_RX_CHAINS; i++)
1060 if (!(data->active_chains & (1<<i))) 1059 if (!(data->active_chains & (1<<i)))
1061 data->disconn_array[i] = 1; 1060 data->disconn_array[i] = 1;
@@ -1085,7 +1084,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1085 min_average_noise, min_average_noise_antenna_i); 1084 min_average_noise, min_average_noise_antenna_i);
1086 1085
1087 iwlagn_gain_computation(priv, average_noise, 1086 iwlagn_gain_computation(priv, average_noise,
1088 find_first_chain(hw_params(priv).valid_rx_ant)); 1087 find_first_chain(priv->hw_params.valid_rx_ant));
1089 1088
1090 /* Some power changes may have been made during the calibration. 1089 /* Some power changes may have been made during the calibration.
1091 * Update and commit the RXON 1090 * Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index 9ed6683314a7..dbe13787f272 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -63,7 +63,6 @@
63#define __iwl_calib_h__ 63#define __iwl_calib_h__
64 64
65#include "iwl-dev.h" 65#include "iwl-dev.h"
66#include "iwl-core.h"
67#include "iwl-commands.h" 66#include "iwl-commands.h"
68 67
69void iwl_chain_noise_calibration(struct iwl_priv *priv); 68void iwl_chain_noise_calibration(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c b/drivers/net/wireless/iwlwifi/iwl-agn-devices.c
new file mode 100644
index 000000000000..48533b3a0f9a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-devices.c
@@ -0,0 +1,755 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27/*
28 * DVM device-specific data & functions
29 */
30#include "iwl-agn.h"
31#include "iwl-dev.h"
32#include "iwl-commands.h"
33#include "iwl-io.h"
34#include "iwl-prph.h"
35
36/*
37 * 1000 series
38 * ===========
39 */
40
41/*
42 * For 1000, use advance thermal throttling critical temperature threshold,
43 * but legacy thermal management implementation for now.
44 * This is for the reason of 1000 uCode using advance thermal throttling API
45 * but not implement ct_kill_exit based on ct_kill exit temperature
46 * so the thermal throttling will still based on legacy thermal throttling
47 * management.
48 * The code here need to be modified once 1000 uCode has the advanced thermal
49 * throttling algorithm in place
50 */
51static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
52{
53 /* want Celsius */
54 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
55 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
56}
57
58/* NIC configuration for 1000 series */
59static void iwl1000_nic_config(struct iwl_priv *priv)
60{
61 /* set CSR_HW_CONFIG_REG for uCode use */
62 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
63 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
64 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
65
66 /* Setting digital SVR for 1000 card to 1.32V */
67 /* locking is acquired in iwl_set_bits_mask_prph() function */
68 iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG,
69 APMG_SVR_DIGITAL_VOLTAGE_1_32,
70 ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
71}
72
73/**
74 * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
75 * @priv -- pointer to iwl_priv data structure
76 * @tsf_bits -- number of bits need to shift for masking)
77 */
78static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
79 u16 tsf_bits)
80{
81 return (1 << tsf_bits) - 1;
82}
83
84/**
85 * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
86 * @priv -- pointer to iwl_priv data structure
87 * @tsf_bits -- number of bits need to shift for masking)
88 */
89static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
90 u16 tsf_bits)
91{
92 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
93}
94
95/*
96 * extended beacon time format
97 * time in usec will be changed into a 32-bit value in extended:internal format
98 * the extended part is the beacon counts
99 * the internal part is the time in usec within one beacon interval
100 */
101static u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec,
102 u32 beacon_interval)
103{
104 u32 quot;
105 u32 rem;
106 u32 interval = beacon_interval * TIME_UNIT;
107
108 if (!interval || !usec)
109 return 0;
110
111 quot = (usec / interval) &
112 (iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
113 IWLAGN_EXT_BEACON_TIME_POS);
114 rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
115 IWLAGN_EXT_BEACON_TIME_POS);
116
117 return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
118}
119
120/* base is usually what we get from ucode with each received frame,
121 * the same as HW timer counter counting down
122 */
123static __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
124 u32 addon, u32 beacon_interval)
125{
126 u32 base_low = base & iwl_beacon_time_mask_low(priv,
127 IWLAGN_EXT_BEACON_TIME_POS);
128 u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
129 IWLAGN_EXT_BEACON_TIME_POS);
130 u32 interval = beacon_interval * TIME_UNIT;
131 u32 res = (base & iwl_beacon_time_mask_high(priv,
132 IWLAGN_EXT_BEACON_TIME_POS)) +
133 (addon & iwl_beacon_time_mask_high(priv,
134 IWLAGN_EXT_BEACON_TIME_POS));
135
136 if (base_low > addon_low)
137 res += base_low - addon_low;
138 else if (base_low < addon_low) {
139 res += interval + base_low - addon_low;
140 res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
141 } else
142 res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
143
144 return cpu_to_le32(res);
145}
146
147static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
148 .min_nrg_cck = 95,
149 .auto_corr_min_ofdm = 90,
150 .auto_corr_min_ofdm_mrc = 170,
151 .auto_corr_min_ofdm_x1 = 120,
152 .auto_corr_min_ofdm_mrc_x1 = 240,
153
154 .auto_corr_max_ofdm = 120,
155 .auto_corr_max_ofdm_mrc = 210,
156 .auto_corr_max_ofdm_x1 = 155,
157 .auto_corr_max_ofdm_mrc_x1 = 290,
158
159 .auto_corr_min_cck = 125,
160 .auto_corr_max_cck = 200,
161 .auto_corr_min_cck_mrc = 170,
162 .auto_corr_max_cck_mrc = 400,
163 .nrg_th_cck = 95,
164 .nrg_th_ofdm = 95,
165
166 .barker_corr_th_min = 190,
167 .barker_corr_th_min_mrc = 390,
168 .nrg_th_cca = 62,
169};
170
171static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
172{
173 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
174
175 priv->hw_params.tx_chains_num =
176 num_of_ant(priv->hw_params.valid_tx_ant);
177 if (priv->cfg->rx_with_siso_diversity)
178 priv->hw_params.rx_chains_num = 1;
179 else
180 priv->hw_params.rx_chains_num =
181 num_of_ant(priv->hw_params.valid_rx_ant);
182
183 iwl1000_set_ct_threshold(priv);
184
185 /* Set initial sensitivity parameters */
186 priv->hw_params.sens = &iwl1000_sensitivity;
187}
188
189struct iwl_lib_ops iwl1000_lib = {
190 .set_hw_params = iwl1000_hw_set_hw_params,
191 .nic_config = iwl1000_nic_config,
192 .eeprom_ops = {
193 .regulatory_bands = {
194 EEPROM_REG_BAND_1_CHANNELS,
195 EEPROM_REG_BAND_2_CHANNELS,
196 EEPROM_REG_BAND_3_CHANNELS,
197 EEPROM_REG_BAND_4_CHANNELS,
198 EEPROM_REG_BAND_5_CHANNELS,
199 EEPROM_REG_BAND_24_HT40_CHANNELS,
200 EEPROM_REGULATORY_BAND_NO_HT40,
201 },
202 },
203 .temperature = iwlagn_temperature,
204};
205
206
207/*
208 * 2000 series
209 * ===========
210 */
211
212static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
213{
214 /* want Celsius */
215 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
216 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
217}
218
219/* NIC configuration for 2000 series */
220static void iwl2000_nic_config(struct iwl_priv *priv)
221{
222 iwl_rf_config(priv);
223
224 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
225 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
226}
227
228static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
229 .min_nrg_cck = 97,
230 .auto_corr_min_ofdm = 80,
231 .auto_corr_min_ofdm_mrc = 128,
232 .auto_corr_min_ofdm_x1 = 105,
233 .auto_corr_min_ofdm_mrc_x1 = 192,
234
235 .auto_corr_max_ofdm = 145,
236 .auto_corr_max_ofdm_mrc = 232,
237 .auto_corr_max_ofdm_x1 = 110,
238 .auto_corr_max_ofdm_mrc_x1 = 232,
239
240 .auto_corr_min_cck = 125,
241 .auto_corr_max_cck = 175,
242 .auto_corr_min_cck_mrc = 160,
243 .auto_corr_max_cck_mrc = 310,
244 .nrg_th_cck = 97,
245 .nrg_th_ofdm = 100,
246
247 .barker_corr_th_min = 190,
248 .barker_corr_th_min_mrc = 390,
249 .nrg_th_cca = 62,
250};
251
252static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
253{
254 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
255
256 priv->hw_params.tx_chains_num =
257 num_of_ant(priv->hw_params.valid_tx_ant);
258 if (priv->cfg->rx_with_siso_diversity)
259 priv->hw_params.rx_chains_num = 1;
260 else
261 priv->hw_params.rx_chains_num =
262 num_of_ant(priv->hw_params.valid_rx_ant);
263
264 iwl2000_set_ct_threshold(priv);
265
266 /* Set initial sensitivity parameters */
267 priv->hw_params.sens = &iwl2000_sensitivity;
268}
269
270struct iwl_lib_ops iwl2000_lib = {
271 .set_hw_params = iwl2000_hw_set_hw_params,
272 .nic_config = iwl2000_nic_config,
273 .eeprom_ops = {
274 .regulatory_bands = {
275 EEPROM_REG_BAND_1_CHANNELS,
276 EEPROM_REG_BAND_2_CHANNELS,
277 EEPROM_REG_BAND_3_CHANNELS,
278 EEPROM_REG_BAND_4_CHANNELS,
279 EEPROM_REG_BAND_5_CHANNELS,
280 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
281 EEPROM_REGULATORY_BAND_NO_HT40,
282 },
283 .enhanced_txpower = true,
284 },
285 .temperature = iwlagn_temperature,
286};
287
288struct iwl_lib_ops iwl2030_lib = {
289 .set_hw_params = iwl2000_hw_set_hw_params,
290 .nic_config = iwl2000_nic_config,
291 .eeprom_ops = {
292 .regulatory_bands = {
293 EEPROM_REG_BAND_1_CHANNELS,
294 EEPROM_REG_BAND_2_CHANNELS,
295 EEPROM_REG_BAND_3_CHANNELS,
296 EEPROM_REG_BAND_4_CHANNELS,
297 EEPROM_REG_BAND_5_CHANNELS,
298 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
299 EEPROM_REGULATORY_BAND_NO_HT40,
300 },
301 .enhanced_txpower = true,
302 },
303 .temperature = iwlagn_temperature,
304};
305
306/*
307 * 5000 series
308 * ===========
309 */
310
311/* NIC configuration for 5000 series */
312static void iwl5000_nic_config(struct iwl_priv *priv)
313{
314 iwl_rf_config(priv);
315
316 /* W/A : NIC is stuck in a reset state after Early PCIe power off
317 * (PCIe power is lost before PERST# is asserted),
318 * causing ME FW to lose ownership and not being able to obtain it back.
319 */
320 iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
321 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
322 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
323}
324
325static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
326 .min_nrg_cck = 100,
327 .auto_corr_min_ofdm = 90,
328 .auto_corr_min_ofdm_mrc = 170,
329 .auto_corr_min_ofdm_x1 = 105,
330 .auto_corr_min_ofdm_mrc_x1 = 220,
331
332 .auto_corr_max_ofdm = 120,
333 .auto_corr_max_ofdm_mrc = 210,
334 .auto_corr_max_ofdm_x1 = 120,
335 .auto_corr_max_ofdm_mrc_x1 = 240,
336
337 .auto_corr_min_cck = 125,
338 .auto_corr_max_cck = 200,
339 .auto_corr_min_cck_mrc = 200,
340 .auto_corr_max_cck_mrc = 400,
341 .nrg_th_cck = 100,
342 .nrg_th_ofdm = 100,
343
344 .barker_corr_th_min = 190,
345 .barker_corr_th_min_mrc = 390,
346 .nrg_th_cca = 62,
347};
348
349static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
350 .min_nrg_cck = 95,
351 .auto_corr_min_ofdm = 90,
352 .auto_corr_min_ofdm_mrc = 170,
353 .auto_corr_min_ofdm_x1 = 105,
354 .auto_corr_min_ofdm_mrc_x1 = 220,
355
356 .auto_corr_max_ofdm = 120,
357 .auto_corr_max_ofdm_mrc = 210,
358 /* max = min for performance bug in 5150 DSP */
359 .auto_corr_max_ofdm_x1 = 105,
360 .auto_corr_max_ofdm_mrc_x1 = 220,
361
362 .auto_corr_min_cck = 125,
363 .auto_corr_max_cck = 200,
364 .auto_corr_min_cck_mrc = 170,
365 .auto_corr_max_cck_mrc = 400,
366 .nrg_th_cck = 95,
367 .nrg_th_ofdm = 95,
368
369 .barker_corr_th_min = 190,
370 .barker_corr_th_min_mrc = 390,
371 .nrg_th_cca = 62,
372};
373
374#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
375
376static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
377{
378 u16 temperature, voltage;
379 __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
380 EEPROM_KELVIN_TEMPERATURE);
381
382 temperature = le16_to_cpu(temp_calib[0]);
383 voltage = le16_to_cpu(temp_calib[1]);
384
385 /* offset = temp - volt / coeff */
386 return (s32)(temperature -
387 voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
388}
389
390static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
391{
392 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
393 s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
394 iwl_temp_calib_to_offset(priv);
395
396 priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
397}
398
399static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
400{
401 /* want Celsius */
402 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
403}
404
405static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
406{
407 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
408 BIT(IEEE80211_BAND_5GHZ);
409
410 priv->hw_params.tx_chains_num =
411 num_of_ant(priv->hw_params.valid_tx_ant);
412 priv->hw_params.rx_chains_num =
413 num_of_ant(priv->hw_params.valid_rx_ant);
414
415 iwl5000_set_ct_threshold(priv);
416
417 /* Set initial sensitivity parameters */
418 priv->hw_params.sens = &iwl5000_sensitivity;
419}
420
421static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
422{
423 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
424 BIT(IEEE80211_BAND_5GHZ);
425
426 priv->hw_params.tx_chains_num =
427 num_of_ant(priv->hw_params.valid_tx_ant);
428 priv->hw_params.rx_chains_num =
429 num_of_ant(priv->hw_params.valid_rx_ant);
430
431 iwl5150_set_ct_threshold(priv);
432
433 /* Set initial sensitivity parameters */
434 priv->hw_params.sens = &iwl5150_sensitivity;
435}
436
437static void iwl5150_temperature(struct iwl_priv *priv)
438{
439 u32 vt = 0;
440 s32 offset = iwl_temp_calib_to_offset(priv);
441
442 vt = le32_to_cpu(priv->statistics.common.temperature);
443 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
444 /* now vt hold the temperature in Kelvin */
445 priv->temperature = KELVIN_TO_CELSIUS(vt);
446 iwl_tt_handler(priv);
447}
448
449static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
450 struct ieee80211_channel_switch *ch_switch)
451{
452 /*
453 * MULTI-FIXME
454 * See iwlagn_mac_channel_switch.
455 */
456 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
457 struct iwl5000_channel_switch_cmd cmd;
458 const struct iwl_channel_info *ch_info;
459 u32 switch_time_in_usec, ucode_switch_time;
460 u16 ch;
461 u32 tsf_low;
462 u8 switch_count;
463 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
464 struct ieee80211_vif *vif = ctx->vif;
465 struct iwl_host_cmd hcmd = {
466 .id = REPLY_CHANNEL_SWITCH,
467 .len = { sizeof(cmd), },
468 .flags = CMD_SYNC,
469 .data = { &cmd, },
470 };
471
472 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
473 ch = ch_switch->channel->hw_value;
474 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
475 ctx->active.channel, ch);
476 cmd.channel = cpu_to_le16(ch);
477 cmd.rxon_flags = ctx->staging.flags;
478 cmd.rxon_filter_flags = ctx->staging.filter_flags;
479 switch_count = ch_switch->count;
480 tsf_low = ch_switch->timestamp & 0x0ffffffff;
481 /*
482 * calculate the ucode channel switch time
483 * adding TSF as one of the factor for when to switch
484 */
485 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
486 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
487 beacon_interval)) {
488 switch_count -= (priv->ucode_beacon_time -
489 tsf_low) / beacon_interval;
490 } else
491 switch_count = 0;
492 }
493 if (switch_count <= 1)
494 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
495 else {
496 switch_time_in_usec =
497 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
498 ucode_switch_time = iwl_usecs_to_beacons(priv,
499 switch_time_in_usec,
500 beacon_interval);
501 cmd.switch_time = iwl_add_beacon_time(priv,
502 priv->ucode_beacon_time,
503 ucode_switch_time,
504 beacon_interval);
505 }
506 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
507 cmd.switch_time);
508 ch_info = iwl_get_channel_info(priv, priv->band, ch);
509 if (ch_info)
510 cmd.expect_beacon = is_channel_radar(ch_info);
511 else {
512 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
513 ctx->active.channel, ch);
514 return -EFAULT;
515 }
516
517 return iwl_dvm_send_cmd(priv, &hcmd);
518}
519
520struct iwl_lib_ops iwl5000_lib = {
521 .set_hw_params = iwl5000_hw_set_hw_params,
522 .set_channel_switch = iwl5000_hw_channel_switch,
523 .nic_config = iwl5000_nic_config,
524 .eeprom_ops = {
525 .regulatory_bands = {
526 EEPROM_REG_BAND_1_CHANNELS,
527 EEPROM_REG_BAND_2_CHANNELS,
528 EEPROM_REG_BAND_3_CHANNELS,
529 EEPROM_REG_BAND_4_CHANNELS,
530 EEPROM_REG_BAND_5_CHANNELS,
531 EEPROM_REG_BAND_24_HT40_CHANNELS,
532 EEPROM_REG_BAND_52_HT40_CHANNELS
533 },
534 },
535 .temperature = iwlagn_temperature,
536};
537
538struct iwl_lib_ops iwl5150_lib = {
539 .set_hw_params = iwl5150_hw_set_hw_params,
540 .set_channel_switch = iwl5000_hw_channel_switch,
541 .nic_config = iwl5000_nic_config,
542 .eeprom_ops = {
543 .regulatory_bands = {
544 EEPROM_REG_BAND_1_CHANNELS,
545 EEPROM_REG_BAND_2_CHANNELS,
546 EEPROM_REG_BAND_3_CHANNELS,
547 EEPROM_REG_BAND_4_CHANNELS,
548 EEPROM_REG_BAND_5_CHANNELS,
549 EEPROM_REG_BAND_24_HT40_CHANNELS,
550 EEPROM_REG_BAND_52_HT40_CHANNELS
551 },
552 },
553 .temperature = iwl5150_temperature,
554};
555
556
557
558/*
559 * 6000 series
560 * ===========
561 */
562
563static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
564{
565 /* want Celsius */
566 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
567 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
568}
569
570/* NIC configuration for 6000 series */
571static void iwl6000_nic_config(struct iwl_priv *priv)
572{
573 iwl_rf_config(priv);
574
575 switch (priv->cfg->device_family) {
576 case IWL_DEVICE_FAMILY_6005:
577 case IWL_DEVICE_FAMILY_6030:
578 case IWL_DEVICE_FAMILY_6000:
579 break;
580 case IWL_DEVICE_FAMILY_6000i:
581 /* 2x2 IPA phy type */
582 iwl_write32(priv->trans, CSR_GP_DRIVER_REG,
583 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
584 break;
585 case IWL_DEVICE_FAMILY_6050:
586 /* Indicate calibration version to uCode. */
587 if (iwl_eeprom_calib_version(priv) >= 6)
588 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
589 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
590 break;
591 case IWL_DEVICE_FAMILY_6150:
592 /* Indicate calibration version to uCode. */
593 if (iwl_eeprom_calib_version(priv) >= 6)
594 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
595 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
596 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
597 CSR_GP_DRIVER_REG_BIT_6050_1x2);
598 break;
599 default:
600 WARN_ON(1);
601 }
602}
603
604static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
605 .min_nrg_cck = 110,
606 .auto_corr_min_ofdm = 80,
607 .auto_corr_min_ofdm_mrc = 128,
608 .auto_corr_min_ofdm_x1 = 105,
609 .auto_corr_min_ofdm_mrc_x1 = 192,
610
611 .auto_corr_max_ofdm = 145,
612 .auto_corr_max_ofdm_mrc = 232,
613 .auto_corr_max_ofdm_x1 = 110,
614 .auto_corr_max_ofdm_mrc_x1 = 232,
615
616 .auto_corr_min_cck = 125,
617 .auto_corr_max_cck = 175,
618 .auto_corr_min_cck_mrc = 160,
619 .auto_corr_max_cck_mrc = 310,
620 .nrg_th_cck = 110,
621 .nrg_th_ofdm = 110,
622
623 .barker_corr_th_min = 190,
624 .barker_corr_th_min_mrc = 336,
625 .nrg_th_cca = 62,
626};
627
628static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
629{
630 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
631 BIT(IEEE80211_BAND_5GHZ);
632
633 priv->hw_params.tx_chains_num =
634 num_of_ant(priv->hw_params.valid_tx_ant);
635 if (priv->cfg->rx_with_siso_diversity)
636 priv->hw_params.rx_chains_num = 1;
637 else
638 priv->hw_params.rx_chains_num =
639 num_of_ant(priv->hw_params.valid_rx_ant);
640
641 iwl6000_set_ct_threshold(priv);
642
643 /* Set initial sensitivity parameters */
644 priv->hw_params.sens = &iwl6000_sensitivity;
645
646}
647
648static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
649 struct ieee80211_channel_switch *ch_switch)
650{
651 /*
652 * MULTI-FIXME
653 * See iwlagn_mac_channel_switch.
654 */
655 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
656 struct iwl6000_channel_switch_cmd cmd;
657 const struct iwl_channel_info *ch_info;
658 u32 switch_time_in_usec, ucode_switch_time;
659 u16 ch;
660 u32 tsf_low;
661 u8 switch_count;
662 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
663 struct ieee80211_vif *vif = ctx->vif;
664 struct iwl_host_cmd hcmd = {
665 .id = REPLY_CHANNEL_SWITCH,
666 .len = { sizeof(cmd), },
667 .flags = CMD_SYNC,
668 .data = { &cmd, },
669 };
670
671 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
672 ch = ch_switch->channel->hw_value;
673 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
674 ctx->active.channel, ch);
675 cmd.channel = cpu_to_le16(ch);
676 cmd.rxon_flags = ctx->staging.flags;
677 cmd.rxon_filter_flags = ctx->staging.filter_flags;
678 switch_count = ch_switch->count;
679 tsf_low = ch_switch->timestamp & 0x0ffffffff;
680 /*
681 * calculate the ucode channel switch time
682 * adding TSF as one of the factor for when to switch
683 */
684 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
685 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
686 beacon_interval)) {
687 switch_count -= (priv->ucode_beacon_time -
688 tsf_low) / beacon_interval;
689 } else
690 switch_count = 0;
691 }
692 if (switch_count <= 1)
693 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
694 else {
695 switch_time_in_usec =
696 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
697 ucode_switch_time = iwl_usecs_to_beacons(priv,
698 switch_time_in_usec,
699 beacon_interval);
700 cmd.switch_time = iwl_add_beacon_time(priv,
701 priv->ucode_beacon_time,
702 ucode_switch_time,
703 beacon_interval);
704 }
705 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
706 cmd.switch_time);
707 ch_info = iwl_get_channel_info(priv, priv->band, ch);
708 if (ch_info)
709 cmd.expect_beacon = is_channel_radar(ch_info);
710 else {
711 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
712 ctx->active.channel, ch);
713 return -EFAULT;
714 }
715
716 return iwl_dvm_send_cmd(priv, &hcmd);
717}
718
719struct iwl_lib_ops iwl6000_lib = {
720 .set_hw_params = iwl6000_hw_set_hw_params,
721 .set_channel_switch = iwl6000_hw_channel_switch,
722 .nic_config = iwl6000_nic_config,
723 .eeprom_ops = {
724 .regulatory_bands = {
725 EEPROM_REG_BAND_1_CHANNELS,
726 EEPROM_REG_BAND_2_CHANNELS,
727 EEPROM_REG_BAND_3_CHANNELS,
728 EEPROM_REG_BAND_4_CHANNELS,
729 EEPROM_REG_BAND_5_CHANNELS,
730 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
731 EEPROM_REG_BAND_52_HT40_CHANNELS
732 },
733 .enhanced_txpower = true,
734 },
735 .temperature = iwlagn_temperature,
736};
737
738struct iwl_lib_ops iwl6030_lib = {
739 .set_hw_params = iwl6000_hw_set_hw_params,
740 .set_channel_switch = iwl6000_hw_channel_switch,
741 .nic_config = iwl6000_nic_config,
742 .eeprom_ops = {
743 .regulatory_bands = {
744 EEPROM_REG_BAND_1_CHANNELS,
745 EEPROM_REG_BAND_2_CHANNELS,
746 EEPROM_REG_BAND_3_CHANNELS,
747 EEPROM_REG_BAND_4_CHANNELS,
748 EEPROM_REG_BAND_5_CHANNELS,
749 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
750 EEPROM_REG_BAND_52_HT40_CHANNELS
751 },
752 .enhanced_txpower = true,
753 },
754 .temperature = iwlagn_temperature,
755};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index d0ec0abd3c89..7960a52f6ad4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -102,10 +102,18 @@
102 102
103/* EEPROM */ 103/* EEPROM */
104#define IWLAGN_EEPROM_IMG_SIZE 2048 104#define IWLAGN_EEPROM_IMG_SIZE 2048
105/* OTP */
106/* lower blocks contain EEPROM image and calibration data */
107#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
108/* high blocks contain PAPD data */
109#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */
110#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */
111#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
112#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
113#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
114#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
115
105 116
106#define IWLAGN_CMD_FIFO_NUM 7
107#define IWLAGN_NUM_QUEUES 20 117#define IWLAGN_NUM_QUEUES 20
108#define IWLAGN_NUM_AMPDU_QUEUES 9
109#define IWLAGN_FIRST_AMPDU_QUEUE 11
110 118
111#endif /* __iwl_agn_hw_h__ */ 119#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 56f41c9409d1..01dc44267317 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -33,12 +33,11 @@
33#include <linux/sched.h> 33#include <linux/sched.h>
34 34
35#include "iwl-dev.h" 35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h" 36#include "iwl-io.h"
38#include "iwl-agn-hw.h" 37#include "iwl-agn-hw.h"
39#include "iwl-agn.h" 38#include "iwl-agn.h"
40#include "iwl-trans.h" 39#include "iwl-trans.h"
41#include "iwl-shared.h" 40#include "iwl-modparams.h"
42 41
43int iwlagn_hw_valid_rtc_data_addr(u32 addr) 42int iwlagn_hw_valid_rtc_data_addr(u32 addr)
44{ 43{
@@ -94,81 +93,6 @@ void iwlagn_temperature(struct iwl_priv *priv)
94 iwl_tt_handler(priv); 93 iwl_tt_handler(priv);
95} 94}
96 95
97u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
98{
99 struct iwl_eeprom_calib_hdr *hdr;
100
101 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
102 EEPROM_CALIB_ALL);
103 return hdr->version;
104
105}
106
107/*
108 * EEPROM
109 */
110static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
111{
112 u16 offset = 0;
113
114 if ((address & INDIRECT_ADDRESS) == 0)
115 return address;
116
117 switch (address & INDIRECT_TYPE_MSK) {
118 case INDIRECT_HOST:
119 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
120 break;
121 case INDIRECT_GENERAL:
122 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
123 break;
124 case INDIRECT_REGULATORY:
125 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
126 break;
127 case INDIRECT_TXP_LIMIT:
128 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
129 break;
130 case INDIRECT_TXP_LIMIT_SIZE:
131 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
132 break;
133 case INDIRECT_CALIBRATION:
134 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
135 break;
136 case INDIRECT_PROCESS_ADJST:
137 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
138 break;
139 case INDIRECT_OTHERS:
140 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
141 break;
142 default:
143 IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
144 address & INDIRECT_TYPE_MSK);
145 break;
146 }
147
148 /* translate the offset from words to byte */
149 return (address & ADDRESS_MSK) + (offset << 1);
150}
151
152const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
153{
154 u32 address = eeprom_indirect_address(shrd, offset);
155 BUG_ON(address >= shrd->cfg->base_params->eeprom_size);
156 return &shrd->eeprom[address];
157}
158
159struct iwl_mod_params iwlagn_mod_params = {
160 .amsdu_size_8K = 1,
161 .restart_fw = 1,
162 .plcp_check = true,
163 .bt_coex_active = true,
164 .no_sleep_autoadjust = true,
165 .power_level = IWL_POWER_INDEX_1,
166 .bt_ch_announce = true,
167 .wanted_ucode_alternative = 1,
168 .auto_agg = true,
169 /* the rest are 0 by default */
170};
171
172int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) 96int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
173{ 97{
174 int idx = 0; 98 int idx = 0;
@@ -228,13 +152,13 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
228 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | 152 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
229 IWL_SCD_MGMT_MSK; 153 IWL_SCD_MGMT_MSK;
230 if ((flush_control & BIT(IWL_RXON_CTX_PAN)) && 154 if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
231 (priv->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))) 155 (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
232 flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK | 156 flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
233 IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK | 157 IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
234 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK | 158 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
235 IWL_PAN_SCD_MULTICAST_MSK; 159 IWL_PAN_SCD_MULTICAST_MSK;
236 160
237 if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE) 161 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
238 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; 162 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
239 163
240 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", 164 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -253,7 +177,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
253 goto done; 177 goto done;
254 } 178 }
255 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); 179 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
256 iwl_trans_wait_tx_queue_empty(trans(priv)); 180 iwl_trans_wait_tx_queue_empty(priv->trans);
257done: 181done:
258 ieee80211_wake_queues(priv->hw); 182 ieee80211_wake_queues(priv->hw);
259 mutex_unlock(&priv->mutex); 183 mutex_unlock(&priv->mutex);
@@ -262,76 +186,8 @@ done:
262/* 186/*
263 * BT coex 187 * BT coex
264 */ 188 */
265/* 189/* Notmal TDM */
266 * Macros to access the lookup table. 190static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
267 *
268 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
269* wifi_prio, wifi_txrx and wifi_sh_ant_req.
270 *
271 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
272 *
273 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
274 * one after another in 32-bit registers, and "registers" 0 through 7 contain
275 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
276 *
277 * These macros encode that format.
278 */
279#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
280 wifi_txrx, wifi_sh_ant_req) \
281 (bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
282 (wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
283
284#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
285 lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
286#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
287 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
288 (!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
289 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
290 wifi_sh_ant_req))))
291#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
292 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
293 LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
294 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
295 wifi_sh_ant_req))
296#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
297 wifi_req, wifi_prio, wifi_txrx, \
298 wifi_sh_ant_req) \
299 LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
300 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
301 wifi_sh_ant_req))
302
303#define LUT_WLAN_KILL_OP(lut, op, val) \
304 lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
305#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
306 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
307 (!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
308 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
309#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
310 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
311 LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
312 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
313#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
314 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
315 LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
316 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
317
318#define LUT_ANT_SWITCH_OP(lut, op, val) \
319 lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
320#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
321 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
322 (!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
323 wifi_req, wifi_prio, wifi_txrx, \
324 wifi_sh_ant_req))))
325#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
326 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
327 LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
328 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
329#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
330 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
331 LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
332 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
333
334static const __le32 iwlagn_def_3w_lookup[12] = {
335 cpu_to_le32(0xaaaaaaaa), 191 cpu_to_le32(0xaaaaaaaa),
336 cpu_to_le32(0xaaaaaaaa), 192 cpu_to_le32(0xaaaaaaaa),
337 cpu_to_le32(0xaeaaaaaa), 193 cpu_to_le32(0xaeaaaaaa),
@@ -346,7 +202,25 @@ static const __le32 iwlagn_def_3w_lookup[12] = {
346 cpu_to_le32(0xf0005000), 202 cpu_to_le32(0xf0005000),
347}; 203};
348 204
349static const __le32 iwlagn_concurrent_lookup[12] = { 205
206/* Loose Coex */
207static const __le32 iwlagn_loose_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
208 cpu_to_le32(0xaaaaaaaa),
209 cpu_to_le32(0xaaaaaaaa),
210 cpu_to_le32(0xaeaaaaaa),
211 cpu_to_le32(0xaaaaaaaa),
212 cpu_to_le32(0xcc00ff28),
213 cpu_to_le32(0x0000aaaa),
214 cpu_to_le32(0xcc00aaaa),
215 cpu_to_le32(0x0000aaaa),
216 cpu_to_le32(0x00000000),
217 cpu_to_le32(0x00000000),
218 cpu_to_le32(0xf0005000),
219 cpu_to_le32(0xf0005000),
220};
221
222/* Full concurrency */
223static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
350 cpu_to_le32(0xaaaaaaaa), 224 cpu_to_le32(0xaaaaaaaa),
351 cpu_to_le32(0xaaaaaaaa), 225 cpu_to_le32(0xaaaaaaaa),
352 cpu_to_le32(0xaaaaaaaa), 226 cpu_to_le32(0xaaaaaaaa),
@@ -369,24 +243,30 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
369 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT, 243 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
370 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT, 244 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
371 }; 245 };
372 struct iwl6000_bt_cmd bt_cmd_6000; 246 struct iwl_bt_cmd_v1 bt_cmd_v1;
373 struct iwl2000_bt_cmd bt_cmd_2000; 247 struct iwl_bt_cmd_v2 bt_cmd_v2;
374 int ret; 248 int ret;
375 249
376 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != 250 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
377 sizeof(basic.bt3_lookup_table)); 251 sizeof(basic.bt3_lookup_table));
378 252
379 if (cfg(priv)->bt_params) { 253 if (priv->cfg->bt_params) {
380 if (cfg(priv)->bt_params->bt_session_2) { 254 /*
381 bt_cmd_2000.prio_boost = cpu_to_le32( 255 * newer generation of devices (2000 series and newer)
382 cfg(priv)->bt_params->bt_prio_boost); 256 * use the version 2 of the bt command
383 bt_cmd_2000.tx_prio_boost = 0; 257 * we need to make sure sending the host command
384 bt_cmd_2000.rx_prio_boost = 0; 258 * with correct data structure to avoid uCode assert
259 */
260 if (priv->cfg->bt_params->bt_session_2) {
261 bt_cmd_v2.prio_boost = cpu_to_le32(
262 priv->cfg->bt_params->bt_prio_boost);
263 bt_cmd_v2.tx_prio_boost = 0;
264 bt_cmd_v2.rx_prio_boost = 0;
385 } else { 265 } else {
386 bt_cmd_6000.prio_boost = 266 bt_cmd_v1.prio_boost =
387 cfg(priv)->bt_params->bt_prio_boost; 267 priv->cfg->bt_params->bt_prio_boost;
388 bt_cmd_6000.tx_prio_boost = 0; 268 bt_cmd_v1.tx_prio_boost = 0;
389 bt_cmd_6000.rx_prio_boost = 0; 269 bt_cmd_v1.rx_prio_boost = 0;
390 } 270 }
391 } else { 271 } else {
392 IWL_ERR(priv, "failed to construct BT Coex Config\n"); 272 IWL_ERR(priv, "failed to construct BT Coex Config\n");
@@ -395,6 +275,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
395 275
396 basic.kill_ack_mask = priv->kill_ack_mask; 276 basic.kill_ack_mask = priv->kill_ack_mask;
397 basic.kill_cts_mask = priv->kill_cts_mask; 277 basic.kill_cts_mask = priv->kill_cts_mask;
278 basic.reduce_txpower = priv->reduced_txpower;
398 basic.valid = priv->bt_valid; 279 basic.valid = priv->bt_valid;
399 280
400 /* 281 /*
@@ -403,7 +284,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
403 * (might be in monitor mode), or the interface is in 284 * (might be in monitor mode), or the interface is in
404 * IBSS mode (no proper uCode support for coex then). 285 * IBSS mode (no proper uCode support for coex then).
405 */ 286 */
406 if (!iwlagn_mod_params.bt_coex_active || 287 if (!iwlwifi_mod_params.bt_coex_active ||
407 priv->iw_mode == NL80211_IFTYPE_ADHOC) { 288 priv->iw_mode == NL80211_IFTYPE_ADHOC) {
408 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED; 289 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
409 } else { 290 } else {
@@ -432,16 +313,16 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
432 priv->bt_full_concurrent ? 313 priv->bt_full_concurrent ?
433 "full concurrency" : "3-wire"); 314 "full concurrency" : "3-wire");
434 315
435 if (cfg(priv)->bt_params->bt_session_2) { 316 if (priv->cfg->bt_params->bt_session_2) {
436 memcpy(&bt_cmd_2000.basic, &basic, 317 memcpy(&bt_cmd_v2.basic, &basic,
437 sizeof(basic)); 318 sizeof(basic));
438 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 319 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
439 CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000); 320 CMD_SYNC, sizeof(bt_cmd_v2), &bt_cmd_v2);
440 } else { 321 } else {
441 memcpy(&bt_cmd_6000.basic, &basic, 322 memcpy(&bt_cmd_v1.basic, &basic,
442 sizeof(basic)); 323 sizeof(basic));
443 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 324 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
444 CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000); 325 CMD_SYNC, sizeof(bt_cmd_v1), &bt_cmd_v1);
445 } 326 }
446 if (ret) 327 if (ret)
447 IWL_ERR(priv, "failed to send BT Coex Config\n"); 328 IWL_ERR(priv, "failed to send BT Coex Config\n");
@@ -615,7 +496,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
615 struct iwl_bt_uart_msg *uart_msg) 496 struct iwl_bt_uart_msg *uart_msg)
616{ 497{
617 IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, " 498 IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
618 "Update Req = 0x%X", 499 "Update Req = 0x%X\n",
619 (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >> 500 (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
620 BT_UART_MSG_FRAME1MSGTYPE_POS, 501 BT_UART_MSG_FRAME1MSGTYPE_POS,
621 (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >> 502 (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
@@ -624,7 +505,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
624 BT_UART_MSG_FRAME1UPDATEREQ_POS); 505 BT_UART_MSG_FRAME1UPDATEREQ_POS);
625 506
626 IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, " 507 IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
627 "Chl_SeqN = 0x%X, In band = 0x%X", 508 "Chl_SeqN = 0x%X, In band = 0x%X\n",
628 (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >> 509 (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
629 BT_UART_MSG_FRAME2OPENCONNECTIONS_POS, 510 BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
630 (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >> 511 (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
@@ -635,7 +516,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
635 BT_UART_MSG_FRAME2INBAND_POS); 516 BT_UART_MSG_FRAME2INBAND_POS);
636 517
637 IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, " 518 IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
638 "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X", 519 "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n",
639 (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >> 520 (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
640 BT_UART_MSG_FRAME3SCOESCO_POS, 521 BT_UART_MSG_FRAME3SCOESCO_POS,
641 (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >> 522 (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
@@ -649,12 +530,12 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
649 (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >> 530 (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
650 BT_UART_MSG_FRAME3OBEX_POS); 531 BT_UART_MSG_FRAME3OBEX_POS);
651 532
652 IWL_DEBUG_COEX(priv, "Idle duration = 0x%X", 533 IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n",
653 (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >> 534 (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
654 BT_UART_MSG_FRAME4IDLEDURATION_POS); 535 BT_UART_MSG_FRAME4IDLEDURATION_POS);
655 536
656 IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, " 537 IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
657 "eSCO Retransmissions = 0x%X", 538 "eSCO Retransmissions = 0x%X\n",
658 (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >> 539 (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
659 BT_UART_MSG_FRAME5TXACTIVITY_POS, 540 BT_UART_MSG_FRAME5TXACTIVITY_POS,
660 (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >> 541 (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
@@ -662,14 +543,14 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
662 (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >> 543 (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
663 BT_UART_MSG_FRAME5ESCORETRANSMIT_POS); 544 BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
664 545
665 IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X", 546 IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n",
666 (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >> 547 (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
667 BT_UART_MSG_FRAME6SNIFFINTERVAL_POS, 548 BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
668 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >> 549 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
669 BT_UART_MSG_FRAME6DISCOVERABLE_POS); 550 BT_UART_MSG_FRAME6DISCOVERABLE_POS);
670 551
671 IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = " 552 IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
672 "0x%X, Inquiry = 0x%X, Connectable = 0x%X", 553 "0x%X, Inquiry = 0x%X, Connectable = 0x%X\n",
673 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >> 554 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
674 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS, 555 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
675 (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >> 556 (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
@@ -680,29 +561,62 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
680 BT_UART_MSG_FRAME7CONNECTABLE_POS); 561 BT_UART_MSG_FRAME7CONNECTABLE_POS);
681} 562}
682 563
683static void iwlagn_set_kill_msk(struct iwl_priv *priv, 564static bool iwlagn_set_kill_msk(struct iwl_priv *priv,
684 struct iwl_bt_uart_msg *uart_msg) 565 struct iwl_bt_uart_msg *uart_msg)
685{ 566{
686 u8 kill_msk; 567 bool need_update = false;
687 static const __le32 bt_kill_ack_msg[2] = { 568 u8 kill_msk = IWL_BT_KILL_REDUCE;
569 static const __le32 bt_kill_ack_msg[3] = {
688 IWLAGN_BT_KILL_ACK_MASK_DEFAULT, 570 IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
689 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO }; 571 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
690 static const __le32 bt_kill_cts_msg[2] = { 572 IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
573 static const __le32 bt_kill_cts_msg[3] = {
691 IWLAGN_BT_KILL_CTS_MASK_DEFAULT, 574 IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
692 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO }; 575 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
576 IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
693 577
694 kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) 578 if (!priv->reduced_txpower)
695 ? 1 : 0; 579 kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
580 ? IWL_BT_KILL_OVERRIDE : IWL_BT_KILL_DEFAULT;
696 if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] || 581 if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
697 priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) { 582 priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
698 priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK; 583 priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
699 priv->kill_ack_mask = bt_kill_ack_msg[kill_msk]; 584 priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
700 priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK; 585 priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
701 priv->kill_cts_mask = bt_kill_cts_msg[kill_msk]; 586 priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
587 need_update = true;
588 }
589 return need_update;
590}
702 591
703 /* schedule to send runtime bt_config */ 592static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
704 queue_work(priv->workqueue, &priv->bt_runtime_config); 593 struct iwl_bt_uart_msg *uart_msg)
594{
595 bool need_update = false;
596
597 if (!priv->reduced_txpower &&
598 !iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
599 (uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
600 BT_UART_MSG_FRAME3OBEX_MSK)) &&
601 !(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
602 BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK))) {
603 /* enabling reduced tx power */
604 priv->reduced_txpower = true;
605 priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
606 need_update = true;
607 } else if (priv->reduced_txpower &&
608 (iwl_is_associated(priv, IWL_RXON_CTX_PAN) ||
609 (uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
610 BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) ||
611 !(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
612 BT_UART_MSG_FRAME3OBEX_MSK)))) {
613 /* disable reduced tx power */
614 priv->reduced_txpower = false;
615 priv->bt_valid &= ~IWLAGN_BT_VALID_REDUCED_TX_PWR;
616 need_update = true;
705 } 617 }
618
619 return need_update;
706} 620}
707 621
708int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, 622int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
@@ -750,7 +664,12 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
750 } 664 }
751 } 665 }
752 666
753 iwlagn_set_kill_msk(priv, uart_msg); 667 /* schedule to send runtime bt_config */
668 /* check reduce power before change ack/cts kill mask */
669 if (iwlagn_fill_txpower_mode(priv, uart_msg) ||
670 iwlagn_set_kill_msk(priv, uart_msg))
671 queue_work(priv->workqueue, &priv->bt_runtime_config);
672
754 673
755 /* FIXME: based on notification, adjust the prio_boost */ 674 /* FIXME: based on notification, adjust the prio_boost */
756 675
@@ -798,8 +717,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv)
798 */ 717 */
799static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) 718static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
800{ 719{
801 if (cfg(priv)->bt_params && 720 if (priv->cfg->bt_params &&
802 cfg(priv)->bt_params->advanced_bt_coexist && 721 priv->cfg->bt_params->advanced_bt_coexist &&
803 (priv->bt_full_concurrent || 722 (priv->bt_full_concurrent ||
804 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { 723 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
805 /* 724 /*
@@ -856,7 +775,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
856void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 775void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
857{ 776{
858 bool is_single = is_single_rx_stream(priv); 777 bool is_single = is_single_rx_stream(priv);
859 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status); 778 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
860 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; 779 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
861 u32 active_chains; 780 u32 active_chains;
862 u16 rx_chain; 781 u16 rx_chain;
@@ -868,10 +787,10 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
868 if (priv->chain_noise_data.active_chains) 787 if (priv->chain_noise_data.active_chains)
869 active_chains = priv->chain_noise_data.active_chains; 788 active_chains = priv->chain_noise_data.active_chains;
870 else 789 else
871 active_chains = hw_params(priv).valid_rx_ant; 790 active_chains = priv->hw_params.valid_rx_ant;
872 791
873 if (cfg(priv)->bt_params && 792 if (priv->cfg->bt_params &&
874 cfg(priv)->bt_params->advanced_bt_coexist && 793 priv->cfg->bt_params->advanced_bt_coexist &&
875 (priv->bt_full_concurrent || 794 (priv->bt_full_concurrent ||
876 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { 795 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
877 /* 796 /*
@@ -1190,7 +1109,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1190 memcpy(&rxon, &ctx->active, sizeof(rxon)); 1109 memcpy(&rxon, &ctx->active, sizeof(rxon));
1191 1110
1192 priv->ucode_loaded = false; 1111 priv->ucode_loaded = false;
1193 iwl_trans_stop_device(trans(priv)); 1112 iwl_trans_stop_device(priv->trans);
1194 1113
1195 priv->wowlan = true; 1114 priv->wowlan = true;
1196 1115
@@ -1212,7 +1131,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1212 if (ret) 1131 if (ret)
1213 goto out; 1132 goto out;
1214 1133
1215 if (!iwlagn_mod_params.sw_crypto) { 1134 if (!iwlwifi_mod_params.sw_crypto) {
1216 /* mark all keys clear */ 1135 /* mark all keys clear */
1217 priv->ucode_key_table = 0; 1136 priv->ucode_key_table = 0;
1218 ctx->key_mapping_keys = 0; 1137 ctx->key_mapping_keys = 0;
@@ -1298,6 +1217,12 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1298 return -EIO; 1217 return -EIO;
1299 } 1218 }
1300 1219
1220 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
1221 IWL_ERR(priv, "Command %s failed: FW Error\n",
1222 iwl_dvm_get_cmd_string(cmd->id));
1223 return -EIO;
1224 }
1225
1301 /* 1226 /*
1302 * Synchronous commands from this op-mode must hold 1227 * Synchronous commands from this op-mode must hold
1303 * the mutex, this ensures we don't try to send two 1228 * the mutex, this ensures we don't try to send two
@@ -1312,7 +1237,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1312 return -EIO; 1237 return -EIO;
1313 } 1238 }
1314 1239
1315 return iwl_trans_send_cmd(trans(priv), cmd); 1240 return iwl_trans_send_cmd(priv->trans, cmd);
1316} 1241}
1317 1242
1318int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id, 1243int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 7e590b349dd7..51e1a69ffdda 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -36,9 +36,9 @@
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37 37
38#include "iwl-dev.h" 38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-agn.h" 39#include "iwl-agn.h"
41#include "iwl-op-mode.h" 40#include "iwl-op-mode.h"
41#include "iwl-modparams.h"
42 42
43#define RS_NAME "iwl-agn-rs" 43#define RS_NAME "iwl-agn-rs"
44 44
@@ -420,7 +420,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
420 420
421 load = rs_tl_get_load(lq_data, tid); 421 load = rs_tl_get_load(lq_data, tid);
422 422
423 if ((iwlagn_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) { 423 if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
424 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 424 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
425 sta->addr, tid); 425 sta->addr, tid);
426 ret = ieee80211_start_tx_ba_session(sta, tid, 5000); 426 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -819,7 +819,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
819 819
820 if (num_of_ant(tbl->ant_type) > 1) 820 if (num_of_ant(tbl->ant_type) > 1)
821 tbl->ant_type = 821 tbl->ant_type =
822 first_antenna(hw_params(priv).valid_tx_ant); 822 first_antenna(priv->hw_params.valid_tx_ant);
823 823
824 tbl->is_ht40 = 0; 824 tbl->is_ht40 = 0;
825 tbl->is_SGI = 0; 825 tbl->is_SGI = 0;
@@ -969,7 +969,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
969 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || 969 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
970 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || 970 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
971 (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) || 971 (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
972 (tbl_type.ant_type != info->antenna_sel_tx) || 972 (tbl_type.ant_type != info->status.antenna) ||
973 (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) || 973 (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
974 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || 974 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
975 (rs_index != mac_index)) { 975 (rs_index != mac_index)) {
@@ -1085,7 +1085,7 @@ done:
1085 (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) 1085 (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
1086 rs_program_fix_rate(priv, lq_sta); 1086 rs_program_fix_rate(priv, lq_sta);
1087#endif 1087#endif
1088 if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist) 1088 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
1089 rs_bt_update_lq(priv, ctx, lq_sta); 1089 rs_bt_update_lq(priv, ctx, lq_sta);
1090} 1090}
1091 1091
@@ -1291,7 +1291,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1291 return -1; 1291 return -1;
1292 1292
1293 /* Need both Tx chains/antennas to support MIMO */ 1293 /* Need both Tx chains/antennas to support MIMO */
1294 if (hw_params(priv).tx_chains_num < 2) 1294 if (priv->hw_params.tx_chains_num < 2)
1295 return -1; 1295 return -1;
1296 1296
1297 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); 1297 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
@@ -1347,7 +1347,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
1347 return -1; 1347 return -1;
1348 1348
1349 /* Need both Tx chains/antennas to support MIMO */ 1349 /* Need both Tx chains/antennas to support MIMO */
1350 if (hw_params(priv).tx_chains_num < 3) 1350 if (priv->hw_params.tx_chains_num < 3)
1351 return -1; 1351 return -1;
1352 1352
1353 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n"); 1353 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
@@ -1446,8 +1446,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1446 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1446 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1447 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1447 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1448 u8 start_action; 1448 u8 start_action;
1449 u8 valid_tx_ant = hw_params(priv).valid_tx_ant; 1449 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1450 u8 tx_chains_num = hw_params(priv).tx_chains_num; 1450 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1451 int ret = 0; 1451 int ret = 0;
1452 u8 update_search_tbl_counter = 0; 1452 u8 update_search_tbl_counter = 0;
1453 1453
@@ -1464,7 +1464,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1464 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1464 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1465 /* avoid antenna B and MIMO */ 1465 /* avoid antenna B and MIMO */
1466 valid_tx_ant = 1466 valid_tx_ant =
1467 first_antenna(hw_params(priv).valid_tx_ant); 1467 first_antenna(priv->hw_params.valid_tx_ant);
1468 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 && 1468 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1469 tbl->action != IWL_LEGACY_SWITCH_SISO) 1469 tbl->action != IWL_LEGACY_SWITCH_SISO)
1470 tbl->action = IWL_LEGACY_SWITCH_SISO; 1470 tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1488,7 +1488,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1488 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1488 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1489 tbl->action = IWL_LEGACY_SWITCH_SISO; 1489 tbl->action = IWL_LEGACY_SWITCH_SISO;
1490 valid_tx_ant = 1490 valid_tx_ant =
1491 first_antenna(hw_params(priv).valid_tx_ant); 1491 first_antenna(priv->hw_params.valid_tx_ant);
1492 } 1492 }
1493 1493
1494 start_action = tbl->action; 1494 start_action = tbl->action;
@@ -1622,8 +1622,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1622 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1622 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1623 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1623 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1624 u8 start_action; 1624 u8 start_action;
1625 u8 valid_tx_ant = hw_params(priv).valid_tx_ant; 1625 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1626 u8 tx_chains_num = hw_params(priv).tx_chains_num; 1626 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1627 u8 update_search_tbl_counter = 0; 1627 u8 update_search_tbl_counter = 0;
1628 int ret; 1628 int ret;
1629 1629
@@ -1640,7 +1640,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1640 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1640 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1641 /* avoid antenna B and MIMO */ 1641 /* avoid antenna B and MIMO */
1642 valid_tx_ant = 1642 valid_tx_ant =
1643 first_antenna(hw_params(priv).valid_tx_ant); 1643 first_antenna(priv->hw_params.valid_tx_ant);
1644 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) 1644 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1645 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1645 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1646 break; 1646 break;
@@ -1658,7 +1658,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1658 /* configure as 1x1 if bt full concurrency */ 1658 /* configure as 1x1 if bt full concurrency */
1659 if (priv->bt_full_concurrent) { 1659 if (priv->bt_full_concurrent) {
1660 valid_tx_ant = 1660 valid_tx_ant =
1661 first_antenna(hw_params(priv).valid_tx_ant); 1661 first_antenna(priv->hw_params.valid_tx_ant);
1662 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1662 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1663 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1663 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1664 } 1664 }
@@ -1794,8 +1794,8 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1794 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1794 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1795 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1795 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1796 u8 start_action; 1796 u8 start_action;
1797 u8 valid_tx_ant = hw_params(priv).valid_tx_ant; 1797 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1798 u8 tx_chains_num = hw_params(priv).tx_chains_num; 1798 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1799 u8 update_search_tbl_counter = 0; 1799 u8 update_search_tbl_counter = 0;
1800 int ret; 1800 int ret;
1801 1801
@@ -1964,8 +1964,8 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1964 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1964 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1965 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1965 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1966 u8 start_action; 1966 u8 start_action;
1967 u8 valid_tx_ant = hw_params(priv).valid_tx_ant; 1967 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1968 u8 tx_chains_num = hw_params(priv).tx_chains_num; 1968 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1969 int ret; 1969 int ret;
1970 u8 update_search_tbl_counter = 0; 1970 u8 update_search_tbl_counter = 0;
1971 1971
@@ -2166,7 +2166,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
2166 (lq_sta->total_success > lq_sta->max_success_limit) || 2166 (lq_sta->total_success > lq_sta->max_success_limit) ||
2167 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) 2167 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
2168 && (flush_interval_passed))) { 2168 && (flush_interval_passed))) {
2169 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:", 2169 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
2170 lq_sta->total_failed, 2170 lq_sta->total_failed,
2171 lq_sta->total_success, 2171 lq_sta->total_success,
2172 flush_interval_passed); 2172 flush_interval_passed);
@@ -2698,7 +2698,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2698 2698
2699 i = lq_sta->last_txrate_idx; 2699 i = lq_sta->last_txrate_idx;
2700 2700
2701 valid_tx_ant = hw_params(priv).valid_tx_ant; 2701 valid_tx_ant = priv->hw_params.valid_tx_ant;
2702 2702
2703 if (!lq_sta->search_better_tbl) 2703 if (!lq_sta->search_better_tbl)
2704 active_tbl = lq_sta->active_tbl; 2704 active_tbl = lq_sta->active_tbl;
@@ -2826,6 +2826,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2826 struct iwl_station_priv *sta_priv; 2826 struct iwl_station_priv *sta_priv;
2827 struct iwl_lq_sta *lq_sta; 2827 struct iwl_lq_sta *lq_sta;
2828 struct ieee80211_supported_band *sband; 2828 struct ieee80211_supported_band *sband;
2829 unsigned long supp; /* must be unsigned long for for_each_set_bit */
2829 2830
2830 sta_priv = (struct iwl_station_priv *) sta->drv_priv; 2831 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2831 lq_sta = &sta_priv->lq_sta; 2832 lq_sta = &sta_priv->lq_sta;
@@ -2855,8 +2856,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2855 lq_sta->max_rate_idx = -1; 2856 lq_sta->max_rate_idx = -1;
2856 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; 2857 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2857 lq_sta->is_green = rs_use_green(sta); 2858 lq_sta->is_green = rs_use_green(sta);
2858 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); 2859 lq_sta->band = sband->band;
2859 lq_sta->band = priv->band; 2860 /*
2861 * active legacy rates as per supported rates bitmap
2862 */
2863 supp = sta->supp_rates[sband->band];
2864 lq_sta->active_legacy_rate = 0;
2865 for_each_set_bit(i, &supp, BITS_PER_LONG)
2866 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
2867
2860 /* 2868 /*
2861 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), 2869 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2862 * supp_rates[] does not; shift to convert format, force 9 MBits off. 2870 * supp_rates[] does not; shift to convert format, force 9 MBits off.
@@ -2884,15 +2892,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2884 2892
2885 /* These values will be overridden later */ 2893 /* These values will be overridden later */
2886 lq_sta->lq.general_params.single_stream_ant_msk = 2894 lq_sta->lq.general_params.single_stream_ant_msk =
2887 first_antenna(hw_params(priv).valid_tx_ant); 2895 first_antenna(priv->hw_params.valid_tx_ant);
2888 lq_sta->lq.general_params.dual_stream_ant_msk = 2896 lq_sta->lq.general_params.dual_stream_ant_msk =
2889 hw_params(priv).valid_tx_ant & 2897 priv->hw_params.valid_tx_ant &
2890 ~first_antenna(hw_params(priv).valid_tx_ant); 2898 ~first_antenna(priv->hw_params.valid_tx_ant);
2891 if (!lq_sta->lq.general_params.dual_stream_ant_msk) { 2899 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2892 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; 2900 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2893 } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) { 2901 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2894 lq_sta->lq.general_params.dual_stream_ant_msk = 2902 lq_sta->lq.general_params.dual_stream_ant_msk =
2895 hw_params(priv).valid_tx_ant; 2903 priv->hw_params.valid_tx_ant;
2896 } 2904 }
2897 2905
2898 /* as default allow aggregation for all tids */ 2906 /* as default allow aggregation for all tids */
@@ -2938,7 +2946,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2938 if (priv && priv->bt_full_concurrent) { 2946 if (priv && priv->bt_full_concurrent) {
2939 /* 1x1 only */ 2947 /* 1x1 only */
2940 tbl_type.ant_type = 2948 tbl_type.ant_type =
2941 first_antenna(hw_params(priv).valid_tx_ant); 2949 first_antenna(priv->hw_params.valid_tx_ant);
2942 } 2950 }
2943 2951
2944 /* How many times should we repeat the initial rate? */ 2952 /* How many times should we repeat the initial rate? */
@@ -2970,7 +2978,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2970 if (priv->bt_full_concurrent) 2978 if (priv->bt_full_concurrent)
2971 valid_tx_ant = ANT_A; 2979 valid_tx_ant = ANT_A;
2972 else 2980 else
2973 valid_tx_ant = hw_params(priv).valid_tx_ant; 2981 valid_tx_ant = priv->hw_params.valid_tx_ant;
2974 } 2982 }
2975 2983
2976 /* Fill rest of rate table */ 2984 /* Fill rest of rate table */
@@ -3004,7 +3012,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3004 if (priv && priv->bt_full_concurrent) { 3012 if (priv && priv->bt_full_concurrent) {
3005 /* 1x1 only */ 3013 /* 1x1 only */
3006 tbl_type.ant_type = 3014 tbl_type.ant_type =
3007 first_antenna(hw_params(priv).valid_tx_ant); 3015 first_antenna(priv->hw_params.valid_tx_ant);
3008 } 3016 }
3009 3017
3010 /* Indicate to uCode which entries might be MIMO. 3018 /* Indicate to uCode which entries might be MIMO.
@@ -3055,11 +3063,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3055 * overwrite if needed, pass aggregation time limit 3063 * overwrite if needed, pass aggregation time limit
3056 * to uCode in uSec 3064 * to uCode in uSec
3057 */ 3065 */
3058 if (priv && cfg(priv)->bt_params && 3066 if (priv && priv->cfg->bt_params &&
3059 cfg(priv)->bt_params->agg_time_limit && 3067 priv->cfg->bt_params->agg_time_limit &&
3060 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) 3068 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
3061 lq_cmd->agg_params.agg_time_limit = 3069 lq_cmd->agg_params.agg_time_limit =
3062 cpu_to_le16(cfg(priv)->bt_params->agg_time_limit); 3070 cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
3063} 3071}
3064 3072
3065static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 3073static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -3091,7 +3099,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
3091 u8 ant_sel_tx; 3099 u8 ant_sel_tx;
3092 3100
3093 priv = lq_sta->drv; 3101 priv = lq_sta->drv;
3094 valid_tx_ant = hw_params(priv).valid_tx_ant; 3102 valid_tx_ant = priv->hw_params.valid_tx_ant;
3095 if (lq_sta->dbg_fixed_rate) { 3103 if (lq_sta->dbg_fixed_rate) {
3096 ant_sel_tx = 3104 ant_sel_tx =
3097 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) 3105 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3162,9 +3170,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3162 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 3170 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3163 lq_sta->dbg_fixed_rate); 3171 lq_sta->dbg_fixed_rate);
3164 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", 3172 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3165 (hw_params(priv).valid_tx_ant & ANT_A) ? "ANT_A," : "", 3173 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
3166 (hw_params(priv).valid_tx_ant & ANT_B) ? "ANT_B," : "", 3174 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
3167 (hw_params(priv).valid_tx_ant & ANT_C) ? "ANT_C" : ""); 3175 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
3168 desc += sprintf(buff+desc, "lq type %s\n", 3176 desc += sprintf(buff+desc, "lq type %s\n",
3169 (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); 3177 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
3170 if (is_Ht(tbl->lq_type)) { 3178 if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 203b1c13c491..82d02e1ae89f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -30,6 +30,7 @@
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31 31
32#include "iwl-commands.h" 32#include "iwl-commands.h"
33#include "iwl-config.h"
33 34
34struct iwl_rate_info { 35struct iwl_rate_info {
35 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ 36 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
@@ -174,32 +175,6 @@ enum {
174 IWL_RATE_11M_IEEE = 22, 175 IWL_RATE_11M_IEEE = 22,
175}; 176};
176 177
177#define IWL_CCK_BASIC_RATES_MASK \
178 (IWL_RATE_1M_MASK | \
179 IWL_RATE_2M_MASK)
180
181#define IWL_CCK_RATES_MASK \
182 (IWL_CCK_BASIC_RATES_MASK | \
183 IWL_RATE_5M_MASK | \
184 IWL_RATE_11M_MASK)
185
186#define IWL_OFDM_BASIC_RATES_MASK \
187 (IWL_RATE_6M_MASK | \
188 IWL_RATE_12M_MASK | \
189 IWL_RATE_24M_MASK)
190
191#define IWL_OFDM_RATES_MASK \
192 (IWL_OFDM_BASIC_RATES_MASK | \
193 IWL_RATE_9M_MASK | \
194 IWL_RATE_18M_MASK | \
195 IWL_RATE_36M_MASK | \
196 IWL_RATE_48M_MASK | \
197 IWL_RATE_54M_MASK)
198
199#define IWL_BASIC_RATES_MASK \
200 (IWL_OFDM_BASIC_RATES_MASK | \
201 IWL_CCK_BASIC_RATES_MASK)
202
203#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) 178#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
204 179
205#define IWL_INVALID_VALUE -1 180#define IWL_INVALID_VALUE -1
@@ -306,15 +281,6 @@ enum iwl_table_type {
306#define is_a_band(tbl) ((tbl) == LQ_A) 281#define is_a_band(tbl) ((tbl) == LQ_A)
307#define is_g_and(tbl) ((tbl) == LQ_G) 282#define is_g_and(tbl) ((tbl) == LQ_G)
308 283
309#define ANT_NONE 0x0
310#define ANT_A BIT(0)
311#define ANT_B BIT(1)
312#define ANT_AB (ANT_A | ANT_B)
313#define ANT_C BIT(2)
314#define ANT_AC (ANT_A | ANT_C)
315#define ANT_BC (ANT_B | ANT_C)
316#define ANT_ABC (ANT_AB | ANT_C)
317
318#define IWL_MAX_MCS_DISPLAY_SIZE 12 284#define IWL_MAX_MCS_DISPLAY_SIZE 12
319 285
320struct iwl_rate_mcs_info { 286struct iwl_rate_mcs_info {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index f4b84d1596e3..403de96f9747 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -34,95 +34,91 @@
34#include <asm/unaligned.h> 34#include <asm/unaligned.h>
35#include "iwl-eeprom.h" 35#include "iwl-eeprom.h"
36#include "iwl-dev.h" 36#include "iwl-dev.h"
37#include "iwl-core.h"
38#include "iwl-io.h" 37#include "iwl-io.h"
39#include "iwl-agn-calib.h" 38#include "iwl-agn-calib.h"
40#include "iwl-agn.h" 39#include "iwl-agn.h"
41#include "iwl-shared.h" 40#include "iwl-modparams.h"
42 41
43const char *get_cmd_string(u8 cmd) 42#define IWL_CMD_ENTRY(x) [x] = #x
44{ 43
45 switch (cmd) { 44const char *iwl_dvm_cmd_strings[REPLY_MAX] = {
46 IWL_CMD(REPLY_ALIVE); 45 IWL_CMD_ENTRY(REPLY_ALIVE),
47 IWL_CMD(REPLY_ERROR); 46 IWL_CMD_ENTRY(REPLY_ERROR),
48 IWL_CMD(REPLY_ECHO); 47 IWL_CMD_ENTRY(REPLY_ECHO),
49 IWL_CMD(REPLY_RXON); 48 IWL_CMD_ENTRY(REPLY_RXON),
50 IWL_CMD(REPLY_RXON_ASSOC); 49 IWL_CMD_ENTRY(REPLY_RXON_ASSOC),
51 IWL_CMD(REPLY_QOS_PARAM); 50 IWL_CMD_ENTRY(REPLY_QOS_PARAM),
52 IWL_CMD(REPLY_RXON_TIMING); 51 IWL_CMD_ENTRY(REPLY_RXON_TIMING),
53 IWL_CMD(REPLY_ADD_STA); 52 IWL_CMD_ENTRY(REPLY_ADD_STA),
54 IWL_CMD(REPLY_REMOVE_STA); 53 IWL_CMD_ENTRY(REPLY_REMOVE_STA),
55 IWL_CMD(REPLY_REMOVE_ALL_STA); 54 IWL_CMD_ENTRY(REPLY_REMOVE_ALL_STA),
56 IWL_CMD(REPLY_TXFIFO_FLUSH); 55 IWL_CMD_ENTRY(REPLY_TXFIFO_FLUSH),
57 IWL_CMD(REPLY_WEPKEY); 56 IWL_CMD_ENTRY(REPLY_WEPKEY),
58 IWL_CMD(REPLY_TX); 57 IWL_CMD_ENTRY(REPLY_TX),
59 IWL_CMD(REPLY_LEDS_CMD); 58 IWL_CMD_ENTRY(REPLY_LEDS_CMD),
60 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); 59 IWL_CMD_ENTRY(REPLY_TX_LINK_QUALITY_CMD),
61 IWL_CMD(COEX_PRIORITY_TABLE_CMD); 60 IWL_CMD_ENTRY(COEX_PRIORITY_TABLE_CMD),
62 IWL_CMD(COEX_MEDIUM_NOTIFICATION); 61 IWL_CMD_ENTRY(COEX_MEDIUM_NOTIFICATION),
63 IWL_CMD(COEX_EVENT_CMD); 62 IWL_CMD_ENTRY(COEX_EVENT_CMD),
64 IWL_CMD(REPLY_QUIET_CMD); 63 IWL_CMD_ENTRY(REPLY_QUIET_CMD),
65 IWL_CMD(REPLY_CHANNEL_SWITCH); 64 IWL_CMD_ENTRY(REPLY_CHANNEL_SWITCH),
66 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); 65 IWL_CMD_ENTRY(CHANNEL_SWITCH_NOTIFICATION),
67 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); 66 IWL_CMD_ENTRY(REPLY_SPECTRUM_MEASUREMENT_CMD),
68 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); 67 IWL_CMD_ENTRY(SPECTRUM_MEASURE_NOTIFICATION),
69 IWL_CMD(POWER_TABLE_CMD); 68 IWL_CMD_ENTRY(POWER_TABLE_CMD),
70 IWL_CMD(PM_SLEEP_NOTIFICATION); 69 IWL_CMD_ENTRY(PM_SLEEP_NOTIFICATION),
71 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); 70 IWL_CMD_ENTRY(PM_DEBUG_STATISTIC_NOTIFIC),
72 IWL_CMD(REPLY_SCAN_CMD); 71 IWL_CMD_ENTRY(REPLY_SCAN_CMD),
73 IWL_CMD(REPLY_SCAN_ABORT_CMD); 72 IWL_CMD_ENTRY(REPLY_SCAN_ABORT_CMD),
74 IWL_CMD(SCAN_START_NOTIFICATION); 73 IWL_CMD_ENTRY(SCAN_START_NOTIFICATION),
75 IWL_CMD(SCAN_RESULTS_NOTIFICATION); 74 IWL_CMD_ENTRY(SCAN_RESULTS_NOTIFICATION),
76 IWL_CMD(SCAN_COMPLETE_NOTIFICATION); 75 IWL_CMD_ENTRY(SCAN_COMPLETE_NOTIFICATION),
77 IWL_CMD(BEACON_NOTIFICATION); 76 IWL_CMD_ENTRY(BEACON_NOTIFICATION),
78 IWL_CMD(REPLY_TX_BEACON); 77 IWL_CMD_ENTRY(REPLY_TX_BEACON),
79 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); 78 IWL_CMD_ENTRY(WHO_IS_AWAKE_NOTIFICATION),
80 IWL_CMD(QUIET_NOTIFICATION); 79 IWL_CMD_ENTRY(QUIET_NOTIFICATION),
81 IWL_CMD(REPLY_TX_PWR_TABLE_CMD); 80 IWL_CMD_ENTRY(REPLY_TX_PWR_TABLE_CMD),
82 IWL_CMD(MEASURE_ABORT_NOTIFICATION); 81 IWL_CMD_ENTRY(MEASURE_ABORT_NOTIFICATION),
83 IWL_CMD(REPLY_BT_CONFIG); 82 IWL_CMD_ENTRY(REPLY_BT_CONFIG),
84 IWL_CMD(REPLY_STATISTICS_CMD); 83 IWL_CMD_ENTRY(REPLY_STATISTICS_CMD),
85 IWL_CMD(STATISTICS_NOTIFICATION); 84 IWL_CMD_ENTRY(STATISTICS_NOTIFICATION),
86 IWL_CMD(REPLY_CARD_STATE_CMD); 85 IWL_CMD_ENTRY(REPLY_CARD_STATE_CMD),
87 IWL_CMD(CARD_STATE_NOTIFICATION); 86 IWL_CMD_ENTRY(CARD_STATE_NOTIFICATION),
88 IWL_CMD(MISSED_BEACONS_NOTIFICATION); 87 IWL_CMD_ENTRY(MISSED_BEACONS_NOTIFICATION),
89 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); 88 IWL_CMD_ENTRY(REPLY_CT_KILL_CONFIG_CMD),
90 IWL_CMD(SENSITIVITY_CMD); 89 IWL_CMD_ENTRY(SENSITIVITY_CMD),
91 IWL_CMD(REPLY_PHY_CALIBRATION_CMD); 90 IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD),
92 IWL_CMD(REPLY_RX_PHY_CMD); 91 IWL_CMD_ENTRY(REPLY_RX_PHY_CMD),
93 IWL_CMD(REPLY_RX_MPDU_CMD); 92 IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD),
94 IWL_CMD(REPLY_RX); 93 IWL_CMD_ENTRY(REPLY_RX),
95 IWL_CMD(REPLY_COMPRESSED_BA); 94 IWL_CMD_ENTRY(REPLY_COMPRESSED_BA),
96 IWL_CMD(CALIBRATION_CFG_CMD); 95 IWL_CMD_ENTRY(CALIBRATION_CFG_CMD),
97 IWL_CMD(CALIBRATION_RES_NOTIFICATION); 96 IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION),
98 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION); 97 IWL_CMD_ENTRY(CALIBRATION_COMPLETE_NOTIFICATION),
99 IWL_CMD(REPLY_TX_POWER_DBM_CMD); 98 IWL_CMD_ENTRY(REPLY_TX_POWER_DBM_CMD),
100 IWL_CMD(TEMPERATURE_NOTIFICATION); 99 IWL_CMD_ENTRY(TEMPERATURE_NOTIFICATION),
101 IWL_CMD(TX_ANT_CONFIGURATION_CMD); 100 IWL_CMD_ENTRY(TX_ANT_CONFIGURATION_CMD),
102 IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF); 101 IWL_CMD_ENTRY(REPLY_BT_COEX_PROFILE_NOTIF),
103 IWL_CMD(REPLY_BT_COEX_PRIO_TABLE); 102 IWL_CMD_ENTRY(REPLY_BT_COEX_PRIO_TABLE),
104 IWL_CMD(REPLY_BT_COEX_PROT_ENV); 103 IWL_CMD_ENTRY(REPLY_BT_COEX_PROT_ENV),
105 IWL_CMD(REPLY_WIPAN_PARAMS); 104 IWL_CMD_ENTRY(REPLY_WIPAN_PARAMS),
106 IWL_CMD(REPLY_WIPAN_RXON); 105 IWL_CMD_ENTRY(REPLY_WIPAN_RXON),
107 IWL_CMD(REPLY_WIPAN_RXON_TIMING); 106 IWL_CMD_ENTRY(REPLY_WIPAN_RXON_TIMING),
108 IWL_CMD(REPLY_WIPAN_RXON_ASSOC); 107 IWL_CMD_ENTRY(REPLY_WIPAN_RXON_ASSOC),
109 IWL_CMD(REPLY_WIPAN_QOS_PARAM); 108 IWL_CMD_ENTRY(REPLY_WIPAN_QOS_PARAM),
110 IWL_CMD(REPLY_WIPAN_WEPKEY); 109 IWL_CMD_ENTRY(REPLY_WIPAN_WEPKEY),
111 IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH); 110 IWL_CMD_ENTRY(REPLY_WIPAN_P2P_CHANNEL_SWITCH),
112 IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION); 111 IWL_CMD_ENTRY(REPLY_WIPAN_NOA_NOTIFICATION),
113 IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE); 112 IWL_CMD_ENTRY(REPLY_WIPAN_DEACTIVATION_COMPLETE),
114 IWL_CMD(REPLY_WOWLAN_PATTERNS); 113 IWL_CMD_ENTRY(REPLY_WOWLAN_PATTERNS),
115 IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER); 114 IWL_CMD_ENTRY(REPLY_WOWLAN_WAKEUP_FILTER),
116 IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS); 115 IWL_CMD_ENTRY(REPLY_WOWLAN_TSC_RSC_PARAMS),
117 IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS); 116 IWL_CMD_ENTRY(REPLY_WOWLAN_TKIP_PARAMS),
118 IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL); 117 IWL_CMD_ENTRY(REPLY_WOWLAN_KEK_KCK_MATERIAL),
119 IWL_CMD(REPLY_WOWLAN_GET_STATUS); 118 IWL_CMD_ENTRY(REPLY_WOWLAN_GET_STATUS),
120 IWL_CMD(REPLY_D3_CONFIG); 119 IWL_CMD_ENTRY(REPLY_D3_CONFIG),
121 default: 120};
122 return "UNKNOWN"; 121#undef IWL_CMD_ENTRY
123
124 }
125}
126 122
127/****************************************************************************** 123/******************************************************************************
128 * 124 *
@@ -137,10 +133,9 @@ static int iwlagn_rx_reply_error(struct iwl_priv *priv,
137 struct iwl_rx_packet *pkt = rxb_addr(rxb); 133 struct iwl_rx_packet *pkt = rxb_addr(rxb);
138 struct iwl_error_resp *err_resp = (void *)pkt->data; 134 struct iwl_error_resp *err_resp = (void *)pkt->data;
139 135
140 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " 136 IWL_ERR(priv, "Error Reply type 0x%08X cmd REPLY_ERROR (0x%02X) "
141 "seq 0x%04X ser 0x%08X\n", 137 "seq 0x%04X ser 0x%08X\n",
142 le32_to_cpu(err_resp->error_type), 138 le32_to_cpu(err_resp->error_type),
143 get_cmd_string(err_resp->cmd_id),
144 err_resp->cmd_id, 139 err_resp->cmd_id,
145 le16_to_cpu(err_resp->bad_cmd_seq_num), 140 le16_to_cpu(err_resp->bad_cmd_seq_num),
146 le32_to_cpu(err_resp->error_info)); 141 le32_to_cpu(err_resp->error_info));
@@ -216,8 +211,7 @@ static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
216 u32 __maybe_unused len = 211 u32 __maybe_unused len =
217 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 212 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
218 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " 213 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
219 "notification for %s:\n", len, 214 "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
220 get_cmd_string(pkt->hdr.cmd));
221 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len); 215 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
222 return 0; 216 return 0;
223} 217}
@@ -246,69 +240,6 @@ static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
246 return 0; 240 return 0;
247} 241}
248 242
249/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
250#define ACK_CNT_RATIO (50)
251#define BA_TIMEOUT_CNT (5)
252#define BA_TIMEOUT_MAX (16)
253
254/**
255 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
256 *
257 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
258 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
259 * operation state.
260 */
261static bool iwlagn_good_ack_health(struct iwl_priv *priv,
262 struct statistics_tx *cur)
263{
264 int actual_delta, expected_delta, ba_timeout_delta;
265 struct statistics_tx *old;
266
267 if (priv->agg_tids_count)
268 return true;
269
270 lockdep_assert_held(&priv->statistics.lock);
271
272 old = &priv->statistics.tx;
273
274 actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
275 le32_to_cpu(old->actual_ack_cnt);
276 expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
277 le32_to_cpu(old->expected_ack_cnt);
278
279 /* Values should not be negative, but we do not trust the firmware */
280 if (actual_delta <= 0 || expected_delta <= 0)
281 return true;
282
283 ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
284 le32_to_cpu(old->agg.ba_timeout);
285
286 if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
287 ba_timeout_delta > BA_TIMEOUT_CNT) {
288 IWL_DEBUG_RADIO(priv,
289 "deltas: actual %d expected %d ba_timeout %d\n",
290 actual_delta, expected_delta, ba_timeout_delta);
291
292#ifdef CONFIG_IWLWIFI_DEBUGFS
293 /*
294 * This is ifdef'ed on DEBUGFS because otherwise the
295 * statistics aren't available. If DEBUGFS is set but
296 * DEBUG is not, these will just compile out.
297 */
298 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
299 priv->delta_stats.tx.rx_detected_cnt);
300 IWL_DEBUG_RADIO(priv,
301 "ack_or_ba_timeout_collision delta %d\n",
302 priv->delta_stats.tx.ack_or_ba_timeout_collision);
303#endif
304
305 if (ba_timeout_delta >= BA_TIMEOUT_MAX)
306 return false;
307 }
308
309 return true;
310}
311
312/** 243/**
313 * iwl_good_plcp_health - checks for plcp error. 244 * iwl_good_plcp_health - checks for plcp error.
314 * 245 *
@@ -347,6 +278,45 @@ static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
347 return true; 278 return true;
348} 279}
349 280
281int iwl_force_rf_reset(struct iwl_priv *priv, bool external)
282{
283 struct iwl_rf_reset *rf_reset;
284
285 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
286 return -EAGAIN;
287
288 if (!iwl_is_any_associated(priv)) {
289 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
290 return -ENOLINK;
291 }
292
293 rf_reset = &priv->rf_reset;
294 rf_reset->reset_request_count++;
295 if (!external && rf_reset->last_reset_jiffies &&
296 time_after(rf_reset->last_reset_jiffies +
297 IWL_DELAY_NEXT_FORCE_RF_RESET, jiffies)) {
298 IWL_DEBUG_INFO(priv, "RF reset rejected\n");
299 rf_reset->reset_reject_count++;
300 return -EAGAIN;
301 }
302 rf_reset->reset_success_count++;
303 rf_reset->last_reset_jiffies = jiffies;
304
305 /*
306 * There is no easy and better way to force reset the radio,
307 * the only known method is switching channel which will force to
308 * reset and tune the radio.
309 * Use internal short scan (single channel) operation to should
310 * achieve this objective.
311 * Driver should reset the radio when number of consecutive missed
312 * beacon, or any other uCode error condition detected.
313 */
314 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
315 iwl_internal_short_hw_scan(priv);
316 return 0;
317}
318
319
350static void iwlagn_recover_from_statistics(struct iwl_priv *priv, 320static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
351 struct statistics_rx_phy *cur_ofdm, 321 struct statistics_rx_phy *cur_ofdm,
352 struct statistics_rx_ht_phy *cur_ofdm_ht, 322 struct statistics_rx_ht_phy *cur_ofdm_ht,
@@ -368,15 +338,9 @@ static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
368 if (msecs < 99) 338 if (msecs < 99)
369 return; 339 return;
370 340
371 if (iwlagn_mod_params.ack_check && !iwlagn_good_ack_health(priv, tx)) { 341 if (iwlwifi_mod_params.plcp_check &&
372 IWL_ERR(priv, "low ack count detected, restart firmware\n");
373 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
374 return;
375 }
376
377 if (iwlagn_mod_params.plcp_check &&
378 !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs)) 342 !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
379 iwl_force_reset(priv, IWL_RF_RESET, false); 343 iwl_force_rf_reset(priv, false);
380} 344}
381 345
382/* Calculate noise level, based on measurements during network silence just 346/* Calculate noise level, based on measurements during network silence just
@@ -589,8 +553,8 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
589 iwlagn_rx_calc_noise(priv); 553 iwlagn_rx_calc_noise(priv);
590 queue_work(priv->workqueue, &priv->run_time_calib_work); 554 queue_work(priv->workqueue, &priv->run_time_calib_work);
591 } 555 }
592 if (cfg(priv)->lib->temperature && change) 556 if (priv->lib->temperature && change)
593 cfg(priv)->lib->temperature(priv); 557 priv->lib->temperature(priv);
594 558
595 spin_unlock(&priv->statistics.lock); 559 spin_unlock(&priv->statistics.lock);
596 560
@@ -639,16 +603,16 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
639 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | 603 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
640 CT_CARD_DISABLED)) { 604 CT_CARD_DISABLED)) {
641 605
642 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET, 606 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
643 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 607 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
644 608
645 iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C, 609 iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
646 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 610 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
647 611
648 if (!(flags & RXON_CARD_DISABLED)) { 612 if (!(flags & RXON_CARD_DISABLED)) {
649 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, 613 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
650 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 614 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
651 iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C, 615 iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
652 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 616 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
653 } 617 }
654 if (flags & CT_CARD_DISABLED) 618 if (flags & CT_CARD_DISABLED)
@@ -671,7 +635,7 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
671 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 635 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
672 test_bit(STATUS_RF_KILL_HW, &priv->status)); 636 test_bit(STATUS_RF_KILL_HW, &priv->status));
673 else 637 else
674 wake_up(&trans(priv)->wait_command_queue); 638 wake_up(&priv->trans->wait_command_queue);
675 return 0; 639 return 0;
676} 640}
677 641
@@ -773,8 +737,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
773 struct sk_buff *skb; 737 struct sk_buff *skb;
774 __le16 fc = hdr->frame_control; 738 __le16 fc = hdr->frame_control;
775 struct iwl_rxon_context *ctx; 739 struct iwl_rxon_context *ctx;
776 struct page *p; 740 unsigned int hdrlen, fraglen;
777 int offset;
778 741
779 /* We only process data packets if the interface is open */ 742 /* We only process data packets if the interface is open */
780 if (unlikely(!priv->is_open)) { 743 if (unlikely(!priv->is_open)) {
@@ -784,21 +747,34 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
784 } 747 }
785 748
786 /* In case of HW accelerated crypto and bad decryption, drop */ 749 /* In case of HW accelerated crypto and bad decryption, drop */
787 if (!iwlagn_mod_params.sw_crypto && 750 if (!iwlwifi_mod_params.sw_crypto &&
788 iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 751 iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
789 return; 752 return;
790 753
791 skb = dev_alloc_skb(128); 754 /* Dont use dev_alloc_skb(), we'll have enough headroom once
755 * ieee80211_hdr pulled.
756 */
757 skb = alloc_skb(128, GFP_ATOMIC);
792 if (!skb) { 758 if (!skb) {
793 IWL_ERR(priv, "dev_alloc_skb failed\n"); 759 IWL_ERR(priv, "alloc_skb failed\n");
794 return; 760 return;
795 } 761 }
762 /* If frame is small enough to fit in skb->head, pull it completely.
763 * If not, only pull ieee80211_hdr so that splice() or TCP coalesce
764 * are more efficient.
765 */
766 hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);
767
768 memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
769 fraglen = len - hdrlen;
796 770
797 offset = (void *)hdr - rxb_addr(rxb); 771 if (fraglen) {
798 p = rxb_steal_page(rxb); 772 int offset = (void *)hdr + hdrlen -
799 skb_add_rx_frag(skb, 0, p, offset, len, len); 773 rxb_addr(rxb) + rxb_offset(rxb);
800 774
801 iwl_update_stats(priv, false, fc, len); 775 skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
776 fraglen, rxb->truesize);
777 }
802 778
803 /* 779 /*
804 * Wake any queues that were stopped due to a passive channel tx 780 * Wake any queues that were stopped due to a passive channel tx
@@ -809,8 +785,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
809 */ 785 */
810 if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) { 786 if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) {
811 for_each_context(priv, ctx) { 787 for_each_context(priv, ctx) {
812 if (compare_ether_addr(hdr->addr3, 788 if (!ether_addr_equal(hdr->addr3,
813 ctx->active.bssid_addr)) 789 ctx->active.bssid_addr))
814 continue; 790 continue;
815 iwlagn_lift_passive_no_rx(priv); 791 iwlagn_lift_passive_no_rx(priv);
816 } 792 }
@@ -970,7 +946,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
970 } 946 }
971 947
972 if ((unlikely(phy_res->cfg_phy_cnt > 20))) { 948 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
973 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", 949 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
974 phy_res->cfg_phy_cnt); 950 phy_res->cfg_phy_cnt);
975 return 0; 951 return 0;
976 } 952 }
@@ -1005,7 +981,6 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
1005 /* Find max signal strength (dBm) among 3 antenna/receiver chains */ 981 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1006 rx_status.signal = iwlagn_calc_rssi(priv, phy_res); 982 rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
1007 983
1008 iwl_dbg_log_rx_data_frame(priv, len, header);
1009 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", 984 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1010 rx_status.signal, (unsigned long long)rx_status.mactime); 985 rx_status.signal, (unsigned long long)rx_status.mactime);
1011 986
@@ -1134,16 +1109,13 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
1134 handlers[REPLY_COMPRESSED_BA] = 1109 handlers[REPLY_COMPRESSED_BA] =
1135 iwlagn_rx_reply_compressed_ba; 1110 iwlagn_rx_reply_compressed_ba;
1136 1111
1137 /* init calibration handlers */
1138 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
1139 iwlagn_rx_calib_result;
1140 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; 1112 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
1141 1113
1142 /* set up notification wait support */ 1114 /* set up notification wait support */
1143 iwl_notification_wait_init(&priv->notif_wait); 1115 iwl_notification_wait_init(&priv->notif_wait);
1144 1116
1145 /* Set up BT Rx handlers */ 1117 /* Set up BT Rx handlers */
1146 if (cfg(priv)->bt_params) 1118 if (priv->cfg->bt_params)
1147 iwlagn_bt_rx_handler_setup(priv); 1119 iwlagn_bt_rx_handler_setup(priv);
1148} 1120}
1149 1121
@@ -1185,9 +1157,9 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
1185 err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd); 1157 err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
1186 } else { 1158 } else {
1187 /* No handling needed */ 1159 /* No handling needed */
1188 IWL_DEBUG_RX(priv, 1160 IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
1189 "No handler needed for %s, 0x%02x\n", 1161 iwl_dvm_get_cmd_string(pkt->hdr.cmd),
1190 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1162 pkt->hdr.cmd);
1191 } 1163 }
1192 } 1164 }
1193 return err; 1165 return err;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 2e1a31797a9e..74fbee627306 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -24,12 +24,79 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/etherdevice.h>
27#include "iwl-dev.h" 28#include "iwl-dev.h"
28#include "iwl-agn.h" 29#include "iwl-agn.h"
29#include "iwl-core.h"
30#include "iwl-agn-calib.h" 30#include "iwl-agn-calib.h"
31#include "iwl-trans.h" 31#include "iwl-trans.h"
32#include "iwl-shared.h" 32#include "iwl-modparams.h"
33
34/*
35 * initialize rxon structure with default values from eeprom
36 */
37void iwl_connection_init_rx_config(struct iwl_priv *priv,
38 struct iwl_rxon_context *ctx)
39{
40 const struct iwl_channel_info *ch_info;
41
42 memset(&ctx->staging, 0, sizeof(ctx->staging));
43
44 if (!ctx->vif) {
45 ctx->staging.dev_type = ctx->unused_devtype;
46 } else
47 switch (ctx->vif->type) {
48 case NL80211_IFTYPE_AP:
49 ctx->staging.dev_type = ctx->ap_devtype;
50 break;
51
52 case NL80211_IFTYPE_STATION:
53 ctx->staging.dev_type = ctx->station_devtype;
54 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
55 break;
56
57 case NL80211_IFTYPE_ADHOC:
58 ctx->staging.dev_type = ctx->ibss_devtype;
59 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
60 ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
61 RXON_FILTER_ACCEPT_GRP_MSK;
62 break;
63
64 default:
65 IWL_ERR(priv, "Unsupported interface type %d\n",
66 ctx->vif->type);
67 break;
68 }
69
70#if 0
71 /* TODO: Figure out when short_preamble would be set and cache from
72 * that */
73 if (!hw_to_local(priv->hw)->short_preamble)
74 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
75 else
76 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
77#endif
78
79 ch_info = iwl_get_channel_info(priv, priv->band,
80 le16_to_cpu(ctx->active.channel));
81
82 if (!ch_info)
83 ch_info = &priv->channel_info[0];
84
85 ctx->staging.channel = cpu_to_le16(ch_info->channel);
86 priv->band = ch_info->band;
87
88 iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
89
90 /* clear both MIX and PURE40 mode flag */
91 ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
92 RXON_FLG_CHANNEL_MODE_PURE_40);
93 if (ctx->vif)
94 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
95
96 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
97 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
98 ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
99}
33 100
34static int iwlagn_disable_bss(struct iwl_priv *priv, 101static int iwlagn_disable_bss(struct iwl_priv *priv,
35 struct iwl_rxon_context *ctx, 102 struct iwl_rxon_context *ctx,
@@ -59,9 +126,12 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
59 __le32 old_filter = send->filter_flags; 126 __le32 old_filter = send->filter_flags;
60 u8 old_dev_type = send->dev_type; 127 u8 old_dev_type = send->dev_type;
61 int ret; 128 int ret;
129 static const u8 deactivate_cmd[] = {
130 REPLY_WIPAN_DEACTIVATION_COMPLETE
131 };
62 132
63 iwl_init_notification_wait(&priv->notif_wait, &disable_wait, 133 iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
64 REPLY_WIPAN_DEACTIVATION_COMPLETE, 134 deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
65 NULL, NULL); 135 NULL, NULL);
66 136
67 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 137 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -101,8 +171,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
101 return ret; 171 return ret;
102} 172}
103 173
104static void iwlagn_update_qos(struct iwl_priv *priv, 174void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
105 struct iwl_rxon_context *ctx)
106{ 175{
107 int ret; 176 int ret;
108 177
@@ -129,8 +198,8 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
129 IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n"); 198 IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
130} 199}
131 200
132static int iwlagn_update_beacon(struct iwl_priv *priv, 201int iwlagn_update_beacon(struct iwl_priv *priv,
133 struct ieee80211_vif *vif) 202 struct ieee80211_vif *vif)
134{ 203{
135 lockdep_assert_held(&priv->mutex); 204 lockdep_assert_held(&priv->mutex);
136 205
@@ -186,6 +255,109 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
186 return ret; 255 return ret;
187} 256}
188 257
258static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
259{
260 u16 new_val;
261 u16 beacon_factor;
262
263 /*
264 * If mac80211 hasn't given us a beacon interval, program
265 * the default into the device (not checking this here
266 * would cause the adjustment below to return the maximum
267 * value, which may break PAN.)
268 */
269 if (!beacon_val)
270 return DEFAULT_BEACON_INTERVAL;
271
272 /*
273 * If the beacon interval we obtained from the peer
274 * is too large, we'll have to wake up more often
275 * (and in IBSS case, we'll beacon too much)
276 *
277 * For example, if max_beacon_val is 4096, and the
278 * requested beacon interval is 7000, we'll have to
279 * use 3500 to be able to wake up on the beacons.
280 *
281 * This could badly influence beacon detection stats.
282 */
283
284 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
285 new_val = beacon_val / beacon_factor;
286
287 if (!new_val)
288 new_val = max_beacon_val;
289
290 return new_val;
291}
292
293static int iwl_send_rxon_timing(struct iwl_priv *priv,
294 struct iwl_rxon_context *ctx)
295{
296 u64 tsf;
297 s32 interval_tm, rem;
298 struct ieee80211_conf *conf = NULL;
299 u16 beacon_int;
300 struct ieee80211_vif *vif = ctx->vif;
301
302 conf = &priv->hw->conf;
303
304 lockdep_assert_held(&priv->mutex);
305
306 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
307
308 ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
309 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
310
311 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
312
313 /*
314 * TODO: For IBSS we need to get atim_window from mac80211,
315 * for now just always use 0
316 */
317 ctx->timing.atim_window = 0;
318
319 if (ctx->ctxid == IWL_RXON_CTX_PAN &&
320 (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
321 iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
322 priv->contexts[IWL_RXON_CTX_BSS].vif &&
323 priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
324 ctx->timing.beacon_interval =
325 priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
326 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
327 } else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
328 iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
329 priv->contexts[IWL_RXON_CTX_PAN].vif &&
330 priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
331 (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
332 !ctx->vif->bss_conf.beacon_int)) {
333 ctx->timing.beacon_interval =
334 priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
335 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
336 } else {
337 beacon_int = iwl_adjust_beacon_interval(beacon_int,
338 IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
339 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
340 }
341
342 ctx->beacon_int = beacon_int;
343
344 tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
345 interval_tm = beacon_int * TIME_UNIT;
346 rem = do_div(tsf, interval_tm);
347 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
348
349 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
350
351 IWL_DEBUG_ASSOC(priv,
352 "beacon interval %d beacon timer %d beacon tim %d\n",
353 le16_to_cpu(ctx->timing.beacon_interval),
354 le32_to_cpu(ctx->timing.beacon_init_val),
355 le16_to_cpu(ctx->timing.atim_window));
356
357 return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
358 CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
359}
360
189static int iwlagn_rxon_disconn(struct iwl_priv *priv, 361static int iwlagn_rxon_disconn(struct iwl_priv *priv,
190 struct iwl_rxon_context *ctx) 362 struct iwl_rxon_context *ctx)
191{ 363{
@@ -228,6 +400,64 @@ static int iwlagn_rxon_disconn(struct iwl_priv *priv,
228 return 0; 400 return 0;
229} 401}
230 402
403static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
404{
405 int ret;
406 s8 prev_tx_power;
407 bool defer;
408 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
409
410 if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED)
411 return 0;
412
413 lockdep_assert_held(&priv->mutex);
414
415 if (priv->tx_power_user_lmt == tx_power && !force)
416 return 0;
417
418 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
419 IWL_WARN(priv,
420 "Requested user TXPOWER %d below lower limit %d.\n",
421 tx_power,
422 IWLAGN_TX_POWER_TARGET_POWER_MIN);
423 return -EINVAL;
424 }
425
426 if (tx_power > priv->tx_power_device_lmt) {
427 IWL_WARN(priv,
428 "Requested user TXPOWER %d above upper limit %d.\n",
429 tx_power, priv->tx_power_device_lmt);
430 return -EINVAL;
431 }
432
433 if (!iwl_is_ready_rf(priv))
434 return -EIO;
435
436 /* scan complete and commit_rxon use tx_power_next value,
437 * it always need to be updated for newest request */
438 priv->tx_power_next = tx_power;
439
440 /* do not set tx power when scanning or channel changing */
441 defer = test_bit(STATUS_SCANNING, &priv->status) ||
442 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
443 if (defer && !force) {
444 IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
445 return 0;
446 }
447
448 prev_tx_power = priv->tx_power_user_lmt;
449 priv->tx_power_user_lmt = tx_power;
450
451 ret = iwlagn_send_tx_power(priv);
452
453 /* if fail to set tx_power, restore the orig. tx power */
454 if (ret) {
455 priv->tx_power_user_lmt = prev_tx_power;
456 priv->tx_power_next = prev_tx_power;
457 }
458 return ret;
459}
460
231static int iwlagn_rxon_connect(struct iwl_priv *priv, 461static int iwlagn_rxon_connect(struct iwl_priv *priv,
232 struct iwl_rxon_context *ctx) 462 struct iwl_rxon_context *ctx)
233{ 463{
@@ -295,9 +525,9 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
295 } 525 }
296 526
297 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && 527 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
298 cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode) 528 priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
299 ieee80211_request_smps(ctx->vif, 529 ieee80211_request_smps(ctx->vif,
300 cfg(priv)->ht_params->smps_mode); 530 priv->cfg->ht_params->smps_mode);
301 531
302 return 0; 532 return 0;
303} 533}
@@ -309,7 +539,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
309 int slot0 = 300, slot1 = 0; 539 int slot0 = 300, slot1 = 0;
310 int ret; 540 int ret;
311 541
312 if (priv->shrd->valid_contexts == BIT(IWL_RXON_CTX_BSS)) 542 if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
313 return 0; 543 return 0;
314 544
315 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); 545 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
@@ -394,6 +624,414 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
394 return ret; 624 return ret;
395} 625}
396 626
627static void _iwl_set_rxon_ht(struct iwl_priv *priv,
628 struct iwl_ht_config *ht_conf,
629 struct iwl_rxon_context *ctx)
630{
631 struct iwl_rxon_cmd *rxon = &ctx->staging;
632
633 if (!ctx->ht.enabled) {
634 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
635 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
636 RXON_FLG_HT40_PROT_MSK |
637 RXON_FLG_HT_PROT_MSK);
638 return;
639 }
640
641 /* FIXME: if the definition of ht.protection changed, the "translation"
642 * will be needed for rxon->flags
643 */
644 rxon->flags |= cpu_to_le32(ctx->ht.protection <<
645 RXON_FLG_HT_OPERATING_MODE_POS);
646
647 /* Set up channel bandwidth:
648 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
649 /* clear the HT channel mode before set the mode */
650 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
651 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
652 if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
653 /* pure ht40 */
654 if (ctx->ht.protection ==
655 IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
656 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
657 /*
658 * Note: control channel is opposite of extension
659 * channel
660 */
661 switch (ctx->ht.extension_chan_offset) {
662 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
663 rxon->flags &=
664 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
665 break;
666 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
667 rxon->flags |=
668 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
669 break;
670 }
671 } else {
672 /*
673 * Note: control channel is opposite of extension
674 * channel
675 */
676 switch (ctx->ht.extension_chan_offset) {
677 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
678 rxon->flags &=
679 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
680 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
681 break;
682 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
683 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
684 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
685 break;
686 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
687 default:
688 /*
689 * channel location only valid if in Mixed
690 * mode
691 */
692 IWL_ERR(priv,
693 "invalid extension channel offset\n");
694 break;
695 }
696 }
697 } else {
698 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
699 }
700
701 iwlagn_set_rxon_chain(priv, ctx);
702
703 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
704 "extension channel offset 0x%x\n",
705 le32_to_cpu(rxon->flags), ctx->ht.protection,
706 ctx->ht.extension_chan_offset);
707}
708
709void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
710{
711 struct iwl_rxon_context *ctx;
712
713 for_each_context(priv, ctx)
714 _iwl_set_rxon_ht(priv, ht_conf, ctx);
715}
716
717/**
718 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
719 * @ch: requested channel as a pointer to struct ieee80211_channel
720
721 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
722 * in the staging RXON flag structure based on the ch->band
723 */
724void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
725 struct iwl_rxon_context *ctx)
726{
727 enum ieee80211_band band = ch->band;
728 u16 channel = ch->hw_value;
729
730 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
731 (priv->band == band))
732 return;
733
734 ctx->staging.channel = cpu_to_le16(channel);
735 if (band == IEEE80211_BAND_5GHZ)
736 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
737 else
738 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
739
740 priv->band = band;
741
742 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
743
744}
745
746void iwl_set_flags_for_band(struct iwl_priv *priv,
747 struct iwl_rxon_context *ctx,
748 enum ieee80211_band band,
749 struct ieee80211_vif *vif)
750{
751 if (band == IEEE80211_BAND_5GHZ) {
752 ctx->staging.flags &=
753 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
754 | RXON_FLG_CCK_MSK);
755 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
756 } else {
757 /* Copied from iwl_post_associate() */
758 if (vif && vif->bss_conf.use_short_slot)
759 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
760 else
761 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
762
763 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
764 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
765 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
766 }
767}
768
769static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
770 struct iwl_rxon_context *ctx, int hw_decrypt)
771{
772 struct iwl_rxon_cmd *rxon = &ctx->staging;
773
774 if (hw_decrypt)
775 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
776 else
777 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
778
779}
780
781/* validate RXON structure is valid */
782static int iwl_check_rxon_cmd(struct iwl_priv *priv,
783 struct iwl_rxon_context *ctx)
784{
785 struct iwl_rxon_cmd *rxon = &ctx->staging;
786 u32 errors = 0;
787
788 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
789 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
790 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
791 errors |= BIT(0);
792 }
793 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
794 IWL_WARN(priv, "check 2.4G: wrong radar\n");
795 errors |= BIT(1);
796 }
797 } else {
798 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
799 IWL_WARN(priv, "check 5.2G: not short slot!\n");
800 errors |= BIT(2);
801 }
802 if (rxon->flags & RXON_FLG_CCK_MSK) {
803 IWL_WARN(priv, "check 5.2G: CCK!\n");
804 errors |= BIT(3);
805 }
806 }
807 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
808 IWL_WARN(priv, "mac/bssid mcast!\n");
809 errors |= BIT(4);
810 }
811
812 /* make sure basic rates 6Mbps and 1Mbps are supported */
813 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
814 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
815 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
816 errors |= BIT(5);
817 }
818
819 if (le16_to_cpu(rxon->assoc_id) > 2007) {
820 IWL_WARN(priv, "aid > 2007\n");
821 errors |= BIT(6);
822 }
823
824 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
825 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
826 IWL_WARN(priv, "CCK and short slot\n");
827 errors |= BIT(7);
828 }
829
830 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
831 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
832 IWL_WARN(priv, "CCK and auto detect");
833 errors |= BIT(8);
834 }
835
836 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
837 RXON_FLG_TGG_PROTECT_MSK)) ==
838 RXON_FLG_TGG_PROTECT_MSK) {
839 IWL_WARN(priv, "TGg but no auto-detect\n");
840 errors |= BIT(9);
841 }
842
843 if (rxon->channel == 0) {
844 IWL_WARN(priv, "zero channel is invalid\n");
845 errors |= BIT(10);
846 }
847
848 WARN(errors, "Invalid RXON (%#x), channel %d",
849 errors, le16_to_cpu(rxon->channel));
850
851 return errors ? -EINVAL : 0;
852}
853
854/**
855 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
856 * @priv: staging_rxon is compared to active_rxon
857 *
858 * If the RXON structure is changing enough to require a new tune,
859 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
860 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
861 */
862int iwl_full_rxon_required(struct iwl_priv *priv,
863 struct iwl_rxon_context *ctx)
864{
865 const struct iwl_rxon_cmd *staging = &ctx->staging;
866 const struct iwl_rxon_cmd *active = &ctx->active;
867
868#define CHK(cond) \
869 if ((cond)) { \
870 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
871 return 1; \
872 }
873
874#define CHK_NEQ(c1, c2) \
875 if ((c1) != (c2)) { \
876 IWL_DEBUG_INFO(priv, "need full RXON - " \
877 #c1 " != " #c2 " - %d != %d\n", \
878 (c1), (c2)); \
879 return 1; \
880 }
881
882 /* These items are only settable from the full RXON command */
883 CHK(!iwl_is_associated_ctx(ctx));
884 CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
885 CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
886 CHK(!ether_addr_equal(staging->wlap_bssid_addr,
887 active->wlap_bssid_addr));
888 CHK_NEQ(staging->dev_type, active->dev_type);
889 CHK_NEQ(staging->channel, active->channel);
890 CHK_NEQ(staging->air_propagation, active->air_propagation);
891 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
892 active->ofdm_ht_single_stream_basic_rates);
893 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
894 active->ofdm_ht_dual_stream_basic_rates);
895 CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
896 active->ofdm_ht_triple_stream_basic_rates);
897 CHK_NEQ(staging->assoc_id, active->assoc_id);
898
899 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
900 * be updated with the RXON_ASSOC command -- however only some
901 * flag transitions are allowed using RXON_ASSOC */
902
903 /* Check if we are not switching bands */
904 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
905 active->flags & RXON_FLG_BAND_24G_MSK);
906
907 /* Check if we are switching association toggle */
908 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
909 active->filter_flags & RXON_FILTER_ASSOC_MSK);
910
911#undef CHK
912#undef CHK_NEQ
913
914 return 0;
915}
916
917#ifdef CONFIG_IWLWIFI_DEBUG
918void iwl_print_rx_config_cmd(struct iwl_priv *priv,
919 enum iwl_rxon_context_id ctxid)
920{
921 struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
922 struct iwl_rxon_cmd *rxon = &ctx->staging;
923
924 IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
925 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
926 IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
927 le16_to_cpu(rxon->channel));
928 IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n",
929 le32_to_cpu(rxon->flags));
930 IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
931 le32_to_cpu(rxon->filter_flags));
932 IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
933 IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
934 rxon->ofdm_basic_rates);
935 IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
936 rxon->cck_basic_rates);
937 IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
938 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
939 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
940 le16_to_cpu(rxon->assoc_id));
941}
942#endif
943
944static void iwl_calc_basic_rates(struct iwl_priv *priv,
945 struct iwl_rxon_context *ctx)
946{
947 int lowest_present_ofdm = 100;
948 int lowest_present_cck = 100;
949 u8 cck = 0;
950 u8 ofdm = 0;
951
952 if (ctx->vif) {
953 struct ieee80211_supported_band *sband;
954 unsigned long basic = ctx->vif->bss_conf.basic_rates;
955 int i;
956
957 sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band];
958
959 for_each_set_bit(i, &basic, BITS_PER_LONG) {
960 int hw = sband->bitrates[i].hw_value;
961 if (hw >= IWL_FIRST_OFDM_RATE) {
962 ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
963 if (lowest_present_ofdm > hw)
964 lowest_present_ofdm = hw;
965 } else {
966 BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
967
968 cck |= BIT(hw);
969 if (lowest_present_cck > hw)
970 lowest_present_cck = hw;
971 }
972 }
973 }
974
975 /*
976 * Now we've got the basic rates as bitmaps in the ofdm and cck
977 * variables. This isn't sufficient though, as there might not
978 * be all the right rates in the bitmap. E.g. if the only basic
979 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
980 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
981 *
982 * [...] a STA responding to a received frame shall transmit
983 * its Control Response frame [...] at the highest rate in the
984 * BSSBasicRateSet parameter that is less than or equal to the
985 * rate of the immediately previous frame in the frame exchange
986 * sequence ([...]) and that is of the same modulation class
987 * ([...]) as the received frame. If no rate contained in the
988 * BSSBasicRateSet parameter meets these conditions, then the
989 * control frame sent in response to a received frame shall be
990 * transmitted at the highest mandatory rate of the PHY that is
991 * less than or equal to the rate of the received frame, and
992 * that is of the same modulation class as the received frame.
993 *
994 * As a consequence, we need to add all mandatory rates that are
995 * lower than all of the basic rates to these bitmaps.
996 */
997
998 if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
999 ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE;
1000 if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
1001 ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE;
1002 /* 6M already there or needed so always add */
1003 ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE;
1004
1005 /*
1006 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
1007 * Note, however:
1008 * - if no CCK rates are basic, it must be ERP since there must
1009 * be some basic rates at all, so they're OFDM => ERP PHY
1010 * (or we're in 5 GHz, and the cck bitmap will never be used)
1011 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
1012 * - if 5.5M is basic, 1M and 2M are mandatory
1013 * - if 2M is basic, 1M is mandatory
1014 * - if 1M is basic, that's the only valid ACK rate.
1015 * As a consequence, it's not as complicated as it sounds, just add
1016 * any lower rates to the ACK rate bitmap.
1017 */
1018 if (IWL_RATE_11M_INDEX < lowest_present_ofdm)
1019 ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
1020 if (IWL_RATE_5M_INDEX < lowest_present_ofdm)
1021 ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
1022 if (IWL_RATE_2M_INDEX < lowest_present_ofdm)
1023 ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
1024 /* 1M already there or needed so always add */
1025 cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
1026
1027 IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n",
1028 cck, ofdm);
1029
1030 /* "basic_rates" is a misnomer here -- should be called ACK rates */
1031 ctx->staging.cck_basic_rates = cck;
1032 ctx->staging.ofdm_basic_rates = ofdm;
1033}
1034
397/** 1035/**
398 * iwlagn_commit_rxon - commit staging_rxon to hardware 1036 * iwlagn_commit_rxon - commit staging_rxon to hardware
399 * 1037 *
@@ -433,11 +1071,14 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
433 /* always get timestamp with Rx frame */ 1071 /* always get timestamp with Rx frame */
434 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; 1072 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
435 1073
1074 /* recalculate basic rates */
1075 iwl_calc_basic_rates(priv, ctx);
1076
436 /* 1077 /*
437 * force CTS-to-self frames protection if RTS-CTS is not preferred 1078 * force CTS-to-self frames protection if RTS-CTS is not preferred
438 * one aggregation protection method 1079 * one aggregation protection method
439 */ 1080 */
440 if (!hw_params(priv).use_rts_for_aggregation) 1081 if (!priv->hw_params.use_rts_for_aggregation)
441 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; 1082 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
442 1083
443 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || 1084 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
@@ -489,7 +1130,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
489 return 0; 1130 return 0;
490 } 1131 }
491 1132
492 iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto); 1133 iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto);
493 1134
494 IWL_DEBUG_INFO(priv, 1135 IWL_DEBUG_INFO(priv,
495 "Going to commit RXON\n" 1136 "Going to commit RXON\n"
@@ -547,7 +1188,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
547 const struct iwl_channel_info *ch_info; 1188 const struct iwl_channel_info *ch_info;
548 int ret = 0; 1189 int ret = 0;
549 1190
550 IWL_DEBUG_MAC80211(priv, "enter: changed %#x", changed); 1191 IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
551 1192
552 mutex_lock(&priv->mutex); 1193 mutex_lock(&priv->mutex);
553 1194
@@ -621,13 +1262,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
621 } 1262 }
622 1263
623 iwl_update_bcast_stations(priv); 1264 iwl_update_bcast_stations(priv);
624
625 /*
626 * The list of supported rates and rate mask can be different
627 * for each band; since the band may have changed, reset
628 * the rate mask to what mac80211 lists.
629 */
630 iwl_set_rate(priv);
631 } 1265 }
632 1266
633 if (changed & (IEEE80211_CONF_CHANGE_PS | 1267 if (changed & (IEEE80211_CONF_CHANGE_PS |
@@ -656,9 +1290,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
656 return ret; 1290 return ret;
657} 1291}
658 1292
659static void iwlagn_check_needed_chains(struct iwl_priv *priv, 1293void iwlagn_check_needed_chains(struct iwl_priv *priv,
660 struct iwl_rxon_context *ctx, 1294 struct iwl_rxon_context *ctx,
661 struct ieee80211_bss_conf *bss_conf) 1295 struct ieee80211_bss_conf *bss_conf)
662{ 1296{
663 struct ieee80211_vif *vif = ctx->vif; 1297 struct ieee80211_vif *vif = ctx->vif;
664 struct iwl_rxon_context *tmp; 1298 struct iwl_rxon_context *tmp;
@@ -750,11 +1384,14 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
750 ht_conf->single_chain_sufficient = !need_multiple; 1384 ht_conf->single_chain_sufficient = !need_multiple;
751} 1385}
752 1386
753static void iwlagn_chain_noise_reset(struct iwl_priv *priv) 1387void iwlagn_chain_noise_reset(struct iwl_priv *priv)
754{ 1388{
755 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 1389 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
756 int ret; 1390 int ret;
757 1391
1392 if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED))
1393 return;
1394
758 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 1395 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
759 iwl_is_any_associated(priv)) { 1396 iwl_is_any_associated(priv)) {
760 struct iwl_calib_chain_noise_reset_cmd cmd; 1397 struct iwl_calib_chain_noise_reset_cmd cmd;
@@ -907,8 +1544,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
907 iwl_power_update_mode(priv, false); 1544 iwl_power_update_mode(priv, false);
908 1545
909 /* Enable RX differential gain and sensitivity calibrations */ 1546 /* Enable RX differential gain and sensitivity calibrations */
910 if (!priv->disable_chain_noise_cal) 1547 iwlagn_chain_noise_reset(priv);
911 iwlagn_chain_noise_reset(priv);
912 priv->start_calib = 1; 1548 priv->start_calib = 1;
913 } 1549 }
914 1550
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index c4175603864b..b31584e87bc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -30,10 +30,11 @@
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31 31
32#include "iwl-dev.h" 32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-agn.h" 33#include "iwl-agn.h"
35#include "iwl-trans.h" 34#include "iwl-trans.h"
36 35
36const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
37
37static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) 38static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
38{ 39{
39 lockdep_assert_held(&priv->sta_lock); 40 lockdep_assert_held(&priv->sta_lock);
@@ -170,6 +171,50 @@ int iwl_send_add_sta(struct iwl_priv *priv,
170 return cmd.handler_status; 171 return cmd.handler_status;
171} 172}
172 173
174static bool iwl_is_channel_extension(struct iwl_priv *priv,
175 enum ieee80211_band band,
176 u16 channel, u8 extension_chan_offset)
177{
178 const struct iwl_channel_info *ch_info;
179
180 ch_info = iwl_get_channel_info(priv, band, channel);
181 if (!is_channel_valid(ch_info))
182 return false;
183
184 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
185 return !(ch_info->ht40_extension_channel &
186 IEEE80211_CHAN_NO_HT40PLUS);
187 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
188 return !(ch_info->ht40_extension_channel &
189 IEEE80211_CHAN_NO_HT40MINUS);
190
191 return false;
192}
193
194bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
195 struct iwl_rxon_context *ctx,
196 struct ieee80211_sta_ht_cap *ht_cap)
197{
198 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
199 return false;
200
201 /*
202 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
203 * the bit will not set if it is pure 40MHz case
204 */
205 if (ht_cap && !ht_cap->ht_supported)
206 return false;
207
208#ifdef CONFIG_IWLWIFI_DEBUGFS
209 if (priv->disable_ht40)
210 return false;
211#endif
212
213 return iwl_is_channel_extension(priv, priv->band,
214 le16_to_cpu(ctx->staging.channel),
215 ctx->ht.extension_chan_offset);
216}
217
173static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, 218static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
174 struct ieee80211_sta *sta, 219 struct ieee80211_sta *sta,
175 struct iwl_rxon_context *ctx, 220 struct iwl_rxon_context *ctx,
@@ -277,8 +322,8 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
277 sta_id = ctx->bcast_sta_id; 322 sta_id = ctx->bcast_sta_id;
278 else 323 else
279 for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) { 324 for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) {
280 if (!compare_ether_addr(priv->stations[i].sta.sta.addr, 325 if (ether_addr_equal(priv->stations[i].sta.sta.addr,
281 addr)) { 326 addr)) {
282 sta_id = i; 327 sta_id = i;
283 break; 328 break;
284 } 329 }
@@ -308,7 +353,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
308 353
309 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && 354 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
310 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && 355 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
311 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { 356 ether_addr_equal(priv->stations[sta_id].sta.sta.addr, addr)) {
312 IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not " 357 IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
313 "adding again.\n", sta_id, addr); 358 "adding again.\n", sta_id, addr);
314 return sta_id; 359 return sta_id;
@@ -581,6 +626,56 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
581 spin_unlock_bh(&priv->sta_lock); 626 spin_unlock_bh(&priv->sta_lock);
582} 627}
583 628
629static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
630 u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
631{
632 int i, r;
633 u32 rate_flags = 0;
634 __le32 rate_n_flags;
635
636 lockdep_assert_held(&priv->mutex);
637
638 memset(link_cmd, 0, sizeof(*link_cmd));
639
640 /* Set up the rate scaling to start at selected rate, fall back
641 * all the way down to 1M in IEEE order, and then spin on 1M */
642 if (priv->band == IEEE80211_BAND_5GHZ)
643 r = IWL_RATE_6M_INDEX;
644 else if (ctx && ctx->vif && ctx->vif->p2p)
645 r = IWL_RATE_6M_INDEX;
646 else
647 r = IWL_RATE_1M_INDEX;
648
649 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
650 rate_flags |= RATE_MCS_CCK_MSK;
651
652 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
653 RATE_MCS_ANT_POS;
654 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
655 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
656 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
657
658 link_cmd->general_params.single_stream_ant_msk =
659 first_antenna(priv->hw_params.valid_tx_ant);
660
661 link_cmd->general_params.dual_stream_ant_msk =
662 priv->hw_params.valid_tx_ant &
663 ~first_antenna(priv->hw_params.valid_tx_ant);
664 if (!link_cmd->general_params.dual_stream_ant_msk) {
665 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
666 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
667 link_cmd->general_params.dual_stream_ant_msk =
668 priv->hw_params.valid_tx_ant;
669 }
670
671 link_cmd->agg_params.agg_dis_start_th =
672 LINK_QUAL_AGG_DISABLE_START_DEF;
673 link_cmd->agg_params.agg_time_limit =
674 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
675
676 link_cmd->sta_id = sta_id;
677}
678
584/** 679/**
585 * iwl_clear_ucode_stations - clear ucode station table bits 680 * iwl_clear_ucode_stations - clear ucode station table bits
586 * 681 *
@@ -841,56 +936,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
841} 936}
842 937
843 938
844void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
845 u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
846{
847 int i, r;
848 u32 rate_flags = 0;
849 __le32 rate_n_flags;
850
851 lockdep_assert_held(&priv->mutex);
852
853 memset(link_cmd, 0, sizeof(*link_cmd));
854
855 /* Set up the rate scaling to start at selected rate, fall back
856 * all the way down to 1M in IEEE order, and then spin on 1M */
857 if (priv->band == IEEE80211_BAND_5GHZ)
858 r = IWL_RATE_6M_INDEX;
859 else if (ctx && ctx->vif && ctx->vif->p2p)
860 r = IWL_RATE_6M_INDEX;
861 else
862 r = IWL_RATE_1M_INDEX;
863
864 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
865 rate_flags |= RATE_MCS_CCK_MSK;
866
867 rate_flags |= first_antenna(hw_params(priv).valid_tx_ant) <<
868 RATE_MCS_ANT_POS;
869 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
870 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
871 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
872
873 link_cmd->general_params.single_stream_ant_msk =
874 first_antenna(hw_params(priv).valid_tx_ant);
875
876 link_cmd->general_params.dual_stream_ant_msk =
877 hw_params(priv).valid_tx_ant &
878 ~first_antenna(hw_params(priv).valid_tx_ant);
879 if (!link_cmd->general_params.dual_stream_ant_msk) {
880 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
881 } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
882 link_cmd->general_params.dual_stream_ant_msk =
883 hw_params(priv).valid_tx_ant;
884 }
885
886 link_cmd->agg_params.agg_dis_start_th =
887 LINK_QUAL_AGG_DISABLE_START_DEF;
888 link_cmd->agg_params.agg_time_limit =
889 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
890
891 link_cmd->sta_id = sta_id;
892}
893
894static struct iwl_link_quality_cmd * 939static struct iwl_link_quality_cmd *
895iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 940iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
896 u8 sta_id) 941 u8 sta_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index baaf5ba2fc38..a5cfe0aceedb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -37,11 +37,11 @@
37#include "iwl-agn.h" 37#include "iwl-agn.h"
38#include "iwl-eeprom.h" 38#include "iwl-eeprom.h"
39#include "iwl-dev.h" 39#include "iwl-dev.h"
40#include "iwl-core.h"
41#include "iwl-io.h" 40#include "iwl-io.h"
42#include "iwl-commands.h" 41#include "iwl-commands.h"
43#include "iwl-debug.h" 42#include "iwl-debug.h"
44#include "iwl-agn-tt.h" 43#include "iwl-agn-tt.h"
44#include "iwl-modparams.h"
45 45
46/* default Thermal Throttling transaction table 46/* default Thermal Throttling transaction table
47 * Current state | Throttling Down | Throttling Up 47 * Current state | Throttling Down | Throttling Up
@@ -179,19 +179,19 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
179 179
180 if (tt->state == IWL_TI_CT_KILL) { 180 if (tt->state == IWL_TI_CT_KILL) {
181 if (priv->thermal_throttle.ct_kill_toggle) { 181 if (priv->thermal_throttle.ct_kill_toggle) {
182 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, 182 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
183 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 183 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
184 priv->thermal_throttle.ct_kill_toggle = false; 184 priv->thermal_throttle.ct_kill_toggle = false;
185 } else { 185 } else {
186 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET, 186 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
187 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 187 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
188 priv->thermal_throttle.ct_kill_toggle = true; 188 priv->thermal_throttle.ct_kill_toggle = true;
189 } 189 }
190 iwl_read32(trans(priv), CSR_UCODE_DRV_GP1); 190 iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
191 spin_lock_irqsave(&trans(priv)->reg_lock, flags); 191 spin_lock_irqsave(&priv->trans->reg_lock, flags);
192 if (likely(iwl_grab_nic_access(trans(priv)))) 192 if (likely(iwl_grab_nic_access(priv->trans)))
193 iwl_release_nic_access(trans(priv)); 193 iwl_release_nic_access(priv->trans);
194 spin_unlock_irqrestore(&trans(priv)->reg_lock, flags); 194 spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
195 195
196 /* Reschedule the ct_kill timer to occur in 196 /* Reschedule the ct_kill timer to occur in
197 * CT_KILL_EXIT_DURATION seconds to ensure we get a 197 * CT_KILL_EXIT_DURATION seconds to ensure we get a
@@ -632,7 +632,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
632 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); 632 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
633 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); 633 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
634 634
635 if (cfg(priv)->base_params->adv_thermal_throttle) { 635 if (priv->cfg->base_params->adv_thermal_throttle) {
636 IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); 636 IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
637 tt->restriction = kcalloc(IWL_TI_STATE_MAX, 637 tt->restriction = kcalloc(IWL_TI_STATE_MAX,
638 sizeof(struct iwl_tt_restriction), 638 sizeof(struct iwl_tt_restriction),
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 34adedc74d35..f2e9f298a947 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -34,12 +34,22 @@
34#include <linux/ieee80211.h> 34#include <linux/ieee80211.h>
35 35
36#include "iwl-dev.h" 36#include "iwl-dev.h"
37#include "iwl-core.h"
38#include "iwl-io.h" 37#include "iwl-io.h"
39#include "iwl-agn-hw.h" 38#include "iwl-agn-hw.h"
40#include "iwl-agn.h" 39#include "iwl-agn.h"
41#include "iwl-trans.h" 40#include "iwl-trans.h"
42 41
42static const u8 tid_to_ac[] = {
43 IEEE80211_AC_BE,
44 IEEE80211_AC_BK,
45 IEEE80211_AC_BK,
46 IEEE80211_AC_BE,
47 IEEE80211_AC_VI,
48 IEEE80211_AC_VI,
49 IEEE80211_AC_VO,
50 IEEE80211_AC_VO,
51};
52
43static void iwlagn_tx_cmd_protection(struct iwl_priv *priv, 53static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
44 struct ieee80211_tx_info *info, 54 struct ieee80211_tx_info *info,
45 __le16 fc, __le32 *tx_flags) 55 __le16 fc, __le32 *tx_flags)
@@ -74,8 +84,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
74 else if (ieee80211_is_back_req(fc)) 84 else if (ieee80211_is_back_req(fc))
75 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; 85 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
76 else if (info->band == IEEE80211_BAND_2GHZ && 86 else if (info->band == IEEE80211_BAND_2GHZ &&
77 cfg(priv)->bt_params && 87 priv->cfg->bt_params &&
78 cfg(priv)->bt_params->advanced_bt_coexist && 88 priv->cfg->bt_params->advanced_bt_coexist &&
79 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || 89 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
80 ieee80211_is_reassoc_req(fc) || 90 ieee80211_is_reassoc_req(fc) ||
81 skb->protocol == cpu_to_be16(ETH_P_PAE))) 91 skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -192,15 +202,15 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
192 rate_flags |= RATE_MCS_CCK_MSK; 202 rate_flags |= RATE_MCS_CCK_MSK;
193 203
194 /* Set up antennas */ 204 /* Set up antennas */
195 if (cfg(priv)->bt_params && 205 if (priv->cfg->bt_params &&
196 cfg(priv)->bt_params->advanced_bt_coexist && 206 priv->cfg->bt_params->advanced_bt_coexist &&
197 priv->bt_full_concurrent) { 207 priv->bt_full_concurrent) {
198 /* operated as 1x1 in full concurrency mode */ 208 /* operated as 1x1 in full concurrency mode */
199 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 209 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
200 first_antenna(hw_params(priv).valid_tx_ant)); 210 first_antenna(priv->hw_params.valid_tx_ant));
201 } else 211 } else
202 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 212 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
203 hw_params(priv).valid_tx_ant); 213 priv->hw_params.valid_tx_ant);
204 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 214 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
205 215
206 /* Set the rate in the TX cmd */ 216 /* Set the rate in the TX cmd */
@@ -293,6 +303,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
293 u16 len, seq_number = 0; 303 u16 len, seq_number = 0;
294 u8 sta_id, tid = IWL_MAX_TID_COUNT; 304 u8 sta_id, tid = IWL_MAX_TID_COUNT;
295 bool is_agg = false; 305 bool is_agg = false;
306 int txq_id;
296 307
297 if (info->control.vif) 308 if (info->control.vif)
298 ctx = iwl_rxon_ctx_from_vif(info->control.vif); 309 ctx = iwl_rxon_ctx_from_vif(info->control.vif);
@@ -384,12 +395,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
384 395
385 /* TODO need this for burst mode later on */ 396 /* TODO need this for burst mode later on */
386 iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); 397 iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
387 iwl_dbg_log_tx_data_frame(priv, len, hdr);
388 398
389 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc); 399 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
390 400
391 iwl_update_stats(priv, true, fc, len);
392
393 memset(&info->status, 0, sizeof(info->status)); 401 memset(&info->status, 0, sizeof(info->status));
394 402
395 info->driver_data[0] = ctx; 403 info->driver_data[0] = ctx;
@@ -435,7 +443,31 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
435 /* Copy MAC header from skb into command buffer */ 443 /* Copy MAC header from skb into command buffer */
436 memcpy(tx_cmd->hdr, hdr, hdr_len); 444 memcpy(tx_cmd->hdr, hdr, hdr_len);
437 445
438 if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid)) 446 if (is_agg)
447 txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
448 else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
449 /*
450 * Send this frame after DTIM -- there's a special queue
451 * reserved for this for contexts that support AP mode.
452 */
453 txq_id = ctx->mcast_queue;
454
455 /*
456 * The microcode will clear the more data
457 * bit in the last frame it transmits.
458 */
459 hdr->frame_control |=
460 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
461 } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
462 txq_id = IWL_AUX_QUEUE;
463 else
464 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
465
466 WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
467 WARN_ON_ONCE(is_agg &&
468 priv->queue_to_mac80211[txq_id] != info->hw_queue);
469
470 if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
439 goto drop_unlock_sta; 471 goto drop_unlock_sta;
440 472
441 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) && 473 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
@@ -464,11 +496,33 @@ drop_unlock_priv:
464 return -1; 496 return -1;
465} 497}
466 498
499static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
500{
501 int q;
502
503 for (q = IWLAGN_FIRST_AMPDU_QUEUE;
504 q < priv->cfg->base_params->num_of_queues; q++) {
505 if (!test_and_set_bit(q, priv->agg_q_alloc)) {
506 priv->queue_to_mac80211[q] = mq;
507 return q;
508 }
509 }
510
511 return -ENOSPC;
512}
513
514static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
515{
516 clear_bit(q, priv->agg_q_alloc);
517 priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
518}
519
467int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, 520int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
468 struct ieee80211_sta *sta, u16 tid) 521 struct ieee80211_sta *sta, u16 tid)
469{ 522{
470 struct iwl_tid_data *tid_data; 523 struct iwl_tid_data *tid_data;
471 int sta_id; 524 int sta_id, txq_id;
525 enum iwl_agg_state agg_state;
472 526
473 sta_id = iwl_sta_id(sta); 527 sta_id = iwl_sta_id(sta);
474 528
@@ -480,6 +534,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
480 spin_lock_bh(&priv->sta_lock); 534 spin_lock_bh(&priv->sta_lock);
481 535
482 tid_data = &priv->tid_data[sta_id][tid]; 536 tid_data = &priv->tid_data[sta_id][tid];
537 txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
483 538
484 switch (priv->tid_data[sta_id][tid].agg.state) { 539 switch (priv->tid_data[sta_id][tid].agg.state) {
485 case IWL_EMPTYING_HW_QUEUE_ADDBA: 540 case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -491,6 +546,13 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
491 */ 546 */
492 IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); 547 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
493 goto turn_off; 548 goto turn_off;
549 case IWL_AGG_STARTING:
550 /*
551 * This can happen when the session is stopped before
552 * we receive ADDBA response
553 */
554 IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
555 goto turn_off;
494 case IWL_AGG_ON: 556 case IWL_AGG_ON:
495 break; 557 break;
496 default: 558 default:
@@ -504,9 +566,13 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
504 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); 566 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
505 567
506 /* There are still packets for this RA / TID in the HW */ 568 /* There are still packets for this RA / TID in the HW */
507 if (tid_data->agg.ssn != tid_data->next_reclaimed) { 569 if (!test_bit(txq_id, priv->agg_q_alloc)) {
570 IWL_DEBUG_TX_QUEUES(priv,
571 "stopping AGG on STA/TID %d/%d but hwq %d not used\n",
572 sta_id, tid, txq_id);
573 } else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
508 IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " 574 IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
509 "next_recl = %d", 575 "next_recl = %d\n",
510 tid_data->agg.ssn, 576 tid_data->agg.ssn,
511 tid_data->next_reclaimed); 577 tid_data->next_reclaimed);
512 priv->tid_data[sta_id][tid].agg.state = 578 priv->tid_data[sta_id][tid].agg.state =
@@ -515,14 +581,22 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
515 return 0; 581 return 0;
516 } 582 }
517 583
518 IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d", 584 IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
519 tid_data->agg.ssn); 585 tid_data->agg.ssn);
520turn_off: 586turn_off:
587 agg_state = priv->tid_data[sta_id][tid].agg.state;
521 priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF; 588 priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
522 589
523 spin_unlock_bh(&priv->sta_lock); 590 spin_unlock_bh(&priv->sta_lock);
524 591
525 iwl_trans_tx_agg_disable(trans(priv), sta_id, tid); 592 if (test_bit(txq_id, priv->agg_q_alloc)) {
593 /* If the transport didn't know that we wanted to start
594 * agreggation, don't tell it that we want to stop them
595 */
596 if (agg_state != IWL_AGG_STARTING)
597 iwl_trans_tx_agg_disable(priv->trans, txq_id);
598 iwlagn_dealloc_agg_txq(priv, txq_id);
599 }
526 600
527 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 601 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
528 602
@@ -532,9 +606,9 @@ turn_off:
532int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, 606int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
533 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 607 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
534{ 608{
609 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
535 struct iwl_tid_data *tid_data; 610 struct iwl_tid_data *tid_data;
536 int sta_id; 611 int sta_id, txq_id, ret;
537 int ret;
538 612
539 IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n", 613 IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
540 sta->addr, tid); 614 sta->addr, tid);
@@ -552,36 +626,37 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
552 return -ENXIO; 626 return -ENXIO;
553 } 627 }
554 628
629 txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
630 if (txq_id < 0) {
631 IWL_DEBUG_TX_QUEUES(priv,
632 "No free aggregation queue for %pM/%d\n",
633 sta->addr, tid);
634 return txq_id;
635 }
636
555 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); 637 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
556 if (ret) 638 if (ret)
557 return ret; 639 return ret;
558 640
559 spin_lock_bh(&priv->sta_lock); 641 spin_lock_bh(&priv->sta_lock);
560
561 tid_data = &priv->tid_data[sta_id][tid]; 642 tid_data = &priv->tid_data[sta_id][tid];
562 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); 643 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
644 tid_data->agg.txq_id = txq_id;
563 645
564 *ssn = tid_data->agg.ssn; 646 *ssn = tid_data->agg.ssn;
565 647
566 ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
567 if (ret) {
568 spin_unlock_bh(&priv->sta_lock);
569 return ret;
570 }
571
572 if (*ssn == tid_data->next_reclaimed) { 648 if (*ssn == tid_data->next_reclaimed) {
573 IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d", 649 IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
574 tid_data->agg.ssn); 650 tid_data->agg.ssn);
575 tid_data->agg.state = IWL_AGG_ON; 651 tid_data->agg.state = IWL_AGG_STARTING;
576 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 652 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
577 } else { 653 } else {
578 IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " 654 IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
579 "next_reclaimed = %d", 655 "next_reclaimed = %d\n",
580 tid_data->agg.ssn, 656 tid_data->agg.ssn,
581 tid_data->next_reclaimed); 657 tid_data->next_reclaimed);
582 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; 658 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
583 } 659 }
584
585 spin_unlock_bh(&priv->sta_lock); 660 spin_unlock_bh(&priv->sta_lock);
586 661
587 return ret; 662 return ret;
@@ -592,15 +667,21 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
592{ 667{
593 struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; 668 struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
594 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 669 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
670 int q, fifo;
595 u16 ssn; 671 u16 ssn;
596 672
597 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); 673 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
598 674
599 spin_lock_bh(&priv->sta_lock); 675 spin_lock_bh(&priv->sta_lock);
600 ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn; 676 ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
677 q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
678 priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
601 spin_unlock_bh(&priv->sta_lock); 679 spin_unlock_bh(&priv->sta_lock);
602 680
603 iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid, 681 fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
682
683 iwl_trans_tx_agg_setup(priv->trans, q, fifo,
684 sta_priv->sta_id, tid,
604 buf_size, ssn); 685 buf_size, ssn);
605 686
606 /* 687 /*
@@ -623,7 +704,7 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
623 sta_priv->max_agg_bufsize = 704 sta_priv->max_agg_bufsize =
624 min(sta_priv->max_agg_bufsize, buf_size); 705 min(sta_priv->max_agg_bufsize, buf_size);
625 706
626 if (hw_params(priv).use_rts_for_aggregation) { 707 if (priv->hw_params.use_rts_for_aggregation) {
627 /* 708 /*
628 * switch to RTS/CTS if it is the prefer protection 709 * switch to RTS/CTS if it is the prefer protection
629 * method for HT traffic 710 * method for HT traffic
@@ -666,7 +747,9 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
666 IWL_DEBUG_TX_QUEUES(priv, 747 IWL_DEBUG_TX_QUEUES(priv,
667 "Can continue DELBA flow ssn = next_recl =" 748 "Can continue DELBA flow ssn = next_recl ="
668 " %d", tid_data->next_reclaimed); 749 " %d", tid_data->next_reclaimed);
669 iwl_trans_tx_agg_disable(trans(priv), sta_id, tid); 750 iwl_trans_tx_agg_disable(priv->trans,
751 tid_data->agg.txq_id);
752 iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
670 tid_data->agg.state = IWL_AGG_OFF; 753 tid_data->agg.state = IWL_AGG_OFF;
671 ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); 754 ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
672 } 755 }
@@ -677,7 +760,7 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
677 IWL_DEBUG_TX_QUEUES(priv, 760 IWL_DEBUG_TX_QUEUES(priv,
678 "Can continue ADDBA flow ssn = next_recl =" 761 "Can continue ADDBA flow ssn = next_recl ="
679 " %d", tid_data->next_reclaimed); 762 " %d", tid_data->next_reclaimed);
680 tid_data->agg.state = IWL_AGG_ON; 763 tid_data->agg.state = IWL_AGG_STARTING;
681 ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); 764 ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
682 } 765 }
683 break; 766 break;
@@ -711,9 +794,9 @@ static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
711static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, 794static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
712 struct ieee80211_tx_info *info) 795 struct ieee80211_tx_info *info)
713{ 796{
714 struct ieee80211_tx_rate *r = &info->control.rates[0]; 797 struct ieee80211_tx_rate *r = &info->status.rates[0];
715 798
716 info->antenna_sel_tx = 799 info->status.antenna =
717 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); 800 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
718 if (rate_n_flags & RATE_MCS_HT_MSK) 801 if (rate_n_flags & RATE_MCS_HT_MSK)
719 r->flags |= IEEE80211_TX_RC_MCS; 802 r->flags |= IEEE80211_TX_RC_MCS;
@@ -841,8 +924,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
841 * notification again. 924 * notification again.
842 */ 925 */
843 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && 926 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
844 cfg(priv)->bt_params && 927 priv->cfg->bt_params &&
845 cfg(priv)->bt_params->advanced_bt_coexist) { 928 priv->cfg->bt_params->advanced_bt_coexist) {
846 IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); 929 IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
847 } 930 }
848 931
@@ -1005,6 +1088,29 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
1005 } 1088 }
1006} 1089}
1007 1090
1091static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid,
1092 int txq_id, int ssn, struct sk_buff_head *skbs)
1093{
1094 if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
1095 tid != IWL_TID_NON_QOS &&
1096 txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) {
1097 /*
1098 * FIXME: this is a uCode bug which need to be addressed,
1099 * log the information and return for now.
1100 * Since it is can possibly happen very often and in order
1101 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
1102 */
1103 IWL_DEBUG_TX_QUEUES(priv,
1104 "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
1105 txq_id, sta_id, tid,
1106 priv->tid_data[sta_id][tid].agg.txq_id);
1107 return 1;
1108 }
1109
1110 iwl_trans_reclaim(priv->trans, txq_id, ssn, skbs);
1111 return 0;
1112}
1113
1008int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, 1114int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1009 struct iwl_device_cmd *cmd) 1115 struct iwl_device_cmd *cmd)
1010{ 1116{
@@ -1059,13 +1165,12 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1059 if (tid != IWL_TID_NON_QOS) { 1165 if (tid != IWL_TID_NON_QOS) {
1060 priv->tid_data[sta_id][tid].next_reclaimed = 1166 priv->tid_data[sta_id][tid].next_reclaimed =
1061 next_reclaimed; 1167 next_reclaimed;
1062 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d", 1168 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
1063 next_reclaimed); 1169 next_reclaimed);
1064 } 1170 }
1065 1171
1066 /*we can free until ssn % q.n_bd not inclusive */ 1172 /*we can free until ssn % q.n_bd not inclusive */
1067 WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, 1173 WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs));
1068 txq_id, ssn, &skbs));
1069 iwlagn_check_ratid_empty(priv, sta_id, tid); 1174 iwlagn_check_ratid_empty(priv, sta_id, tid);
1070 freed = 0; 1175 freed = 0;
1071 1176
@@ -1159,7 +1264,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1159 * (in Tx queue's circular buffer) of first TFD/frame in window */ 1264 * (in Tx queue's circular buffer) of first TFD/frame in window */
1160 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); 1265 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1161 1266
1162 if (scd_flow >= cfg(priv)->base_params->num_of_queues) { 1267 if (scd_flow >= priv->cfg->base_params->num_of_queues) {
1163 IWL_ERR(priv, 1268 IWL_ERR(priv,
1164 "BUG_ON scd_flow is bigger than number of queues\n"); 1269 "BUG_ON scd_flow is bigger than number of queues\n");
1165 return 0; 1270 return 0;
@@ -1183,8 +1288,8 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1183 /* Release all TFDs before the SSN, i.e. all TFDs in front of 1288 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1184 * block-ack window (we assume that they've been successfully 1289 * block-ack window (we assume that they've been successfully
1185 * transmitted ... if not, it's too late anyway). */ 1290 * transmitted ... if not, it's too late anyway). */
1186 if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, 1291 if (iwl_reclaim(priv, sta_id, tid, scd_flow,
1187 ba_resp_scd_ssn, &reclaimed_skbs)) { 1292 ba_resp_scd_ssn, &reclaimed_skbs)) {
1188 spin_unlock(&priv->sta_lock); 1293 spin_unlock(&priv->sta_lock);
1189 return 0; 1294 return 0;
1190 } 1295 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index f1226dbf789d..8d7637083fcf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -26,6 +26,9 @@
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
29#include <linux/kernel.h> 32#include <linux/kernel.h>
30#include <linux/module.h> 33#include <linux/module.h>
31#include <linux/init.h> 34#include <linux/init.h>
@@ -43,13 +46,13 @@
43 46
44#include "iwl-eeprom.h" 47#include "iwl-eeprom.h"
45#include "iwl-dev.h" 48#include "iwl-dev.h"
46#include "iwl-core.h"
47#include "iwl-io.h" 49#include "iwl-io.h"
48#include "iwl-agn-calib.h" 50#include "iwl-agn-calib.h"
49#include "iwl-agn.h" 51#include "iwl-agn.h"
50#include "iwl-shared.h"
51#include "iwl-trans.h" 52#include "iwl-trans.h"
52#include "iwl-op-mode.h" 53#include "iwl-op-mode.h"
54#include "iwl-drv.h"
55#include "iwl-modparams.h"
53 56
54/****************************************************************************** 57/******************************************************************************
55 * 58 *
@@ -177,7 +180,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
177 rate = info->control.rates[0].idx; 180 rate = info->control.rates[0].idx;
178 181
179 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 182 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
180 hw_params(priv).valid_tx_ant); 183 priv->hw_params.valid_tx_ant);
181 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 184 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
182 185
183 /* In mac80211, rates for 5 GHz start at 0 */ 186 /* In mac80211, rates for 5 GHz start at 0 */
@@ -286,6 +289,25 @@ out:
286 mutex_unlock(&priv->mutex); 289 mutex_unlock(&priv->mutex);
287} 290}
288 291
292int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
293{
294 struct iwl_statistics_cmd statistics_cmd = {
295 .configuration_flags =
296 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
297 };
298
299 if (flags & CMD_ASYNC)
300 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
301 CMD_ASYNC,
302 sizeof(struct iwl_statistics_cmd),
303 &statistics_cmd);
304 else
305 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
306 CMD_SYNC,
307 sizeof(struct iwl_statistics_cmd),
308 &statistics_cmd);
309}
310
289/** 311/**
290 * iwl_bg_statistics_periodic - Timer callback to queue statistics 312 * iwl_bg_statistics_periodic - Timer callback to queue statistics
291 * 313 *
@@ -326,14 +348,14 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
326 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); 348 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
327 349
328 /* Make sure device is powered up for SRAM reads */ 350 /* Make sure device is powered up for SRAM reads */
329 spin_lock_irqsave(&trans(priv)->reg_lock, reg_flags); 351 spin_lock_irqsave(&priv->trans->reg_lock, reg_flags);
330 if (unlikely(!iwl_grab_nic_access(trans(priv)))) { 352 if (unlikely(!iwl_grab_nic_access(priv->trans))) {
331 spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags); 353 spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
332 return; 354 return;
333 } 355 }
334 356
335 /* Set starting address; reads will auto-increment */ 357 /* Set starting address; reads will auto-increment */
336 iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, ptr); 358 iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);
337 359
338 /* 360 /*
339 * Refuse to read more than would have fit into the log from 361 * Refuse to read more than would have fit into the log from
@@ -349,20 +371,20 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
349 * place event id # at far right for easier visual parsing. 371 * place event id # at far right for easier visual parsing.
350 */ 372 */
351 for (i = 0; i < num_events; i++) { 373 for (i = 0; i < num_events; i++) {
352 ev = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 374 ev = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
353 time = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 375 time = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
354 if (mode == 0) { 376 if (mode == 0) {
355 trace_iwlwifi_dev_ucode_cont_event( 377 trace_iwlwifi_dev_ucode_cont_event(
356 trans(priv)->dev, 0, time, ev); 378 priv->trans->dev, 0, time, ev);
357 } else { 379 } else {
358 data = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 380 data = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
359 trace_iwlwifi_dev_ucode_cont_event( 381 trace_iwlwifi_dev_ucode_cont_event(
360 trans(priv)->dev, time, data, ev); 382 priv->trans->dev, time, data, ev);
361 } 383 }
362 } 384 }
363 /* Allow device to power down */ 385 /* Allow device to power down */
364 iwl_release_nic_access(trans(priv)); 386 iwl_release_nic_access(priv->trans);
365 spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags); 387 spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
366} 388}
367 389
368static void iwl_continuous_event_trace(struct iwl_priv *priv) 390static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -379,10 +401,9 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
379 u32 num_wraps; /* # times uCode wrapped to top of log */ 401 u32 num_wraps; /* # times uCode wrapped to top of log */
380 u32 next_entry; /* index of next entry to be written by uCode */ 402 u32 next_entry; /* index of next entry to be written by uCode */
381 403
382 base = priv->shrd->device_pointers.log_event_table; 404 base = priv->device_pointers.log_event_table;
383 if (iwlagn_hw_valid_rtc_data_addr(base)) { 405 if (iwlagn_hw_valid_rtc_data_addr(base)) {
384 iwl_read_targ_mem_words(trans(priv), base, &read, sizeof(read)); 406 iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read));
385
386 capacity = read.capacity; 407 capacity = read.capacity;
387 mode = read.mode; 408 mode = read.mode;
388 num_wraps = read.wrap_counter; 409 num_wraps = read.wrap_counter;
@@ -422,7 +443,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
422 else 443 else
423 priv->event_log.wraps_once_count++; 444 priv->event_log.wraps_once_count++;
424 445
425 trace_iwlwifi_dev_ucode_wrap_event(trans(priv)->dev, 446 trace_iwlwifi_dev_ucode_wrap_event(priv->trans->dev,
426 num_wraps - priv->event_log.num_wraps, 447 num_wraps - priv->event_log.num_wraps,
427 next_entry, priv->event_log.next_entry); 448 next_entry, priv->event_log.next_entry);
428 449
@@ -488,7 +509,76 @@ static void iwl_bg_tx_flush(struct work_struct *work)
488 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); 509 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
489} 510}
490 511
491static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) 512/*
513 * queue/FIFO/AC mapping definitions
514 */
515
516#define IWL_TX_FIFO_BK 0 /* shared */
517#define IWL_TX_FIFO_BE 1
518#define IWL_TX_FIFO_VI 2 /* shared */
519#define IWL_TX_FIFO_VO 3
520#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
521#define IWL_TX_FIFO_BE_IPAN 4
522#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
523#define IWL_TX_FIFO_VO_IPAN 5
524/* re-uses the VO FIFO, uCode will properly flush/schedule */
525#define IWL_TX_FIFO_AUX 5
526#define IWL_TX_FIFO_UNUSED -1
527
528#define IWLAGN_CMD_FIFO_NUM 7
529
530/*
531 * This queue number is required for proper operation
532 * because the ucode will stop/start the scheduler as
533 * required.
534 */
535#define IWL_IPAN_MCAST_QUEUE 8
536
537static const u8 iwlagn_default_queue_to_tx_fifo[] = {
538 IWL_TX_FIFO_VO,
539 IWL_TX_FIFO_VI,
540 IWL_TX_FIFO_BE,
541 IWL_TX_FIFO_BK,
542 IWLAGN_CMD_FIFO_NUM,
543};
544
545static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
546 IWL_TX_FIFO_VO,
547 IWL_TX_FIFO_VI,
548 IWL_TX_FIFO_BE,
549 IWL_TX_FIFO_BK,
550 IWL_TX_FIFO_BK_IPAN,
551 IWL_TX_FIFO_BE_IPAN,
552 IWL_TX_FIFO_VI_IPAN,
553 IWL_TX_FIFO_VO_IPAN,
554 IWL_TX_FIFO_BE_IPAN,
555 IWLAGN_CMD_FIFO_NUM,
556 IWL_TX_FIFO_AUX,
557};
558
559static const u8 iwlagn_bss_ac_to_fifo[] = {
560 IWL_TX_FIFO_VO,
561 IWL_TX_FIFO_VI,
562 IWL_TX_FIFO_BE,
563 IWL_TX_FIFO_BK,
564};
565
566static const u8 iwlagn_bss_ac_to_queue[] = {
567 0, 1, 2, 3,
568};
569
570static const u8 iwlagn_pan_ac_to_fifo[] = {
571 IWL_TX_FIFO_VO_IPAN,
572 IWL_TX_FIFO_VI_IPAN,
573 IWL_TX_FIFO_BE_IPAN,
574 IWL_TX_FIFO_BK_IPAN,
575};
576
577static const u8 iwlagn_pan_ac_to_queue[] = {
578 7, 6, 5, 4,
579};
580
581void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
492{ 582{
493 int i; 583 int i;
494 584
@@ -496,9 +586,9 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
496 * The default context is always valid, 586 * The default context is always valid,
497 * the PAN context depends on uCode. 587 * the PAN context depends on uCode.
498 */ 588 */
499 priv->shrd->valid_contexts = BIT(IWL_RXON_CTX_BSS); 589 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
500 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) 590 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
501 priv->shrd->valid_contexts |= BIT(IWL_RXON_CTX_PAN); 591 priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
502 592
503 for (i = 0; i < NUM_IWL_RXON_CTX; i++) 593 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
504 priv->contexts[i].ctxid = i; 594 priv->contexts[i].ctxid = i;
@@ -520,6 +610,10 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
520 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; 610 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
521 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; 611 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
522 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; 612 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
613 memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue,
614 iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue));
615 memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo,
616 iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo));
523 617
524 priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON; 618 priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
525 priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd = 619 priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
@@ -542,26 +636,31 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
542 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; 636 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
543 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; 637 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
544 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; 638 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
639 memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue,
640 iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue));
641 memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo,
642 iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo));
643 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
545 644
546 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); 645 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
547} 646}
548 647
549static void iwl_rf_kill_ct_config(struct iwl_priv *priv) 648void iwl_rf_kill_ct_config(struct iwl_priv *priv)
550{ 649{
551 struct iwl_ct_kill_config cmd; 650 struct iwl_ct_kill_config cmd;
552 struct iwl_ct_kill_throttling_config adv_cmd; 651 struct iwl_ct_kill_throttling_config adv_cmd;
553 int ret = 0; 652 int ret = 0;
554 653
555 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, 654 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
556 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 655 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
557 656
558 priv->thermal_throttle.ct_kill_toggle = false; 657 priv->thermal_throttle.ct_kill_toggle = false;
559 658
560 if (cfg(priv)->base_params->support_ct_kill_exit) { 659 if (priv->cfg->base_params->support_ct_kill_exit) {
561 adv_cmd.critical_temperature_enter = 660 adv_cmd.critical_temperature_enter =
562 cpu_to_le32(hw_params(priv).ct_kill_threshold); 661 cpu_to_le32(priv->hw_params.ct_kill_threshold);
563 adv_cmd.critical_temperature_exit = 662 adv_cmd.critical_temperature_exit =
564 cpu_to_le32(hw_params(priv).ct_kill_exit_threshold); 663 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
565 664
566 ret = iwl_dvm_send_cmd_pdu(priv, 665 ret = iwl_dvm_send_cmd_pdu(priv,
567 REPLY_CT_KILL_CONFIG_CMD, 666 REPLY_CT_KILL_CONFIG_CMD,
@@ -572,11 +671,11 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
572 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " 671 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
573 "succeeded, critical temperature enter is %d," 672 "succeeded, critical temperature enter is %d,"
574 "exit is %d\n", 673 "exit is %d\n",
575 hw_params(priv).ct_kill_threshold, 674 priv->hw_params.ct_kill_threshold,
576 hw_params(priv).ct_kill_exit_threshold); 675 priv->hw_params.ct_kill_exit_threshold);
577 } else { 676 } else {
578 cmd.critical_temperature_R = 677 cmd.critical_temperature_R =
579 cpu_to_le32(hw_params(priv).ct_kill_threshold); 678 cpu_to_le32(priv->hw_params.ct_kill_threshold);
580 679
581 ret = iwl_dvm_send_cmd_pdu(priv, 680 ret = iwl_dvm_send_cmd_pdu(priv,
582 REPLY_CT_KILL_CONFIG_CMD, 681 REPLY_CT_KILL_CONFIG_CMD,
@@ -587,7 +686,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
587 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " 686 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
588 "succeeded, " 687 "succeeded, "
589 "critical temperature is %d\n", 688 "critical temperature is %d\n",
590 hw_params(priv).ct_kill_threshold); 689 priv->hw_params.ct_kill_threshold);
591 } 690 }
592} 691}
593 692
@@ -627,6 +726,29 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
627 } 726 }
628} 727}
629 728
729void iwl_send_bt_config(struct iwl_priv *priv)
730{
731 struct iwl_bt_cmd bt_cmd = {
732 .lead_time = BT_LEAD_TIME_DEF,
733 .max_kill = BT_MAX_KILL_DEF,
734 .kill_ack_mask = 0,
735 .kill_cts_mask = 0,
736 };
737
738 if (!iwlwifi_mod_params.bt_coex_active)
739 bt_cmd.flags = BT_COEX_DISABLE;
740 else
741 bt_cmd.flags = BT_COEX_ENABLE;
742
743 priv->bt_enable_flag = bt_cmd.flags;
744 IWL_DEBUG_INFO(priv, "BT coex %s\n",
745 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
746
747 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
748 CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
749 IWL_ERR(priv, "failed to send BT Coex Config\n");
750}
751
630/** 752/**
631 * iwl_alive_start - called after REPLY_ALIVE notification received 753 * iwl_alive_start - called after REPLY_ALIVE notification received
632 * from protocol/runtime uCode (initialization uCode's 754 * from protocol/runtime uCode (initialization uCode's
@@ -642,9 +764,6 @@ int iwl_alive_start(struct iwl_priv *priv)
642 /* After the ALIVE response, we can send host commands to the uCode */ 764 /* After the ALIVE response, we can send host commands to the uCode */
643 set_bit(STATUS_ALIVE, &priv->status); 765 set_bit(STATUS_ALIVE, &priv->status);
644 766
645 /* Enable watchdog to monitor the driver tx queues */
646 iwl_setup_watchdog(priv);
647
648 if (iwl_is_rfkill(priv)) 767 if (iwl_is_rfkill(priv))
649 return -ERFKILL; 768 return -ERFKILL;
650 769
@@ -654,10 +773,10 @@ int iwl_alive_start(struct iwl_priv *priv)
654 } 773 }
655 774
656 /* download priority table before any calibration request */ 775 /* download priority table before any calibration request */
657 if (cfg(priv)->bt_params && 776 if (priv->cfg->bt_params &&
658 cfg(priv)->bt_params->advanced_bt_coexist) { 777 priv->cfg->bt_params->advanced_bt_coexist) {
659 /* Configure Bluetooth device coexistence support */ 778 /* Configure Bluetooth device coexistence support */
660 if (cfg(priv)->bt_params->bt_sco_disable) 779 if (priv->cfg->bt_params->bt_sco_disable)
661 priv->bt_enable_pspoll = false; 780 priv->bt_enable_pspoll = false;
662 else 781 else
663 priv->bt_enable_pspoll = true; 782 priv->bt_enable_pspoll = true;
@@ -694,10 +813,8 @@ int iwl_alive_start(struct iwl_priv *priv)
694 813
695 ieee80211_wake_queues(priv->hw); 814 ieee80211_wake_queues(priv->hw);
696 815
697 priv->active_rate = IWL_RATES_MASK;
698
699 /* Configure Tx antenna selection based on H/W config */ 816 /* Configure Tx antenna selection based on H/W config */
700 iwlagn_send_tx_ant_config(priv, hw_params(priv).valid_tx_ant); 817 iwlagn_send_tx_ant_config(priv, priv->hw_params.valid_tx_ant);
701 818
702 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { 819 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
703 struct iwl_rxon_cmd *active_rxon = 820 struct iwl_rxon_cmd *active_rxon =
@@ -788,10 +905,6 @@ void iwl_down(struct iwl_priv *priv)
788 exit_pending = 905 exit_pending =
789 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); 906 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
790 907
791 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
792 * to prevent rearm timer */
793 del_timer_sync(&priv->watchdog);
794
795 iwl_clear_ucode_stations(priv, NULL); 908 iwl_clear_ucode_stations(priv, NULL);
796 iwl_dealloc_bcast_stations(priv); 909 iwl_dealloc_bcast_stations(priv);
797 iwl_clear_driver_stations(priv); 910 iwl_clear_driver_stations(priv);
@@ -800,9 +913,9 @@ void iwl_down(struct iwl_priv *priv)
800 priv->bt_status = 0; 913 priv->bt_status = 0;
801 priv->cur_rssi_ctx = NULL; 914 priv->cur_rssi_ctx = NULL;
802 priv->bt_is_sco = 0; 915 priv->bt_is_sco = 0;
803 if (cfg(priv)->bt_params) 916 if (priv->cfg->bt_params)
804 priv->bt_traffic_load = 917 priv->bt_traffic_load =
805 cfg(priv)->bt_params->bt_init_traffic_load; 918 priv->cfg->bt_params->bt_init_traffic_load;
806 else 919 else
807 priv->bt_traffic_load = 0; 920 priv->bt_traffic_load = 0;
808 priv->bt_full_concurrent = false; 921 priv->bt_full_concurrent = false;
@@ -817,18 +930,17 @@ void iwl_down(struct iwl_priv *priv)
817 ieee80211_stop_queues(priv->hw); 930 ieee80211_stop_queues(priv->hw);
818 931
819 priv->ucode_loaded = false; 932 priv->ucode_loaded = false;
820 iwl_trans_stop_device(trans(priv)); 933 iwl_trans_stop_device(priv->trans);
821 934
822 /* Clear out all status bits but a few that are stable across reset */ 935 /* Clear out all status bits but a few that are stable across reset */
823 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << 936 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
824 STATUS_RF_KILL_HW | 937 STATUS_RF_KILL_HW |
825 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 938 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
826 STATUS_GEO_CONFIGURED | 939 STATUS_GEO_CONFIGURED |
940 test_bit(STATUS_FW_ERROR, &priv->status) <<
941 STATUS_FW_ERROR |
827 test_bit(STATUS_EXIT_PENDING, &priv->status) << 942 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
828 STATUS_EXIT_PENDING; 943 STATUS_EXIT_PENDING;
829 priv->shrd->status &=
830 test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
831 STATUS_FW_ERROR;
832 944
833 dev_kfree_skb(priv->beacon_skb); 945 dev_kfree_skb(priv->beacon_skb);
834 priv->beacon_skb = NULL; 946 priv->beacon_skb = NULL;
@@ -863,17 +975,15 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
863 975
864void iwlagn_prepare_restart(struct iwl_priv *priv) 976void iwlagn_prepare_restart(struct iwl_priv *priv)
865{ 977{
866 struct iwl_rxon_context *ctx;
867 bool bt_full_concurrent; 978 bool bt_full_concurrent;
868 u8 bt_ci_compliance; 979 u8 bt_ci_compliance;
869 u8 bt_load; 980 u8 bt_load;
870 u8 bt_status; 981 u8 bt_status;
871 bool bt_is_sco; 982 bool bt_is_sco;
983 int i;
872 984
873 lockdep_assert_held(&priv->mutex); 985 lockdep_assert_held(&priv->mutex);
874 986
875 for_each_context(priv, ctx)
876 ctx->vif = NULL;
877 priv->is_open = 0; 987 priv->is_open = 0;
878 988
879 /* 989 /*
@@ -898,6 +1008,15 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
898 priv->bt_traffic_load = bt_load; 1008 priv->bt_traffic_load = bt_load;
899 priv->bt_status = bt_status; 1009 priv->bt_status = bt_status;
900 priv->bt_is_sco = bt_is_sco; 1010 priv->bt_is_sco = bt_is_sco;
1011
1012 /* reset aggregation queues */
1013 for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++)
1014 priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
1015 /* and stop counts */
1016 for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
1017 atomic_set(&priv->queue_stop_count[i], 0);
1018
1019 memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc));
901} 1020}
902 1021
903static void iwl_bg_restart(struct work_struct *data) 1022static void iwl_bg_restart(struct work_struct *data)
@@ -907,7 +1026,7 @@ static void iwl_bg_restart(struct work_struct *data)
907 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1026 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
908 return; 1027 return;
909 1028
910 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->shrd->status)) { 1029 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
911 mutex_lock(&priv->mutex); 1030 mutex_lock(&priv->mutex);
912 iwlagn_prepare_restart(priv); 1031 iwlagn_prepare_restart(priv);
913 mutex_unlock(&priv->mutex); 1032 mutex_unlock(&priv->mutex);
@@ -959,7 +1078,7 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
959 * 1078 *
960 *****************************************************************************/ 1079 *****************************************************************************/
961 1080
962static void iwl_setup_deferred_work(struct iwl_priv *priv) 1081void iwl_setup_deferred_work(struct iwl_priv *priv)
963{ 1082{
964 priv->workqueue = create_singlethread_workqueue(DRV_NAME); 1083 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
965 1084
@@ -974,7 +1093,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
974 1093
975 iwl_setup_scan_deferred_work(priv); 1094 iwl_setup_scan_deferred_work(priv);
976 1095
977 if (cfg(priv)->bt_params) 1096 if (priv->cfg->bt_params)
978 iwlagn_bt_setup_deferred_work(priv); 1097 iwlagn_bt_setup_deferred_work(priv);
979 1098
980 init_timer(&priv->statistics_periodic); 1099 init_timer(&priv->statistics_periodic);
@@ -984,15 +1103,11 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
984 init_timer(&priv->ucode_trace); 1103 init_timer(&priv->ucode_trace);
985 priv->ucode_trace.data = (unsigned long)priv; 1104 priv->ucode_trace.data = (unsigned long)priv;
986 priv->ucode_trace.function = iwl_bg_ucode_trace; 1105 priv->ucode_trace.function = iwl_bg_ucode_trace;
987
988 init_timer(&priv->watchdog);
989 priv->watchdog.data = (unsigned long)priv;
990 priv->watchdog.function = iwl_bg_watchdog;
991} 1106}
992 1107
993void iwl_cancel_deferred_work(struct iwl_priv *priv) 1108void iwl_cancel_deferred_work(struct iwl_priv *priv)
994{ 1109{
995 if (cfg(priv)->bt_params) 1110 if (priv->cfg->bt_params)
996 iwlagn_bt_cancel_deferred_work(priv); 1111 iwlagn_bt_cancel_deferred_work(priv);
997 1112
998 cancel_work_sync(&priv->run_time_calib_work); 1113 cancel_work_sync(&priv->run_time_calib_work);
@@ -1028,7 +1143,193 @@ static void iwl_init_hw_rates(struct ieee80211_rate *rates)
1028 } 1143 }
1029} 1144}
1030 1145
1031static int iwl_init_drv(struct iwl_priv *priv) 1146#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
1147#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
1148static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
1149 struct ieee80211_sta_ht_cap *ht_info,
1150 enum ieee80211_band band)
1151{
1152 u16 max_bit_rate = 0;
1153 u8 rx_chains_num = priv->hw_params.rx_chains_num;
1154 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1155
1156 ht_info->cap = 0;
1157 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
1158
1159 ht_info->ht_supported = true;
1160
1161 if (priv->cfg->ht_params &&
1162 priv->cfg->ht_params->ht_greenfield_support)
1163 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
1164 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
1165 max_bit_rate = MAX_BIT_RATE_20_MHZ;
1166 if (priv->hw_params.ht40_channel & BIT(band)) {
1167 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1168 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
1169 ht_info->mcs.rx_mask[4] = 0x01;
1170 max_bit_rate = MAX_BIT_RATE_40_MHZ;
1171 }
1172
1173 if (iwlwifi_mod_params.amsdu_size_8K)
1174 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
1175
1176 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
1177 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
1178
1179 ht_info->mcs.rx_mask[0] = 0xFF;
1180 if (rx_chains_num >= 2)
1181 ht_info->mcs.rx_mask[1] = 0xFF;
1182 if (rx_chains_num >= 3)
1183 ht_info->mcs.rx_mask[2] = 0xFF;
1184
1185 /* Highest supported Rx data rate */
1186 max_bit_rate *= rx_chains_num;
1187 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
1188 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
1189
1190 /* Tx MCS capabilities */
1191 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
1192 if (tx_chains_num != rx_chains_num) {
1193 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
1194 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
1195 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
1196 }
1197}
1198
1199/**
1200 * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
1201 */
1202static int iwl_init_geos(struct iwl_priv *priv)
1203{
1204 struct iwl_channel_info *ch;
1205 struct ieee80211_supported_band *sband;
1206 struct ieee80211_channel *channels;
1207 struct ieee80211_channel *geo_ch;
1208 struct ieee80211_rate *rates;
1209 int i = 0;
1210 s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
1211
1212 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
1213 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
1214 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
1215 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
1216 return 0;
1217 }
1218
1219 channels = kcalloc(priv->channel_count,
1220 sizeof(struct ieee80211_channel), GFP_KERNEL);
1221 if (!channels)
1222 return -ENOMEM;
1223
1224 rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
1225 GFP_KERNEL);
1226 if (!rates) {
1227 kfree(channels);
1228 return -ENOMEM;
1229 }
1230
1231 /* 5.2GHz channels start after the 2.4GHz channels */
1232 sband = &priv->bands[IEEE80211_BAND_5GHZ];
1233 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
1234 /* just OFDM */
1235 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
1236 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
1237
1238 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
1239 iwl_init_ht_hw_capab(priv, &sband->ht_cap,
1240 IEEE80211_BAND_5GHZ);
1241
1242 sband = &priv->bands[IEEE80211_BAND_2GHZ];
1243 sband->channels = channels;
1244 /* OFDM & CCK */
1245 sband->bitrates = rates;
1246 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
1247
1248 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
1249 iwl_init_ht_hw_capab(priv, &sband->ht_cap,
1250 IEEE80211_BAND_2GHZ);
1251
1252 priv->ieee_channels = channels;
1253 priv->ieee_rates = rates;
1254
1255 for (i = 0; i < priv->channel_count; i++) {
1256 ch = &priv->channel_info[i];
1257
1258 /* FIXME: might be removed if scan is OK */
1259 if (!is_channel_valid(ch))
1260 continue;
1261
1262 sband = &priv->bands[ch->band];
1263
1264 geo_ch = &sband->channels[sband->n_channels++];
1265
1266 geo_ch->center_freq =
1267 ieee80211_channel_to_frequency(ch->channel, ch->band);
1268 geo_ch->max_power = ch->max_power_avg;
1269 geo_ch->max_antenna_gain = 0xff;
1270 geo_ch->hw_value = ch->channel;
1271
1272 if (is_channel_valid(ch)) {
1273 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
1274 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
1275
1276 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
1277 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
1278
1279 if (ch->flags & EEPROM_CHANNEL_RADAR)
1280 geo_ch->flags |= IEEE80211_CHAN_RADAR;
1281
1282 geo_ch->flags |= ch->ht40_extension_channel;
1283
1284 if (ch->max_power_avg > max_tx_power)
1285 max_tx_power = ch->max_power_avg;
1286 } else {
1287 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
1288 }
1289
1290 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
1291 ch->channel, geo_ch->center_freq,
1292 is_channel_a_band(ch) ? "5.2" : "2.4",
1293 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
1294 "restricted" : "valid",
1295 geo_ch->flags);
1296 }
1297
1298 priv->tx_power_device_lmt = max_tx_power;
1299 priv->tx_power_user_lmt = max_tx_power;
1300 priv->tx_power_next = max_tx_power;
1301
1302 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
1303 priv->hw_params.sku & EEPROM_SKU_CAP_BAND_52GHZ) {
1304 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
1305 "Please send your %s to maintainer.\n",
1306 priv->trans->hw_id_str);
1307 priv->hw_params.sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
1308 }
1309
1310 if (iwlwifi_mod_params.disable_5ghz)
1311 priv->bands[IEEE80211_BAND_5GHZ].n_channels = 0;
1312
1313 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
1314 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
1315 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
1316
1317 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
1318
1319 return 0;
1320}
1321
1322/*
1323 * iwl_free_geos - undo allocations in iwl_init_geos
1324 */
1325static void iwl_free_geos(struct iwl_priv *priv)
1326{
1327 kfree(priv->ieee_channels);
1328 kfree(priv->ieee_rates);
1329 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
1330}
1331
1332int iwl_init_drv(struct iwl_priv *priv)
1032{ 1333{
1033 int ret; 1334 int ret;
1034 1335
@@ -1043,7 +1344,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
1043 priv->band = IEEE80211_BAND_2GHZ; 1344 priv->band = IEEE80211_BAND_2GHZ;
1044 1345
1045 priv->plcp_delta_threshold = 1346 priv->plcp_delta_threshold =
1046 cfg(priv)->base_params->plcp_delta_threshold; 1347 priv->cfg->base_params->plcp_delta_threshold;
1047 1348
1048 priv->iw_mode = NL80211_IFTYPE_STATION; 1349 priv->iw_mode = NL80211_IFTYPE_STATION;
1049 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; 1350 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
@@ -1052,12 +1353,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
1052 1353
1053 priv->ucode_owner = IWL_OWNERSHIP_DRIVER; 1354 priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
1054 1355
1055 /* initialize force reset */
1056 priv->force_reset[IWL_RF_RESET].reset_duration =
1057 IWL_DELAY_NEXT_FORCE_RF_RESET;
1058 priv->force_reset[IWL_FW_RESET].reset_duration =
1059 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
1060
1061 priv->rx_statistics_jiffies = jiffies; 1356 priv->rx_statistics_jiffies = jiffies;
1062 1357
1063 /* Choose which receivers/antennas to use */ 1358 /* Choose which receivers/antennas to use */
@@ -1066,8 +1361,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
1066 iwl_init_scan_params(priv); 1361 iwl_init_scan_params(priv);
1067 1362
1068 /* init bt coex */ 1363 /* init bt coex */
1069 if (cfg(priv)->bt_params && 1364 if (priv->cfg->bt_params &&
1070 cfg(priv)->bt_params->advanced_bt_coexist) { 1365 priv->cfg->bt_params->advanced_bt_coexist) {
1071 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; 1366 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
1072 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; 1367 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
1073 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; 1368 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -1097,7 +1392,7 @@ err:
1097 return ret; 1392 return ret;
1098} 1393}
1099 1394
1100static void iwl_uninit_drv(struct iwl_priv *priv) 1395void iwl_uninit_drv(struct iwl_priv *priv)
1101{ 1396{
1102 iwl_free_geos(priv); 1397 iwl_free_geos(priv);
1103 iwl_free_channel_map(priv); 1398 iwl_free_channel_map(priv);
@@ -1110,75 +1405,59 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
1110#endif 1405#endif
1111} 1406}
1112 1407
1113/* Size of one Rx buffer in host DRAM */ 1408void iwl_set_hw_params(struct iwl_priv *priv)
1114#define IWL_RX_BUF_SIZE_4K (4 * 1024)
1115#define IWL_RX_BUF_SIZE_8K (8 * 1024)
1116
1117static void iwl_set_hw_params(struct iwl_priv *priv)
1118{ 1409{
1119 if (cfg(priv)->ht_params) 1410 if (priv->cfg->ht_params)
1120 hw_params(priv).use_rts_for_aggregation = 1411 priv->hw_params.use_rts_for_aggregation =
1121 cfg(priv)->ht_params->use_rts_for_aggregation; 1412 priv->cfg->ht_params->use_rts_for_aggregation;
1122
1123 if (iwlagn_mod_params.amsdu_size_8K)
1124 hw_params(priv).rx_page_order =
1125 get_order(IWL_RX_BUF_SIZE_8K);
1126 else
1127 hw_params(priv).rx_page_order =
1128 get_order(IWL_RX_BUF_SIZE_4K);
1129 1413
1130 if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL) 1414 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
1131 hw_params(priv).sku &= ~EEPROM_SKU_CAP_11N_ENABLE; 1415 priv->hw_params.sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
1132
1133 hw_params(priv).num_ampdu_queues =
1134 cfg(priv)->base_params->num_of_ampdu_queues;
1135 hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
1136 1416
1137 /* Device-specific setup */ 1417 /* Device-specific setup */
1138 cfg(priv)->lib->set_hw_params(priv); 1418 priv->lib->set_hw_params(priv);
1139} 1419}
1140 1420
1141 1421
1142 1422
1143static void iwl_debug_config(struct iwl_priv *priv) 1423/* show what optional capabilities we have */
1424void iwl_option_config(struct iwl_priv *priv)
1144{ 1425{
1145 dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
1146#ifdef CONFIG_IWLWIFI_DEBUG 1426#ifdef CONFIG_IWLWIFI_DEBUG
1147 "enabled\n"); 1427 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n");
1148#else 1428#else
1149 "disabled\n"); 1429 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG disabled\n");
1150#endif 1430#endif
1151 dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS " 1431
1152#ifdef CONFIG_IWLWIFI_DEBUGFS 1432#ifdef CONFIG_IWLWIFI_DEBUGFS
1153 "enabled\n"); 1433 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS enabled\n");
1154#else 1434#else
1155 "disabled\n"); 1435 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS disabled\n");
1156#endif 1436#endif
1157 dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING " 1437
1158#ifdef CONFIG_IWLWIFI_DEVICE_TRACING 1438#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
1159 "enabled\n"); 1439 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING enabled\n");
1160#else 1440#else
1161 "disabled\n"); 1441 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
1162#endif 1442#endif
1163 1443
1164 dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
1165#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 1444#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
1166 "enabled\n"); 1445 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE enabled\n");
1167#else 1446#else
1168 "disabled\n"); 1447 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE disabled\n");
1169#endif 1448#endif
1170 dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_P2P " 1449
1171#ifdef CONFIG_IWLWIFI_P2P 1450#ifdef CONFIG_IWLWIFI_P2P
1172 "enabled\n"); 1451 IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
1173#else 1452#else
1174 "disabled\n"); 1453 IWL_INFO(priv, "CONFIG_IWLWIFI_P2P disabled\n");
1175#endif 1454#endif
1176} 1455}
1177 1456
1178static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, 1457static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1458 const struct iwl_cfg *cfg,
1179 const struct iwl_fw *fw) 1459 const struct iwl_fw *fw)
1180{ 1460{
1181 int err = 0;
1182 struct iwl_priv *priv; 1461 struct iwl_priv *priv;
1183 struct ieee80211_hw *hw; 1462 struct ieee80211_hw *hw;
1184 struct iwl_op_mode *op_mode; 1463 struct iwl_op_mode *op_mode;
@@ -1193,25 +1472,60 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1193 STATISTICS_NOTIFICATION, 1472 STATISTICS_NOTIFICATION,
1194 REPLY_TX, 1473 REPLY_TX,
1195 }; 1474 };
1475 int i;
1196 1476
1197 /************************ 1477 /************************
1198 * 1. Allocating HW data 1478 * 1. Allocating HW data
1199 ************************/ 1479 ************************/
1200 hw = iwl_alloc_all(); 1480 hw = iwl_alloc_all();
1201 if (!hw) { 1481 if (!hw) {
1202 pr_err("%s: Cannot allocate network device\n", 1482 pr_err("%s: Cannot allocate network device\n", cfg->name);
1203 cfg(trans)->name);
1204 err = -ENOMEM;
1205 goto out; 1483 goto out;
1206 } 1484 }
1207 1485
1208 op_mode = hw->priv; 1486 op_mode = hw->priv;
1209 op_mode->ops = &iwl_dvm_ops; 1487 op_mode->ops = &iwl_dvm_ops;
1210 priv = IWL_OP_MODE_GET_DVM(op_mode); 1488 priv = IWL_OP_MODE_GET_DVM(op_mode);
1211 priv->shrd = trans->shrd; 1489 priv->trans = trans;
1490 priv->dev = trans->dev;
1491 priv->cfg = cfg;
1212 priv->fw = fw; 1492 priv->fw = fw;
1213 /* TODO: remove fw from shared data later */ 1493
1214 priv->shrd->fw = fw; 1494 switch (priv->cfg->device_family) {
1495 case IWL_DEVICE_FAMILY_1000:
1496 case IWL_DEVICE_FAMILY_100:
1497 priv->lib = &iwl1000_lib;
1498 break;
1499 case IWL_DEVICE_FAMILY_2000:
1500 case IWL_DEVICE_FAMILY_105:
1501 priv->lib = &iwl2000_lib;
1502 break;
1503 case IWL_DEVICE_FAMILY_2030:
1504 case IWL_DEVICE_FAMILY_135:
1505 priv->lib = &iwl2030_lib;
1506 break;
1507 case IWL_DEVICE_FAMILY_5000:
1508 priv->lib = &iwl5000_lib;
1509 break;
1510 case IWL_DEVICE_FAMILY_5150:
1511 priv->lib = &iwl5150_lib;
1512 break;
1513 case IWL_DEVICE_FAMILY_6000:
1514 case IWL_DEVICE_FAMILY_6005:
1515 case IWL_DEVICE_FAMILY_6000i:
1516 case IWL_DEVICE_FAMILY_6050:
1517 case IWL_DEVICE_FAMILY_6150:
1518 priv->lib = &iwl6000_lib;
1519 break;
1520 case IWL_DEVICE_FAMILY_6030:
1521 priv->lib = &iwl6030_lib;
1522 break;
1523 default:
1524 break;
1525 }
1526
1527 if (WARN_ON(!priv->lib))
1528 goto out_free_hw;
1215 1529
1216 /* 1530 /*
1217 * Populate the state variables that the transport layer needs 1531 * Populate the state variables that the transport layer needs
@@ -1220,87 +1534,90 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1220 trans_cfg.op_mode = op_mode; 1534 trans_cfg.op_mode = op_mode;
1221 trans_cfg.no_reclaim_cmds = no_reclaim_cmds; 1535 trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
1222 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); 1536 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
1537 trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
1538 if (!iwlwifi_mod_params.wd_disable)
1539 trans_cfg.queue_watchdog_timeout =
1540 priv->cfg->base_params->wd_timeout;
1541 else
1542 trans_cfg.queue_watchdog_timeout = IWL_WATCHHDOG_DISABLED;
1543 trans_cfg.command_names = iwl_dvm_cmd_strings;
1223 1544
1224 ucode_flags = fw->ucode_capa.flags; 1545 ucode_flags = fw->ucode_capa.flags;
1225 1546
1226#ifndef CONFIG_IWLWIFI_P2P 1547#ifndef CONFIG_IWLWIFI_P2P
1227 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; 1548 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
1228#endif 1549#endif
1229 1550
1230 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) { 1551 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
1231 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; 1552 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
1232 trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; 1553 trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
1554 trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
1555 trans_cfg.n_queue_to_fifo =
1556 ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
1233 } else { 1557 } else {
1234 priv->sta_key_max_num = STA_KEY_MAX_NUM; 1558 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1235 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; 1559 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1560 trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
1561 trans_cfg.n_queue_to_fifo =
1562 ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
1236 } 1563 }
1237 1564
1238 /* Configure transport layer */ 1565 /* Configure transport layer */
1239 iwl_trans_configure(trans(priv), &trans_cfg); 1566 iwl_trans_configure(priv->trans, &trans_cfg);
1240 1567
1241 /* At this point both hw and priv are allocated. */ 1568 /* At this point both hw and priv are allocated. */
1242 1569
1243 SET_IEEE80211_DEV(priv->hw, trans(priv)->dev); 1570 SET_IEEE80211_DEV(priv->hw, priv->trans->dev);
1244 1571
1245 /* show what debugging capabilities we have */ 1572 iwl_option_config(priv);
1246 iwl_debug_config(priv);
1247 1573
1248 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); 1574 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
1249 1575
1250 /* is antenna coupling more than 35dB ? */ 1576 /* is antenna coupling more than 35dB ? */
1251 priv->bt_ant_couple_ok = 1577 priv->bt_ant_couple_ok =
1252 (iwlagn_mod_params.ant_coupling > 1578 (iwlwifi_mod_params.ant_coupling >
1253 IWL_BT_ANTENNA_COUPLING_THRESHOLD) ? 1579 IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
1254 true : false; 1580 true : false;
1255 1581
1256 /* enable/disable bt channel inhibition */ 1582 /* enable/disable bt channel inhibition */
1257 priv->bt_ch_announce = iwlagn_mod_params.bt_ch_announce; 1583 priv->bt_ch_announce = iwlwifi_mod_params.bt_ch_announce;
1258 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n", 1584 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
1259 (priv->bt_ch_announce) ? "On" : "Off"); 1585 (priv->bt_ch_announce) ? "On" : "Off");
1260 1586
1261 if (iwl_alloc_traffic_mem(priv))
1262 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
1263
1264 /* these spin locks will be used in apm_ops.init and EEPROM access 1587 /* these spin locks will be used in apm_ops.init and EEPROM access
1265 * we should init now 1588 * we should init now
1266 */ 1589 */
1267 spin_lock_init(&trans(priv)->reg_lock);
1268 spin_lock_init(&priv->statistics.lock); 1590 spin_lock_init(&priv->statistics.lock);
1269 1591
1270 /*********************** 1592 /***********************
1271 * 2. Read REV register 1593 * 2. Read REV register
1272 ***********************/ 1594 ***********************/
1273 IWL_INFO(priv, "Detected %s, REV=0x%X\n", 1595 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
1274 cfg(priv)->name, trans(priv)->hw_rev); 1596 priv->cfg->name, priv->trans->hw_rev);
1275 1597
1276 err = iwl_trans_start_hw(trans(priv)); 1598 if (iwl_trans_start_hw(priv->trans))
1277 if (err) 1599 goto out_free_hw;
1278 goto out_free_traffic_mem;
1279 1600
1280 /***************** 1601 /* Read the EEPROM */
1281 * 3. Read EEPROM 1602 if (iwl_eeprom_init(priv, priv->trans->hw_rev)) {
1282 *****************/
1283 err = iwl_eeprom_init(trans(priv), trans(priv)->hw_rev);
1284 /* Reset chip to save power until we load uCode during "up". */
1285 iwl_trans_stop_hw(trans(priv));
1286 if (err) {
1287 IWL_ERR(priv, "Unable to init EEPROM\n"); 1603 IWL_ERR(priv, "Unable to init EEPROM\n");
1288 goto out_free_traffic_mem; 1604 goto out_free_hw;
1289 } 1605 }
1290 err = iwl_eeprom_check_version(priv); 1606 /* Reset chip to save power until we load uCode during "up". */
1291 if (err) 1607 iwl_trans_stop_hw(priv->trans, false);
1608
1609 if (iwl_eeprom_check_version(priv))
1292 goto out_free_eeprom; 1610 goto out_free_eeprom;
1293 1611
1294 err = iwl_eeprom_init_hw_params(priv); 1612 if (iwl_eeprom_init_hw_params(priv))
1295 if (err)
1296 goto out_free_eeprom; 1613 goto out_free_eeprom;
1297 1614
1298 /* extract MAC Address */ 1615 /* extract MAC Address */
1299 iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr); 1616 iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
1300 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); 1617 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
1301 priv->hw->wiphy->addresses = priv->addresses; 1618 priv->hw->wiphy->addresses = priv->addresses;
1302 priv->hw->wiphy->n_addresses = 1; 1619 priv->hw->wiphy->n_addresses = 1;
1303 num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS); 1620 num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
1304 if (num_mac > 1) { 1621 if (num_mac > 1) {
1305 memcpy(priv->addresses[1].addr, priv->addresses[0].addr, 1622 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
1306 ETH_ALEN); 1623 ETH_ALEN);
@@ -1313,7 +1630,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1313 ************************/ 1630 ************************/
1314 iwl_set_hw_params(priv); 1631 iwl_set_hw_params(priv);
1315 1632
1316 if (!(hw_params(priv).sku & EEPROM_SKU_CAP_IPAN_ENABLE)) { 1633 if (!(priv->hw_params.sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
1317 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN"); 1634 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
1318 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; 1635 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
1319 /* 1636 /*
@@ -1323,18 +1640,32 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1323 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P; 1640 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
1324 priv->sta_key_max_num = STA_KEY_MAX_NUM; 1641 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1325 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; 1642 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1643 trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
1644 trans_cfg.n_queue_to_fifo =
1645 ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
1326 1646
1327 /* Configure transport layer again*/ 1647 /* Configure transport layer again*/
1328 iwl_trans_configure(trans(priv), &trans_cfg); 1648 iwl_trans_configure(priv->trans, &trans_cfg);
1329 } 1649 }
1330 1650
1331 /******************* 1651 /*******************
1332 * 5. Setup priv 1652 * 5. Setup priv
1333 *******************/ 1653 *******************/
1654 for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
1655 priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
1656 if (i < IWLAGN_FIRST_AMPDU_QUEUE &&
1657 i != IWL_DEFAULT_CMD_QUEUE_NUM &&
1658 i != IWL_IPAN_CMD_QUEUE_NUM)
1659 priv->queue_to_mac80211[i] = i;
1660 atomic_set(&priv->queue_stop_count[i], 0);
1661 }
1334 1662
1335 err = iwl_init_drv(priv); 1663 WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
1336 if (err) 1664 IWLAGN_CMD_FIFO_NUM);
1665
1666 if (iwl_init_drv(priv))
1337 goto out_free_eeprom; 1667 goto out_free_eeprom;
1668
1338 /* At this point both hw and priv are initialized. */ 1669 /* At this point both hw and priv are initialized. */
1339 1670
1340 /******************** 1671 /********************
@@ -1367,15 +1698,12 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1367 * 1698 *
1368 * 7. Setup and register with mac80211 and debugfs 1699 * 7. Setup and register with mac80211 and debugfs
1369 **************************************************/ 1700 **************************************************/
1370 err = iwlagn_mac_setup_register(priv, &fw->ucode_capa); 1701 if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
1371 if (err)
1372 goto out_destroy_workqueue; 1702 goto out_destroy_workqueue;
1373 1703
1374 err = iwl_dbgfs_register(priv, DRV_NAME); 1704 if (iwl_dbgfs_register(priv, DRV_NAME))
1375 if (err)
1376 IWL_ERR(priv, 1705 IWL_ERR(priv,
1377 "failed to create debugfs files. Ignoring error: %d\n", 1706 "failed to create debugfs files. Ignoring error\n");
1378 err);
1379 1707
1380 return op_mode; 1708 return op_mode;
1381 1709
@@ -1384,16 +1712,15 @@ out_destroy_workqueue:
1384 priv->workqueue = NULL; 1712 priv->workqueue = NULL;
1385 iwl_uninit_drv(priv); 1713 iwl_uninit_drv(priv);
1386out_free_eeprom: 1714out_free_eeprom:
1387 iwl_eeprom_free(priv->shrd); 1715 iwl_eeprom_free(priv);
1388out_free_traffic_mem: 1716out_free_hw:
1389 iwl_free_traffic_mem(priv);
1390 ieee80211_free_hw(priv->hw); 1717 ieee80211_free_hw(priv->hw);
1391out: 1718out:
1392 op_mode = NULL; 1719 op_mode = NULL;
1393 return op_mode; 1720 return op_mode;
1394} 1721}
1395 1722
1396static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) 1723void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1397{ 1724{
1398 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1725 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1399 1726
@@ -1408,9 +1735,9 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1408 1735
1409 /*This will stop the queues, move the device to low power state */ 1736 /*This will stop the queues, move the device to low power state */
1410 priv->ucode_loaded = false; 1737 priv->ucode_loaded = false;
1411 iwl_trans_stop_device(trans(priv)); 1738 iwl_trans_stop_device(priv->trans);
1412 1739
1413 iwl_eeprom_free(priv->shrd); 1740 iwl_eeprom_free(priv);
1414 1741
1415 /*netif_stop_queue(dev); */ 1742 /*netif_stop_queue(dev); */
1416 flush_workqueue(priv->workqueue); 1743 flush_workqueue(priv->workqueue);
@@ -1420,69 +1747,562 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1420 * until now... */ 1747 * until now... */
1421 destroy_workqueue(priv->workqueue); 1748 destroy_workqueue(priv->workqueue);
1422 priv->workqueue = NULL; 1749 priv->workqueue = NULL;
1423 iwl_free_traffic_mem(priv);
1424 1750
1425 iwl_uninit_drv(priv); 1751 iwl_uninit_drv(priv);
1426 1752
1427 dev_kfree_skb(priv->beacon_skb); 1753 dev_kfree_skb(priv->beacon_skb);
1428 1754
1755 iwl_trans_stop_hw(priv->trans, true);
1429 ieee80211_free_hw(priv->hw); 1756 ieee80211_free_hw(priv->hw);
1430} 1757}
1431 1758
1432static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode) 1759static const char * const desc_lookup_text[] = {
1760 "OK",
1761 "FAIL",
1762 "BAD_PARAM",
1763 "BAD_CHECKSUM",
1764 "NMI_INTERRUPT_WDG",
1765 "SYSASSERT",
1766 "FATAL_ERROR",
1767 "BAD_COMMAND",
1768 "HW_ERROR_TUNE_LOCK",
1769 "HW_ERROR_TEMPERATURE",
1770 "ILLEGAL_CHAN_FREQ",
1771 "VCC_NOT_STABLE",
1772 "FH_ERROR",
1773 "NMI_INTERRUPT_HOST",
1774 "NMI_INTERRUPT_ACTION_PT",
1775 "NMI_INTERRUPT_UNKNOWN",
1776 "UCODE_VERSION_MISMATCH",
1777 "HW_ERROR_ABS_LOCK",
1778 "HW_ERROR_CAL_LOCK_FAIL",
1779 "NMI_INTERRUPT_INST_ACTION_PT",
1780 "NMI_INTERRUPT_DATA_ACTION_PT",
1781 "NMI_TRM_HW_ER",
1782 "NMI_INTERRUPT_TRM",
1783 "NMI_INTERRUPT_BREAK_POINT",
1784 "DEBUG_0",
1785 "DEBUG_1",
1786 "DEBUG_2",
1787 "DEBUG_3",
1788};
1789
1790static struct { char *name; u8 num; } advanced_lookup[] = {
1791 { "NMI_INTERRUPT_WDG", 0x34 },
1792 { "SYSASSERT", 0x35 },
1793 { "UCODE_VERSION_MISMATCH", 0x37 },
1794 { "BAD_COMMAND", 0x38 },
1795 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1796 { "FATAL_ERROR", 0x3D },
1797 { "NMI_TRM_HW_ERR", 0x46 },
1798 { "NMI_INTERRUPT_TRM", 0x4C },
1799 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1800 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1801 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1802 { "NMI_INTERRUPT_HOST", 0x66 },
1803 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1804 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1805 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1806 { "ADVANCED_SYSASSERT", 0 },
1807};
1808
1809static const char *desc_lookup(u32 num)
1810{
1811 int i;
1812 int max = ARRAY_SIZE(desc_lookup_text);
1813
1814 if (num < max)
1815 return desc_lookup_text[num];
1816
1817 max = ARRAY_SIZE(advanced_lookup) - 1;
1818 for (i = 0; i < max; i++) {
1819 if (advanced_lookup[i].num == num)
1820 break;
1821 }
1822 return advanced_lookup[i].name;
1823}
1824
1825#define ERROR_START_OFFSET (1 * sizeof(u32))
1826#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1827
1828static void iwl_dump_nic_error_log(struct iwl_priv *priv)
1829{
1830 struct iwl_trans *trans = priv->trans;
1831 u32 base;
1832 struct iwl_error_event_table table;
1833
1834 base = priv->device_pointers.error_event_table;
1835 if (priv->cur_ucode == IWL_UCODE_INIT) {
1836 if (!base)
1837 base = priv->fw->init_errlog_ptr;
1838 } else {
1839 if (!base)
1840 base = priv->fw->inst_errlog_ptr;
1841 }
1842
1843 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
1844 IWL_ERR(priv,
1845 "Not valid error log pointer 0x%08X for %s uCode\n",
1846 base,
1847 (priv->cur_ucode == IWL_UCODE_INIT)
1848 ? "Init" : "RT");
1849 return;
1850 }
1851
1852 /*TODO: Update dbgfs with ISR error stats obtained below */
1853 iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
1854
1855 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1856 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
1857 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
1858 priv->status, table.valid);
1859 }
1860
1861 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
1862 table.data1, table.data2, table.line,
1863 table.blink1, table.blink2, table.ilink1,
1864 table.ilink2, table.bcon_time, table.gp1,
1865 table.gp2, table.gp3, table.ucode_ver,
1866 table.hw_ver, table.brd_ver);
1867 IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
1868 desc_lookup(table.error_id));
1869 IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
1870 IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
1871 IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
1872 IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
1873 IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
1874 IWL_ERR(priv, "0x%08X | data1\n", table.data1);
1875 IWL_ERR(priv, "0x%08X | data2\n", table.data2);
1876 IWL_ERR(priv, "0x%08X | line\n", table.line);
1877 IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
1878 IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
1879 IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
1880 IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
1881 IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
1882 IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
1883 IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
1884 IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
1885 IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
1886 IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
1887 IWL_ERR(priv, "0x%08X | isr0\n", table.isr0);
1888 IWL_ERR(priv, "0x%08X | isr1\n", table.isr1);
1889 IWL_ERR(priv, "0x%08X | isr2\n", table.isr2);
1890 IWL_ERR(priv, "0x%08X | isr3\n", table.isr3);
1891 IWL_ERR(priv, "0x%08X | isr4\n", table.isr4);
1892 IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref);
1893 IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event);
1894 IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control);
1895 IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration);
1896 IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
1897 IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
1898 IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
1899 IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp);
1900 IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler);
1901}
1902
1903#define EVENT_START_OFFSET (4 * sizeof(u32))
1904
1905/**
1906 * iwl_print_event_log - Dump error event log to syslog
1907 *
1908 */
1909static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1910 u32 num_events, u32 mode,
1911 int pos, char **buf, size_t bufsz)
1912{
1913 u32 i;
1914 u32 base; /* SRAM byte address of event log header */
1915 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1916 u32 ptr; /* SRAM byte address of log data */
1917 u32 ev, time, data; /* event log data */
1918 unsigned long reg_flags;
1919
1920 struct iwl_trans *trans = priv->trans;
1921
1922 if (num_events == 0)
1923 return pos;
1924
1925 base = priv->device_pointers.log_event_table;
1926 if (priv->cur_ucode == IWL_UCODE_INIT) {
1927 if (!base)
1928 base = priv->fw->init_evtlog_ptr;
1929 } else {
1930 if (!base)
1931 base = priv->fw->inst_evtlog_ptr;
1932 }
1933
1934 if (mode == 0)
1935 event_size = 2 * sizeof(u32);
1936 else
1937 event_size = 3 * sizeof(u32);
1938
1939 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1940
1941 /* Make sure device is powered up for SRAM reads */
1942 spin_lock_irqsave(&trans->reg_lock, reg_flags);
1943 if (unlikely(!iwl_grab_nic_access(trans)))
1944 goto out_unlock;
1945
1946 /* Set starting address; reads will auto-increment */
1947 iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
1948
1949 /* "time" is actually "data" for mode 0 (no timestamp).
1950 * place event id # at far right for easier visual parsing. */
1951 for (i = 0; i < num_events; i++) {
1952 ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1953 time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1954 if (mode == 0) {
1955 /* data, ev */
1956 if (bufsz) {
1957 pos += scnprintf(*buf + pos, bufsz - pos,
1958 "EVT_LOG:0x%08x:%04u\n",
1959 time, ev);
1960 } else {
1961 trace_iwlwifi_dev_ucode_event(trans->dev, 0,
1962 time, ev);
1963 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1964 time, ev);
1965 }
1966 } else {
1967 data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1968 if (bufsz) {
1969 pos += scnprintf(*buf + pos, bufsz - pos,
1970 "EVT_LOGT:%010u:0x%08x:%04u\n",
1971 time, data, ev);
1972 } else {
1973 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1974 time, data, ev);
1975 trace_iwlwifi_dev_ucode_event(trans->dev, time,
1976 data, ev);
1977 }
1978 }
1979 }
1980
1981 /* Allow device to power down */
1982 iwl_release_nic_access(trans);
1983out_unlock:
1984 spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
1985 return pos;
1986}
1987
1988/**
1989 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
1990 */
1991static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1992 u32 num_wraps, u32 next_entry,
1993 u32 size, u32 mode,
1994 int pos, char **buf, size_t bufsz)
1995{
1996 /*
1997 * display the newest DEFAULT_LOG_ENTRIES entries
1998 * i.e the entries just before the next ont that uCode would fill.
1999 */
2000 if (num_wraps) {
2001 if (next_entry < size) {
2002 pos = iwl_print_event_log(priv,
2003 capacity - (size - next_entry),
2004 size - next_entry, mode,
2005 pos, buf, bufsz);
2006 pos = iwl_print_event_log(priv, 0,
2007 next_entry, mode,
2008 pos, buf, bufsz);
2009 } else
2010 pos = iwl_print_event_log(priv, next_entry - size,
2011 size, mode, pos, buf, bufsz);
2012 } else {
2013 if (next_entry < size) {
2014 pos = iwl_print_event_log(priv, 0, next_entry,
2015 mode, pos, buf, bufsz);
2016 } else {
2017 pos = iwl_print_event_log(priv, next_entry - size,
2018 size, mode, pos, buf, bufsz);
2019 }
2020 }
2021 return pos;
2022}
2023
2024#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
2025
2026int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2027 char **buf, bool display)
2028{
2029 u32 base; /* SRAM byte address of event log header */
2030 u32 capacity; /* event log capacity in # entries */
2031 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
2032 u32 num_wraps; /* # times uCode wrapped to top of log */
2033 u32 next_entry; /* index of next entry to be written by uCode */
2034 u32 size; /* # entries that we'll print */
2035 u32 logsize;
2036 int pos = 0;
2037 size_t bufsz = 0;
2038 struct iwl_trans *trans = priv->trans;
2039
2040 base = priv->device_pointers.log_event_table;
2041 if (priv->cur_ucode == IWL_UCODE_INIT) {
2042 logsize = priv->fw->init_evtlog_size;
2043 if (!base)
2044 base = priv->fw->init_evtlog_ptr;
2045 } else {
2046 logsize = priv->fw->inst_evtlog_size;
2047 if (!base)
2048 base = priv->fw->inst_evtlog_ptr;
2049 }
2050
2051 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
2052 IWL_ERR(priv,
2053 "Invalid event log pointer 0x%08X for %s uCode\n",
2054 base,
2055 (priv->cur_ucode == IWL_UCODE_INIT)
2056 ? "Init" : "RT");
2057 return -EINVAL;
2058 }
2059
2060 /* event log header */
2061 capacity = iwl_read_targ_mem(trans, base);
2062 mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
2063 num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
2064 next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
2065
2066 if (capacity > logsize) {
2067 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
2068 "entries\n", capacity, logsize);
2069 capacity = logsize;
2070 }
2071
2072 if (next_entry > logsize) {
2073 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
2074 next_entry, logsize);
2075 next_entry = logsize;
2076 }
2077
2078 size = num_wraps ? capacity : next_entry;
2079
2080 /* bail out if nothing in log */
2081 if (size == 0) {
2082 IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
2083 return pos;
2084 }
2085
2086#ifdef CONFIG_IWLWIFI_DEBUG
2087 if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
2088 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
2089 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
2090#else
2091 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
2092 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
2093#endif
2094 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
2095 size);
2096
2097#ifdef CONFIG_IWLWIFI_DEBUG
2098 if (display) {
2099 if (full_log)
2100 bufsz = capacity * 48;
2101 else
2102 bufsz = size * 48;
2103 *buf = kmalloc(bufsz, GFP_KERNEL);
2104 if (!*buf)
2105 return -ENOMEM;
2106 }
2107 if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
2108 /*
2109 * if uCode has wrapped back to top of log,
2110 * start at the oldest entry,
2111 * i.e the next one that uCode would fill.
2112 */
2113 if (num_wraps)
2114 pos = iwl_print_event_log(priv, next_entry,
2115 capacity - next_entry, mode,
2116 pos, buf, bufsz);
2117 /* (then/else) start at top of log */
2118 pos = iwl_print_event_log(priv, 0,
2119 next_entry, mode, pos, buf, bufsz);
2120 } else
2121 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
2122 next_entry, size, mode,
2123 pos, buf, bufsz);
2124#else
2125 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
2126 next_entry, size, mode,
2127 pos, buf, bufsz);
2128#endif
2129 return pos;
2130}
2131
2132static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
2133{
2134 unsigned int reload_msec;
2135 unsigned long reload_jiffies;
2136
2137#ifdef CONFIG_IWLWIFI_DEBUG
2138 if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
2139 iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
2140#endif
2141
2142 /* uCode is no longer loaded. */
2143 priv->ucode_loaded = false;
2144
2145 /* Set the FW error flag -- cleared on iwl_down */
2146 set_bit(STATUS_FW_ERROR, &priv->status);
2147
2148 iwl_abort_notification_waits(&priv->notif_wait);
2149
2150 /* Keep the restart process from trying to send host
2151 * commands by clearing the ready bit */
2152 clear_bit(STATUS_READY, &priv->status);
2153
2154 wake_up(&priv->trans->wait_command_queue);
2155
2156 if (!ondemand) {
2157 /*
2158 * If firmware keep reloading, then it indicate something
2159 * serious wrong and firmware having problem to recover
2160 * from it. Instead of keep trying which will fill the syslog
2161 * and hang the system, let's just stop it
2162 */
2163 reload_jiffies = jiffies;
2164 reload_msec = jiffies_to_msecs((long) reload_jiffies -
2165 (long) priv->reload_jiffies);
2166 priv->reload_jiffies = reload_jiffies;
2167 if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
2168 priv->reload_count++;
2169 if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
2170 IWL_ERR(priv, "BUG_ON, Stop restarting\n");
2171 return;
2172 }
2173 } else
2174 priv->reload_count = 0;
2175 }
2176
2177 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2178 if (iwlwifi_mod_params.restart_fw) {
2179 IWL_DEBUG_FW_ERRORS(priv,
2180 "Restarting adapter due to uCode error.\n");
2181 queue_work(priv->workqueue, &priv->restart);
2182 } else
2183 IWL_DEBUG_FW_ERRORS(priv,
2184 "Detected FW error, but not restarting\n");
2185 }
2186}
2187
2188void iwl_nic_error(struct iwl_op_mode *op_mode)
2189{
2190 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2191
2192 IWL_ERR(priv, "Loaded firmware version: %s\n",
2193 priv->fw->fw_version);
2194
2195 iwl_dump_nic_error_log(priv);
2196 iwl_dump_nic_event_log(priv, false, NULL, false);
2197
2198 iwlagn_fw_error(priv, false);
2199}
2200
2201void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
1433{ 2202{
1434 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2203 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1435 2204
1436 if (!iwl_check_for_ct_kill(priv)) { 2205 if (!iwl_check_for_ct_kill(priv)) {
1437 IWL_ERR(priv, "Restarting adapter queue is full\n"); 2206 IWL_ERR(priv, "Restarting adapter queue is full\n");
1438 iwl_nic_error(op_mode); 2207 iwlagn_fw_error(priv, false);
1439 } 2208 }
1440} 2209}
1441 2210
1442static void iwl_nic_config(struct iwl_op_mode *op_mode) 2211void iwl_nic_config(struct iwl_op_mode *op_mode)
1443{ 2212{
1444 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2213 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1445 2214
1446 cfg(priv)->lib->nic_config(priv); 2215 priv->lib->nic_config(priv);
1447} 2216}
1448 2217
1449static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac) 2218static void iwl_wimax_active(struct iwl_op_mode *op_mode)
1450{ 2219{
1451 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2220 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1452 2221
1453 set_bit(ac, &priv->transport_queue_stop); 2222 clear_bit(STATUS_READY, &priv->status);
1454 ieee80211_stop_queue(priv->hw, ac); 2223 IWL_ERR(priv, "RF is used by WiMAX\n");
2224}
2225
2226void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
2227{
2228 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2229 int mq = priv->queue_to_mac80211[queue];
2230
2231 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
2232 return;
2233
2234 if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
2235 IWL_DEBUG_TX_QUEUES(priv,
2236 "queue %d (mac80211 %d) already stopped\n",
2237 queue, mq);
2238 return;
2239 }
2240
2241 set_bit(mq, &priv->transport_queue_stop);
2242 ieee80211_stop_queue(priv->hw, mq);
1455} 2243}
1456 2244
1457static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, u8 ac) 2245void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
1458{ 2246{
1459 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2247 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2248 int mq = priv->queue_to_mac80211[queue];
1460 2249
1461 clear_bit(ac, &priv->transport_queue_stop); 2250 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
2251 return;
2252
2253 if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
2254 IWL_DEBUG_TX_QUEUES(priv,
2255 "queue %d (mac80211 %d) already awake\n",
2256 queue, mq);
2257 return;
2258 }
2259
2260 clear_bit(mq, &priv->transport_queue_stop);
1462 2261
1463 if (!priv->passive_no_rx) 2262 if (!priv->passive_no_rx)
1464 ieee80211_wake_queue(priv->hw, ac); 2263 ieee80211_wake_queue(priv->hw, mq);
1465} 2264}
1466 2265
1467void iwlagn_lift_passive_no_rx(struct iwl_priv *priv) 2266void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
1468{ 2267{
1469 int ac; 2268 int mq;
1470 2269
1471 if (!priv->passive_no_rx) 2270 if (!priv->passive_no_rx)
1472 return; 2271 return;
1473 2272
1474 for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) { 2273 for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
1475 if (!test_bit(ac, &priv->transport_queue_stop)) { 2274 if (!test_bit(mq, &priv->transport_queue_stop)) {
1476 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d"); 2275 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d", mq);
1477 ieee80211_wake_queue(priv->hw, ac); 2276 ieee80211_wake_queue(priv->hw, mq);
1478 } else { 2277 } else {
1479 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d"); 2278 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d", mq);
1480 } 2279 }
1481 } 2280 }
1482 2281
1483 priv->passive_no_rx = false; 2282 priv->passive_no_rx = false;
1484} 2283}
1485 2284
2285void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
2286{
2287 struct ieee80211_tx_info *info;
2288
2289 info = IEEE80211_SKB_CB(skb);
2290 kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
2291 dev_kfree_skb_any(skb);
2292}
2293
2294void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2295{
2296 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2297
2298 if (state)
2299 set_bit(STATUS_RF_KILL_HW, &priv->status);
2300 else
2301 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2302
2303 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
2304}
2305
1486const struct iwl_op_mode_ops iwl_dvm_ops = { 2306const struct iwl_op_mode_ops iwl_dvm_ops = {
1487 .start = iwl_op_mode_dvm_start, 2307 .start = iwl_op_mode_dvm_start,
1488 .stop = iwl_op_mode_dvm_stop, 2308 .stop = iwl_op_mode_dvm_stop,
@@ -1494,6 +2314,7 @@ const struct iwl_op_mode_ops iwl_dvm_ops = {
1494 .nic_error = iwl_nic_error, 2314 .nic_error = iwl_nic_error,
1495 .cmd_queue_full = iwl_cmd_queue_full, 2315 .cmd_queue_full = iwl_cmd_queue_full,
1496 .nic_config = iwl_nic_config, 2316 .nic_config = iwl_nic_config,
2317 .wimax_active = iwl_wimax_active,
1497}; 2318};
1498 2319
1499/***************************************************************************** 2320/*****************************************************************************
@@ -1544,96 +2365,3 @@ static void __exit iwl_exit(void)
1544 2365
1545module_exit(iwl_exit); 2366module_exit(iwl_exit);
1546module_init(iwl_init); 2367module_init(iwl_init);
1547
1548#ifdef CONFIG_IWLWIFI_DEBUG
1549module_param_named(debug, iwlagn_mod_params.debug_level, uint,
1550 S_IRUGO | S_IWUSR);
1551MODULE_PARM_DESC(debug, "debug output mask");
1552#endif
1553
1554module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
1555MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
1556module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO);
1557MODULE_PARM_DESC(11n_disable,
1558 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
1559module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
1560 int, S_IRUGO);
1561MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
1562module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
1563MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
1564
1565module_param_named(ucode_alternative,
1566 iwlagn_mod_params.wanted_ucode_alternative,
1567 int, S_IRUGO);
1568MODULE_PARM_DESC(ucode_alternative,
1569 "specify ucode alternative to use from ucode file");
1570
1571module_param_named(antenna_coupling, iwlagn_mod_params.ant_coupling,
1572 int, S_IRUGO);
1573MODULE_PARM_DESC(antenna_coupling,
1574 "specify antenna coupling in dB (defualt: 0 dB)");
1575
1576module_param_named(bt_ch_inhibition, iwlagn_mod_params.bt_ch_announce,
1577 bool, S_IRUGO);
1578MODULE_PARM_DESC(bt_ch_inhibition,
1579 "Enable BT channel inhibition (default: enable)");
1580
1581module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
1582MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
1583
1584module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
1585MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
1586
1587module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
1588MODULE_PARM_DESC(wd_disable,
1589 "Disable stuck queue watchdog timer 0=system default, "
1590 "1=disable, 2=enable (default: 0)");
1591
1592/*
1593 * set bt_coex_active to true, uCode will do kill/defer
1594 * every time the priority line is asserted (BT is sending signals on the
1595 * priority line in the PCIx).
1596 * set bt_coex_active to false, uCode will ignore the BT activity and
1597 * perform the normal operation
1598 *
1599 * User might experience transmit issue on some platform due to WiFi/BT
1600 * co-exist problem. The possible behaviors are:
1601 * Able to scan and finding all the available AP
1602 * Not able to associate with any AP
1603 * On those platforms, WiFi communication can be restored by set
1604 * "bt_coex_active" module parameter to "false"
1605 *
1606 * default: bt_coex_active = true (BT_COEX_ENABLE)
1607 */
1608module_param_named(bt_coex_active, iwlagn_mod_params.bt_coex_active,
1609 bool, S_IRUGO);
1610MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
1611
1612module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO);
1613MODULE_PARM_DESC(led_mode, "0=system default, "
1614 "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
1615
1616module_param_named(power_save, iwlagn_mod_params.power_save,
1617 bool, S_IRUGO);
1618MODULE_PARM_DESC(power_save,
1619 "enable WiFi power management (default: disable)");
1620
1621module_param_named(power_level, iwlagn_mod_params.power_level,
1622 int, S_IRUGO);
1623MODULE_PARM_DESC(power_level,
1624 "default power save level (range from 1 - 5, default: 1)");
1625
1626module_param_named(auto_agg, iwlagn_mod_params.auto_agg,
1627 bool, S_IRUGO);
1628MODULE_PARM_DESC(auto_agg,
1629 "enable agg w/o check traffic load (default: enable)");
1630
1631/*
1632 * For now, keep using power level 1 instead of automatically
1633 * adjusting ...
1634 */
1635module_param_named(no_sleep_autoadjust, iwlagn_mod_params.no_sleep_autoadjust,
1636 bool, S_IRUGO);
1637MODULE_PARM_DESC(no_sleep_autoadjust,
1638 "don't automatically adjust sleep level "
1639 "according to maximum network latency (default: true)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 3780a03f2716..79c0fe06f4db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -64,6 +64,43 @@
64#define __iwl_agn_h__ 64#define __iwl_agn_h__
65 65
66#include "iwl-dev.h" 66#include "iwl-dev.h"
67#include "iwl-config.h"
68
69/* The first 11 queues (0-10) are used otherwise */
70#define IWLAGN_FIRST_AMPDU_QUEUE 11
71
72/* AUX (TX during scan dwell) queue */
73#define IWL_AUX_QUEUE 10
74
75/* device operations */
76extern struct iwl_lib_ops iwl1000_lib;
77extern struct iwl_lib_ops iwl2000_lib;
78extern struct iwl_lib_ops iwl2030_lib;
79extern struct iwl_lib_ops iwl5000_lib;
80extern struct iwl_lib_ops iwl5150_lib;
81extern struct iwl_lib_ops iwl6000_lib;
82extern struct iwl_lib_ops iwl6030_lib;
83
84
85#define TIME_UNIT 1024
86
87/*****************************************************
88* DRIVER STATUS FUNCTIONS
89******************************************************/
90#define STATUS_RF_KILL_HW 0
91#define STATUS_CT_KILL 1
92#define STATUS_ALIVE 2
93#define STATUS_READY 3
94#define STATUS_GEO_CONFIGURED 4
95#define STATUS_EXIT_PENDING 5
96#define STATUS_STATISTICS 6
97#define STATUS_SCANNING 7
98#define STATUS_SCAN_ABORTING 8
99#define STATUS_SCAN_HW 9
100#define STATUS_FW_ERROR 10
101#define STATUS_CHANNEL_SWITCH_PENDING 11
102#define STATUS_SCAN_COMPLETE 12
103#define STATUS_POWER_PMI 13
67 104
68struct iwl_ucode_capabilities; 105struct iwl_ucode_capabilities;
69 106
@@ -80,12 +117,9 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
80void iwl_down(struct iwl_priv *priv); 117void iwl_down(struct iwl_priv *priv);
81void iwl_cancel_deferred_work(struct iwl_priv *priv); 118void iwl_cancel_deferred_work(struct iwl_priv *priv);
82void iwlagn_prepare_restart(struct iwl_priv *priv); 119void iwlagn_prepare_restart(struct iwl_priv *priv);
83void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
84int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode, 120int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
85 struct iwl_rx_cmd_buffer *rxb, 121 struct iwl_rx_cmd_buffer *rxb,
86 struct iwl_device_cmd *cmd); 122 struct iwl_device_cmd *cmd);
87void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
88void iwl_nic_error(struct iwl_op_mode *op_mode);
89 123
90bool iwl_check_for_ct_kill(struct iwl_priv *priv); 124bool iwl_check_for_ct_kill(struct iwl_priv *priv);
91 125
@@ -103,6 +137,8 @@ int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
103 u32 flags, u16 len, const void *data); 137 u32 flags, u16 len, const void *data);
104 138
105/* RXON */ 139/* RXON */
140void iwl_connection_init_rx_config(struct iwl_priv *priv,
141 struct iwl_rxon_context *ctx);
106int iwlagn_set_pan_params(struct iwl_priv *priv); 142int iwlagn_set_pan_params(struct iwl_priv *priv);
107int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 143int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
108void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 144void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -113,11 +149,15 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
113 u32 changes); 149 u32 changes);
114void iwlagn_config_ht40(struct ieee80211_conf *conf, 150void iwlagn_config_ht40(struct ieee80211_conf *conf,
115 struct iwl_rxon_context *ctx); 151 struct iwl_rxon_context *ctx);
152void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
153void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
154 struct iwl_rxon_context *ctx);
155void iwl_set_flags_for_band(struct iwl_priv *priv,
156 struct iwl_rxon_context *ctx,
157 enum ieee80211_band band,
158 struct ieee80211_vif *vif);
116 159
117/* uCode */ 160/* uCode */
118int iwlagn_rx_calib_result(struct iwl_priv *priv,
119 struct iwl_rx_cmd_buffer *rxb,
120 struct iwl_device_cmd *cmd);
121int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type); 161int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
122void iwl_send_prio_tbl(struct iwl_priv *priv); 162void iwl_send_prio_tbl(struct iwl_priv *priv);
123int iwl_init_alive_start(struct iwl_priv *priv); 163int iwl_init_alive_start(struct iwl_priv *priv);
@@ -128,14 +168,25 @@ int iwl_send_calib_results(struct iwl_priv *priv);
128int iwl_calib_set(struct iwl_priv *priv, 168int iwl_calib_set(struct iwl_priv *priv,
129 const struct iwl_calib_hdr *cmd, int len); 169 const struct iwl_calib_hdr *cmd, int len);
130void iwl_calib_free_results(struct iwl_priv *priv); 170void iwl_calib_free_results(struct iwl_priv *priv);
171int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
172 char **buf, bool display);
173int iwlagn_hw_valid_rtc_data_addr(u32 addr);
131 174
132/* lib */ 175/* lib */
133int iwlagn_send_tx_power(struct iwl_priv *priv); 176int iwlagn_send_tx_power(struct iwl_priv *priv);
134void iwlagn_temperature(struct iwl_priv *priv); 177void iwlagn_temperature(struct iwl_priv *priv);
135u16 iwl_eeprom_calib_version(struct iwl_shared *shrd);
136int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 178int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
137void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 179void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
138int iwlagn_send_beacon_cmd(struct iwl_priv *priv); 180int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
181int iwl_send_statistics_request(struct iwl_priv *priv,
182 u8 flags, bool clear);
183
184static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
185 struct iwl_priv *priv, enum ieee80211_band band)
186{
187 return priv->hw->wiphy->bands[band];
188}
189
139#ifdef CONFIG_PM_SLEEP 190#ifdef CONFIG_PM_SLEEP
140int iwlagn_send_patterns(struct iwl_priv *priv, 191int iwlagn_send_patterns(struct iwl_priv *priv,
141 struct cfg80211_wowlan *wowlan); 192 struct cfg80211_wowlan *wowlan);
@@ -145,6 +196,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
145/* rx */ 196/* rx */
146int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); 197int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
147void iwl_setup_rx_handlers(struct iwl_priv *priv); 198void iwl_setup_rx_handlers(struct iwl_priv *priv);
199void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
148 200
149 201
150/* tx */ 202/* tx */
@@ -189,6 +241,31 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
189/* scan */ 241/* scan */
190void iwlagn_post_scan(struct iwl_priv *priv); 242void iwlagn_post_scan(struct iwl_priv *priv);
191void iwlagn_disable_roc(struct iwl_priv *priv); 243void iwlagn_disable_roc(struct iwl_priv *priv);
244int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
245void iwl_init_scan_params(struct iwl_priv *priv);
246int iwl_scan_cancel(struct iwl_priv *priv);
247void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
248void iwl_force_scan_end(struct iwl_priv *priv);
249void iwl_internal_short_hw_scan(struct iwl_priv *priv);
250void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
251void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
252void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
253int __must_check iwl_scan_initiate(struct iwl_priv *priv,
254 struct ieee80211_vif *vif,
255 enum iwl_scan_type scan_type,
256 enum ieee80211_band band);
257
258/* For faster active scanning, scan will move to the next channel if fewer than
259 * PLCP_QUIET_THRESH packets are heard on this channel within
260 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
261 * time if it's a quiet channel (nothing responded to our probe, and there's
262 * no other traffic).
263 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
264#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
265#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
266
267#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
268
192 269
193/* bt coex */ 270/* bt coex */
194void iwlagn_send_advance_bt_config(struct iwl_priv *priv); 271void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
@@ -201,6 +278,12 @@ void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
201void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv); 278void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv);
202void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena); 279void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
203 280
281static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
282{
283 return priv->cfg->bt_params &&
284 priv->cfg->bt_params->advanced_bt_coexist;
285}
286
204#ifdef CONFIG_IWLWIFI_DEBUG 287#ifdef CONFIG_IWLWIFI_DEBUG
205const char *iwl_get_tx_fail_reason(u32 status); 288const char *iwl_get_tx_fail_reason(u32 status);
206const char *iwl_get_agg_tx_fail_reason(u16 status); 289const char *iwl_get_agg_tx_fail_reason(u16 status);
@@ -239,8 +322,6 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
239u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 322u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
240 const u8 *addr, bool is_ap, struct ieee80211_sta *sta); 323 const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
241 324
242void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
243 u8 sta_id, struct iwl_link_quality_cmd *link_cmd);
244int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 325int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
245 struct iwl_link_quality_cmd *lq, u8 flags, bool init); 326 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
246int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, 327int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
@@ -248,6 +329,9 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
248int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 329int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
249 struct ieee80211_sta *sta); 330 struct ieee80211_sta *sta);
250 331
332bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
333 struct iwl_rxon_context *ctx,
334 struct ieee80211_sta_ht_cap *ht_cap);
251 335
252static inline int iwl_sta_id(struct ieee80211_sta *sta) 336static inline int iwl_sta_id(struct ieee80211_sta *sta)
253{ 337{
@@ -305,9 +389,6 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
305 return cpu_to_le32(flags|(u32)rate); 389 return cpu_to_le32(flags|(u32)rate);
306} 390}
307 391
308/* eeprom */
309void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
310
311extern int iwl_alive_start(struct iwl_priv *priv); 392extern int iwl_alive_start(struct iwl_priv *priv);
312/* svtool */ 393/* svtool */
313#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 394#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
@@ -386,13 +467,35 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
386 return iwl_is_ready(priv); 467 return iwl_is_ready(priv);
387} 468}
388 469
470static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
471{
472 if (state)
473 set_bit(STATUS_POWER_PMI, &priv->status);
474 else
475 clear_bit(STATUS_POWER_PMI, &priv->status);
476 iwl_trans_set_pmi(priv->trans, state);
477}
478
479#ifdef CONFIG_IWLWIFI_DEBUGFS
480int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
481void iwl_dbgfs_unregister(struct iwl_priv *priv);
482#else
483static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
484{
485 return 0;
486}
487static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
488{
489}
490#endif /* CONFIG_IWLWIFI_DEBUGFS */
491
389#ifdef CONFIG_IWLWIFI_DEBUG 492#ifdef CONFIG_IWLWIFI_DEBUG
390#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \ 493#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
391do { \ 494do { \
392 if (!iwl_is_rfkill((m))) \ 495 if (!iwl_is_rfkill((m))) \
393 IWL_ERR(m, fmt, ##args); \ 496 IWL_ERR(m, fmt, ##args); \
394 else \ 497 else \
395 __iwl_err(trans(m)->dev, true, \ 498 __iwl_err((m)->dev, true, \
396 !iwl_have_debug_level(IWL_DL_RADIO), \ 499 !iwl_have_debug_level(IWL_DL_RADIO), \
397 fmt, ##args); \ 500 fmt, ##args); \
398} while (0) 501} while (0)
@@ -402,8 +505,98 @@ do { \
402 if (!iwl_is_rfkill((m))) \ 505 if (!iwl_is_rfkill((m))) \
403 IWL_ERR(m, fmt, ##args); \ 506 IWL_ERR(m, fmt, ##args); \
404 else \ 507 else \
405 __iwl_err(trans(m)->dev, true, true, fmt, ##args); \ 508 __iwl_err((m)->dev, true, true, fmt, ##args); \
406} while (0) 509} while (0)
407#endif /* CONFIG_IWLWIFI_DEBUG */ 510#endif /* CONFIG_IWLWIFI_DEBUG */
408 511
512extern const char *iwl_dvm_cmd_strings[REPLY_MAX];
513
514static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
515{
516 const char *s = iwl_dvm_cmd_strings[cmd];
517 if (s)
518 return s;
519 return "UNKNOWN";
520}
521
522/* API method exported for mvm hybrid state */
523void iwl_setup_deferred_work(struct iwl_priv *priv);
524int iwl_send_wimax_coex(struct iwl_priv *priv);
525int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
526void iwl_option_config(struct iwl_priv *priv);
527void iwl_set_hw_params(struct iwl_priv *priv);
528void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags);
529int iwl_init_drv(struct iwl_priv *priv);
530void iwl_uninit_drv(struct iwl_priv *priv);
531void iwl_send_bt_config(struct iwl_priv *priv);
532void iwl_rf_kill_ct_config(struct iwl_priv *priv);
533int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
534void iwl_teardown_interface(struct iwl_priv *priv,
535 struct ieee80211_vif *vif,
536 bool mode_change);
537int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
538void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
539void iwlagn_check_needed_chains(struct iwl_priv *priv,
540 struct iwl_rxon_context *ctx,
541 struct ieee80211_bss_conf *bss_conf);
542void iwlagn_chain_noise_reset(struct iwl_priv *priv);
543int iwlagn_update_beacon(struct iwl_priv *priv,
544 struct ieee80211_vif *vif);
545void iwl_tt_handler(struct iwl_priv *priv);
546void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode);
547void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue);
548void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
549void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
550void iwl_nic_error(struct iwl_op_mode *op_mode);
551void iwl_cmd_queue_full(struct iwl_op_mode *op_mode);
552void iwl_nic_config(struct iwl_op_mode *op_mode);
553int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
554 struct ieee80211_sta *sta, bool set);
555void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
556 enum ieee80211_rssi_event rssi_event);
557int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw);
558int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
559void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
560void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue);
561void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
562 struct ieee80211_channel_switch *ch_switch);
563int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
564 struct ieee80211_vif *vif,
565 struct ieee80211_sta *sta,
566 enum ieee80211_sta_state old_state,
567 enum ieee80211_sta_state new_state);
568int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
569 struct ieee80211_vif *vif,
570 enum ieee80211_ampdu_mlme_action action,
571 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
572 u8 buf_size);
573int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
574 struct ieee80211_vif *vif,
575 struct cfg80211_scan_request *req);
576void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
577 struct ieee80211_vif *vif,
578 enum sta_notify_cmd cmd,
579 struct ieee80211_sta *sta);
580void iwlagn_configure_filter(struct ieee80211_hw *hw,
581 unsigned int changed_flags,
582 unsigned int *total_flags,
583 u64 multicast);
584int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
585 struct ieee80211_vif *vif, u16 queue,
586 const struct ieee80211_tx_queue_params *params);
587void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
588 struct ieee80211_vif *vif,
589 struct cfg80211_gtk_rekey_data *data);
590void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
591 struct ieee80211_vif *vif,
592 struct ieee80211_key_conf *keyconf,
593 struct ieee80211_sta *sta,
594 u32 iv32, u16 *phase1key);
595int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
596 struct ieee80211_vif *vif,
597 struct ieee80211_sta *sta,
598 struct ieee80211_key_conf *key);
599void iwlagn_mac_stop(struct ieee80211_hw *hw);
600void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
601int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
409#endif /* __iwl_agn_h__ */ 602#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 9ed73e5154be..83a6930f3658 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -1877,9 +1877,16 @@ struct iwl_bt_cmd {
1877 1877
1878#define IWLAGN_BT3_T7_DEFAULT 1 1878#define IWLAGN_BT3_T7_DEFAULT 1
1879 1879
1880enum iwl_bt_kill_idx {
1881 IWL_BT_KILL_DEFAULT = 0,
1882 IWL_BT_KILL_OVERRIDE = 1,
1883 IWL_BT_KILL_REDUCE = 2,
1884};
1885
1880#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffff0000) 1886#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffff0000)
1881#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffff0000) 1887#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffff0000)
1882#define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO cpu_to_le32(0xffffffff) 1888#define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO cpu_to_le32(0xffffffff)
1889#define IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE cpu_to_le32(0)
1883 1890
1884#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2 1891#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2
1885 1892
@@ -1891,7 +1898,7 @@ struct iwl_bt_cmd {
1891#define IWLAGN_BT_VALID_3W_TIMERS cpu_to_le16(BIT(3)) 1898#define IWLAGN_BT_VALID_3W_TIMERS cpu_to_le16(BIT(3))
1892#define IWLAGN_BT_VALID_KILL_ACK_MASK cpu_to_le16(BIT(4)) 1899#define IWLAGN_BT_VALID_KILL_ACK_MASK cpu_to_le16(BIT(4))
1893#define IWLAGN_BT_VALID_KILL_CTS_MASK cpu_to_le16(BIT(5)) 1900#define IWLAGN_BT_VALID_KILL_CTS_MASK cpu_to_le16(BIT(5))
1894#define IWLAGN_BT_VALID_BT4_TIMES cpu_to_le16(BIT(6)) 1901#define IWLAGN_BT_VALID_REDUCED_TX_PWR cpu_to_le16(BIT(6))
1895#define IWLAGN_BT_VALID_3W_LUT cpu_to_le16(BIT(7)) 1902#define IWLAGN_BT_VALID_3W_LUT cpu_to_le16(BIT(7))
1896 1903
1897#define IWLAGN_BT_ALL_VALID_MSK (IWLAGN_BT_VALID_ENABLE_FLAGS | \ 1904#define IWLAGN_BT_ALL_VALID_MSK (IWLAGN_BT_VALID_ENABLE_FLAGS | \
@@ -1900,9 +1907,11 @@ struct iwl_bt_cmd {
1900 IWLAGN_BT_VALID_3W_TIMERS | \ 1907 IWLAGN_BT_VALID_3W_TIMERS | \
1901 IWLAGN_BT_VALID_KILL_ACK_MASK | \ 1908 IWLAGN_BT_VALID_KILL_ACK_MASK | \
1902 IWLAGN_BT_VALID_KILL_CTS_MASK | \ 1909 IWLAGN_BT_VALID_KILL_CTS_MASK | \
1903 IWLAGN_BT_VALID_BT4_TIMES | \ 1910 IWLAGN_BT_VALID_REDUCED_TX_PWR | \
1904 IWLAGN_BT_VALID_3W_LUT) 1911 IWLAGN_BT_VALID_3W_LUT)
1905 1912
1913#define IWLAGN_BT_DECISION_LUT_SIZE 12
1914
1906struct iwl_basic_bt_cmd { 1915struct iwl_basic_bt_cmd {
1907 u8 flags; 1916 u8 flags;
1908 u8 ledtime; /* unused */ 1917 u8 ledtime; /* unused */
@@ -1913,12 +1922,13 @@ struct iwl_basic_bt_cmd {
1913 u8 bt3_prio_sample_time; 1922 u8 bt3_prio_sample_time;
1914 u8 bt3_timer_t2_value; 1923 u8 bt3_timer_t2_value;
1915 __le16 bt4_reaction_time; /* unused */ 1924 __le16 bt4_reaction_time; /* unused */
1916 __le32 bt3_lookup_table[12]; 1925 __le32 bt3_lookup_table[IWLAGN_BT_DECISION_LUT_SIZE];
1917 __le16 bt4_decision_time; /* unused */ 1926 u8 reduce_txpower;
1927 u8 reserved;
1918 __le16 valid; 1928 __le16 valid;
1919}; 1929};
1920 1930
1921struct iwl6000_bt_cmd { 1931struct iwl_bt_cmd_v1 {
1922 struct iwl_basic_bt_cmd basic; 1932 struct iwl_basic_bt_cmd basic;
1923 u8 prio_boost; 1933 u8 prio_boost;
1924 /* 1934 /*
@@ -1929,7 +1939,7 @@ struct iwl6000_bt_cmd {
1929 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ 1939 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
1930}; 1940};
1931 1941
1932struct iwl2000_bt_cmd { 1942struct iwl_bt_cmd_v2 {
1933 struct iwl_basic_bt_cmd basic; 1943 struct iwl_basic_bt_cmd basic;
1934 __le32 prio_boost; 1944 __le32 prio_boost;
1935 /* 1945 /*
@@ -3634,6 +3644,9 @@ enum iwl_bt_coex_profile_traffic_load {
3634 (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS) 3644 (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
3635 3645
3636 3646
3647#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD (-62)
3648#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
3649
3637struct iwl_bt_uart_msg { 3650struct iwl_bt_uart_msg {
3638 u8 header; 3651 u8 header;
3639 u8 frame1; 3652 u8 frame1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index b515d657a0ad..67b28aa7f9be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -60,136 +60,29 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#ifndef __iwl_shared_h__ 63#ifndef __IWL_CONFIG_H__
64#define __iwl_shared_h__ 64#define __IWL_CONFIG_H__
65 65
66#include <linux/types.h> 66#include <linux/types.h>
67#include <linux/spinlock.h>
68#include <linux/gfp.h>
69#include <net/mac80211.h> 67#include <net/mac80211.h>
70 68
71#include "iwl-commands.h"
72#include "iwl-fw.h"
73 69
74/** 70enum iwl_device_family {
75 * DOC: shared area - role and goal 71 IWL_DEVICE_FAMILY_UNDEFINED,
76 * 72 IWL_DEVICE_FAMILY_1000,
77 * The shared area contains all the data exported by the upper layer to the 73 IWL_DEVICE_FAMILY_100,
78 * other layers. Since the bus and transport layer shouldn't dereference 74 IWL_DEVICE_FAMILY_2000,
79 * iwl_priv, all the data needed by the upper layer and the transport / bus 75 IWL_DEVICE_FAMILY_2030,
80 * layer must be here. 76 IWL_DEVICE_FAMILY_105,
81 * The shared area also holds pointer to all the other layers. This allows a 77 IWL_DEVICE_FAMILY_135,
82 * layer to call a function from another layer. 78 IWL_DEVICE_FAMILY_5000,
83 * 79 IWL_DEVICE_FAMILY_5150,
84 * NOTE: All the layers hold a pointer to the shared area which must be shrd. 80 IWL_DEVICE_FAMILY_6000,
85 * A few macros assume that (_m)->shrd points to the shared area no matter 81 IWL_DEVICE_FAMILY_6000i,
86 * what _m is. 82 IWL_DEVICE_FAMILY_6005,
87 * 83 IWL_DEVICE_FAMILY_6030,
88 * gets notifications about enumeration, suspend, resume. 84 IWL_DEVICE_FAMILY_6050,
89 * For the moment, the bus layer is not a linux kernel module as itself, and 85 IWL_DEVICE_FAMILY_6150,
90 * the module_init function of the driver must call the bus specific
91 * registration functions. These functions are listed at the end of this file.
92 * For the moment, there is only one implementation of this interface: PCI-e.
93 * This implementation is iwl-pci.c
94 */
95
96struct iwl_priv;
97struct iwl_trans;
98struct iwl_sensitivity_ranges;
99struct iwl_trans_ops;
100
101#define DRV_NAME "iwlwifi"
102#define IWLWIFI_VERSION "in-tree:"
103#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation"
104#define DRV_AUTHOR "<ilw@linux.intel.com>"
105
106extern struct iwl_mod_params iwlagn_mod_params;
107
108#define IWL_DISABLE_HT_ALL BIT(0)
109#define IWL_DISABLE_HT_TXAGG BIT(1)
110#define IWL_DISABLE_HT_RXAGG BIT(2)
111
112/**
113 * struct iwl_mod_params
114 *
115 * Holds the module parameters
116 *
117 * @sw_crypto: using hardware encryption, default = 0
118 * @disable_11n: disable 11n capabilities, default = 0,
119 * use IWL_DISABLE_HT_* constants
120 * @amsdu_size_8K: enable 8K amsdu size, default = 1
121 * @antenna: both antennas (use diversity), default = 0
122 * @restart_fw: restart firmware, default = 1
123 * @plcp_check: enable plcp health check, default = true
124 * @ack_check: disable ack health check, default = false
125 * @wd_disable: enable stuck queue check, default = 0
126 * @bt_coex_active: enable bt coex, default = true
127 * @led_mode: system default, default = 0
128 * @no_sleep_autoadjust: disable autoadjust, default = true
129 * @power_save: disable power save, default = false
130 * @power_level: power level, default = 1
131 * @debug_level: levels are IWL_DL_*
132 * @ant_coupling: antenna coupling in dB, default = 0
133 * @bt_ch_announce: BT channel inhibition, default = enable
134 * @wanted_ucode_alternative: ucode alternative to use, default = 1
135 * @auto_agg: enable agg. without check, default = true
136 */
137struct iwl_mod_params {
138 int sw_crypto;
139 unsigned int disable_11n;
140 int amsdu_size_8K;
141 int antenna;
142 int restart_fw;
143 bool plcp_check;
144 bool ack_check;
145 int wd_disable;
146 bool bt_coex_active;
147 int led_mode;
148 bool no_sleep_autoadjust;
149 bool power_save;
150 int power_level;
151 u32 debug_level;
152 int ant_coupling;
153 bool bt_ch_announce;
154 int wanted_ucode_alternative;
155 bool auto_agg;
156};
157
158/**
159 * struct iwl_hw_params
160 *
161 * Holds the module parameters
162 *
163 * @num_ampdu_queues: num of ampdu queues
164 * @tx_chains_num: Number of TX chains
165 * @rx_chains_num: Number of RX chains
166 * @valid_tx_ant: usable antennas for TX
167 * @valid_rx_ant: usable antennas for RX
168 * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
169 * @sku: sku read from EEPROM
170 * @rx_page_order: Rx buffer page order
171 * @ct_kill_threshold: temperature threshold - in hw dependent unit
172 * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
173 * relevant for 1000, 6000 and up
174 * @wd_timeout: TX queues watchdog timeout
175 * @struct iwl_sensitivity_ranges: range of sensitivity values
176 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
177 */
178struct iwl_hw_params {
179 u8 num_ampdu_queues;
180 u8 tx_chains_num;
181 u8 rx_chains_num;
182 u8 valid_tx_ant;
183 u8 valid_rx_ant;
184 u8 ht40_channel;
185 bool use_rts_for_aggregation;
186 u16 sku;
187 u32 rx_page_order;
188 u32 ct_kill_threshold;
189 u32 ct_kill_exit_threshold;
190 unsigned int wd_timeout;
191
192 const struct iwl_sensitivity_ranges *sens;
193}; 86};
194 87
195/* 88/*
@@ -209,6 +102,34 @@ enum iwl_led_mode {
209}; 102};
210 103
211/* 104/*
105 * This is the threshold value of plcp error rate per 100mSecs. It is
106 * used to set and check for the validity of plcp_delta.
107 */
108#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN 1
109#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF 50
110#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF 100
111#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF 200
112#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX 255
113#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0
114
115/* TX queue watchdog timeouts in mSecs */
116#define IWL_WATCHHDOG_DISABLED 0
117#define IWL_DEF_WD_TIMEOUT 2000
118#define IWL_LONG_WD_TIMEOUT 10000
119#define IWL_MAX_WD_TIMEOUT 120000
120
121/* Antenna presence definitions */
122#define ANT_NONE 0x0
123#define ANT_A BIT(0)
124#define ANT_B BIT(1)
125#define ANT_C BIT(2)
126#define ANT_AB (ANT_A | ANT_B)
127#define ANT_AC (ANT_A | ANT_C)
128#define ANT_BC (ANT_B | ANT_C)
129#define ANT_ABC (ANT_A | ANT_B | ANT_C)
130
131
132/*
212 * @max_ll_items: max number of OTP blocks 133 * @max_ll_items: max number of OTP blocks
213 * @shadow_ram_support: shadow support for OTP memory 134 * @shadow_ram_support: shadow support for OTP memory
214 * @led_compensation: compensate on the led on/off time per HW according 135 * @led_compensation: compensate on the led on/off time per HW according
@@ -217,7 +138,6 @@ enum iwl_led_mode {
217 * @chain_noise_num_beacons: number of beacons used to compute chain noise 138 * @chain_noise_num_beacons: number of beacons used to compute chain noise
218 * @adv_thermal_throttle: support advance thermal throttle 139 * @adv_thermal_throttle: support advance thermal throttle
219 * @support_ct_kill_exit: support ct kill exit condition 140 * @support_ct_kill_exit: support ct kill exit condition
220 * @support_wimax_coexist: support wimax/wifi co-exist
221 * @plcp_delta_threshold: plcp error rate threshold used to trigger 141 * @plcp_delta_threshold: plcp error rate threshold used to trigger
222 * radio tuning when there is a high receiving plcp error rate 142 * radio tuning when there is a high receiving plcp error rate
223 * @chain_noise_scale: default chain noise scale used for gain computation 143 * @chain_noise_scale: default chain noise scale used for gain computation
@@ -226,12 +146,10 @@ enum iwl_led_mode {
226 * @shadow_reg_enable: HW shadhow register bit 146 * @shadow_reg_enable: HW shadhow register bit
227 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up 147 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
228 * @no_idle_support: do not support idle mode 148 * @no_idle_support: do not support idle mode
229 * wd_disable: disable watchdog timer
230 */ 149 */
231struct iwl_base_params { 150struct iwl_base_params {
232 int eeprom_size; 151 int eeprom_size;
233 int num_of_queues; /* def: HW dependent */ 152 int num_of_queues; /* def: HW dependent */
234 int num_of_ampdu_queues;/* def: HW dependent */
235 /* for iwl_apm_init() */ 153 /* for iwl_apm_init() */
236 u32 pll_cfg_val; 154 u32 pll_cfg_val;
237 155
@@ -240,7 +158,6 @@ struct iwl_base_params {
240 u16 led_compensation; 158 u16 led_compensation;
241 bool adv_thermal_throttle; 159 bool adv_thermal_throttle;
242 bool support_ct_kill_exit; 160 bool support_ct_kill_exit;
243 const bool support_wimax_coexist;
244 u8 plcp_delta_threshold; 161 u8 plcp_delta_threshold;
245 s32 chain_noise_scale; 162 s32 chain_noise_scale;
246 unsigned int wd_timeout; 163 unsigned int wd_timeout;
@@ -248,7 +165,6 @@ struct iwl_base_params {
248 const bool shadow_reg_enable; 165 const bool shadow_reg_enable;
249 const bool hd_v2; 166 const bool hd_v2;
250 const bool no_idle_support; 167 const bool no_idle_support;
251 const bool wd_disable;
252}; 168};
253 169
254/* 170/*
@@ -292,28 +208,21 @@ struct iwl_ht_params {
292 * @eeprom_ver: EEPROM version 208 * @eeprom_ver: EEPROM version
293 * @eeprom_calib_ver: EEPROM calibration version 209 * @eeprom_calib_ver: EEPROM calibration version
294 * @lib: pointer to the lib ops 210 * @lib: pointer to the lib ops
295 * @additional_nic_config: additional nic configuration
296 * @base_params: pointer to basic parameters 211 * @base_params: pointer to basic parameters
297 * @ht_params: point to ht patameters 212 * @ht_params: point to ht patameters
298 * @bt_params: pointer to bt parameters 213 * @bt_params: pointer to bt parameters
299 * @need_temp_offset_calib: need to perform temperature offset calibration 214 * @need_temp_offset_calib: need to perform temperature offset calibration
300 * @no_xtal_calib: some devices do not need crystal calibration data, 215 * @no_xtal_calib: some devices do not need crystal calibration data,
301 * don't send it to those 216 * don't send it to those
302 * @scan_rx_antennas: available antenna for scan operation
303 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) 217 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
304 * @adv_pm: advance power management 218 * @adv_pm: advance power management
305 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity 219 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
306 * @internal_wimax_coex: internal wifi/wimax combo device 220 * @internal_wimax_coex: internal wifi/wimax combo device
307 * @iq_invert: I/Q inversion
308 * @temp_offset_v2: support v2 of temperature offset calibration 221 * @temp_offset_v2: support v2 of temperature offset calibration
309 * 222 *
310 * We enable the driver to be backward compatible wrt API version. The 223 * We enable the driver to be backward compatible wrt. hardware features.
311 * driver specifies which APIs it supports (with @ucode_api_max being the 224 * API differences in uCode shouldn't be handled here but through TLVs
312 * highest and @ucode_api_min the lowest). Firmware will only be loaded if 225 * and/or the uCode API version instead.
313 * it has a supported API version.
314 *
315 * The ideal usage of this infrastructure is to treat a new ucode API
316 * release as a new hardware revision.
317 */ 226 */
318struct iwl_cfg { 227struct iwl_cfg {
319 /* params specific to an individual device within a device family */ 228 /* params specific to an individual device within a device family */
@@ -322,14 +231,13 @@ struct iwl_cfg {
322 const unsigned int ucode_api_max; 231 const unsigned int ucode_api_max;
323 const unsigned int ucode_api_ok; 232 const unsigned int ucode_api_ok;
324 const unsigned int ucode_api_min; 233 const unsigned int ucode_api_min;
234 const enum iwl_device_family device_family;
325 const u32 max_data_size; 235 const u32 max_data_size;
326 const u32 max_inst_size; 236 const u32 max_inst_size;
327 u8 valid_tx_ant; 237 u8 valid_tx_ant;
328 u8 valid_rx_ant; 238 u8 valid_rx_ant;
329 u16 eeprom_ver; 239 u16 eeprom_ver;
330 u16 eeprom_calib_ver; 240 u16 eeprom_calib_ver;
331 const struct iwl_lib_ops *lib;
332 void (*additional_nic_config)(struct iwl_priv *priv);
333 /* params not likely to change within a device family */ 241 /* params not likely to change within a device family */
334 const struct iwl_base_params *base_params; 242 const struct iwl_base_params *base_params;
335 /* params likely to change within a device family */ 243 /* params likely to change within a device family */
@@ -337,99 +245,11 @@ struct iwl_cfg {
337 const struct iwl_bt_params *bt_params; 245 const struct iwl_bt_params *bt_params;
338 const bool need_temp_offset_calib; /* if used set to true */ 246 const bool need_temp_offset_calib; /* if used set to true */
339 const bool no_xtal_calib; 247 const bool no_xtal_calib;
340 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
341 enum iwl_led_mode led_mode; 248 enum iwl_led_mode led_mode;
342 const bool adv_pm; 249 const bool adv_pm;
343 const bool rx_with_siso_diversity; 250 const bool rx_with_siso_diversity;
344 const bool internal_wimax_coex; 251 const bool internal_wimax_coex;
345 const bool iq_invert;
346 const bool temp_offset_v2; 252 const bool temp_offset_v2;
347}; 253};
348 254
349/** 255#endif /* __IWL_CONFIG_H__ */
350 * struct iwl_shared - shared fields for all the layers of the driver
351 *
352 * @status: STATUS_*
353 * @wowlan: are we running wowlan uCode
354 * @valid_contexts: microcode/device supports multiple contexts
355 * @bus: pointer to the bus layer data
356 * @cfg: see struct iwl_cfg
357 * @priv: pointer to the upper layer data
358 * @trans: pointer to the transport layer data
359 * @nic: pointer to the nic data
360 * @hw_params: see struct iwl_hw_params
361 * @lock: protect general shared data
362 * @eeprom: pointer to the eeprom/OTP image
363 * @ucode_type: indicator of loaded ucode image
364 * @device_pointers: pointers to ucode event tables
365 */
366struct iwl_shared {
367 unsigned long status;
368 u8 valid_contexts;
369
370 const struct iwl_cfg *cfg;
371 struct iwl_trans *trans;
372 void *drv;
373 struct iwl_hw_params hw_params;
374 const struct iwl_fw *fw;
375
376 /* eeprom -- this is in the card's little endian byte order */
377 u8 *eeprom;
378
379 /* ucode related variables */
380 enum iwl_ucode_type ucode_type;
381
382 struct {
383 u32 error_event_table;
384 u32 log_event_table;
385 } device_pointers;
386
387};
388
389/*Whatever _m is (iwl_trans, iwl_priv, these macros will work */
390#define cfg(_m) ((_m)->shrd->cfg)
391#define trans(_m) ((_m)->shrd->trans)
392#define hw_params(_m) ((_m)->shrd->hw_params)
393
394static inline bool iwl_have_debug_level(u32 level)
395{
396 return iwlagn_mod_params.debug_level & level;
397}
398
399enum iwl_rxon_context_id {
400 IWL_RXON_CTX_BSS,
401 IWL_RXON_CTX_PAN,
402
403 NUM_IWL_RXON_CTX
404};
405
406int iwlagn_hw_valid_rtc_data_addr(u32 addr);
407const char *get_cmd_string(u8 cmd);
408
409#define IWL_CMD(x) case x: return #x
410
411/*****************************************************
412* DRIVER STATUS FUNCTIONS
413******************************************************/
414#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
415/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
416#define STATUS_INT_ENABLED 2
417#define STATUS_RF_KILL_HW 3
418#define STATUS_CT_KILL 4
419#define STATUS_INIT 5
420#define STATUS_ALIVE 6
421#define STATUS_READY 7
422#define STATUS_TEMPERATURE 8
423#define STATUS_GEO_CONFIGURED 9
424#define STATUS_EXIT_PENDING 10
425#define STATUS_STATISTICS 12
426#define STATUS_SCANNING 13
427#define STATUS_SCAN_ABORTING 14
428#define STATUS_SCAN_HW 15
429#define STATUS_POWER_PMI 16
430#define STATUS_FW_ERROR 17
431#define STATUS_DEVICE_ENABLED 18
432#define STATUS_CHANNEL_SWITCH_PENDING 19
433#define STATUS_SCAN_COMPLETE 20
434
435#endif /* #__iwl_shared_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
deleted file mode 100644
index 46490d3b95b9..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ /dev/null
@@ -1,1480 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-debug.h"
38#include "iwl-core.h"
39#include "iwl-io.h"
40#include "iwl-power.h"
41#include "iwl-shared.h"
42#include "iwl-agn.h"
43#include "iwl-trans.h"
44
45const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
46
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
/*
 * iwl_init_ht_hw_capab - fill mac80211 HT capabilities for one band
 *
 * Populates @ht_info (capability flags, A-MPDU parameters, Rx/Tx MCS
 * masks and highest Rx rate) from the device hw_params and per-config
 * HT parameters.
 */
static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
			       struct ieee80211_sta_ht_cap *ht_info,
			       enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = hw_params(priv).rx_chains_num;
	u8 tx_chains_num = hw_params(priv).tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	if (cfg(priv)->ht_params &&
	    cfg(priv)->ht_params->ht_greenfield_support)
		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (hw_params(priv).ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* bit 0 of rx_mask[4] is MCS 32 (40 MHz duplicate) */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (iwlagn_mod_params.amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* one MCS byte (MCS 0-7, 8-15, 16-23) per available Rx chain */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities: only advertise a distinct Tx stream
	 * count when it differs from the Rx configuration */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
99
/**
 * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
 *
 * Allocates the ieee80211_channel and ieee80211_rate arrays, splits them
 * between the 2.4 GHz and 5.2 GHz bands, copies per-channel properties
 * from the EEPROM-derived channel_info, and derives the device/user TX
 * power limits. A second call is a no-op (returns 0).
 *
 * Memory allocated here is owned by priv and released by iwl_free_geos().
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int iwl_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;
	s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;

	/* already initialized? just flag geo as configured and bail */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	channels = kcalloc(priv->channel_count,
			   sizeof(struct ieee80211_channel), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		/* FIXME: might be removed if scan is OK */
		if (!is_channel_valid(ch))
			continue;

		sband = &priv->bands[ch->band];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		/* NOTE(review): with the "continue" above still in place,
		 * this condition is always true and the DISABLED branch
		 * below is unreachable; it only matters if that FIXME
		 * guard is ever removed. */
		if (is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			/* track the strongest channel for the device limit */
			if (ch->max_power_avg > max_tx_power)
				max_tx_power = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
				ch->channel, geo_ch->center_freq,
				is_channel_a_band(ch) ? "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				geo_ch->flags);
	}

	priv->tx_power_device_lmt = max_tx_power;
	priv->tx_power_user_lmt = max_tx_power;
	priv->tx_power_next = max_tx_power;

	/* a card with no 5 GHz channels but the 5.2 GHz SKU bit set was
	 * mis-detected: drop the 5.2 GHz capability */
	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     hw_params(priv).sku & EEPROM_SKU_CAP_BAND_52GHZ) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your %s to maintainer.\n",
			trans(priv)->hw_id_str);
		hw_params(priv).sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
219
220/*
221 * iwl_free_geos - undo allocations in iwl_init_geos
222 */
223void iwl_free_geos(struct iwl_priv *priv)
224{
225 kfree(priv->ieee_channels);
226 kfree(priv->ieee_rates);
227 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
228}
229
230static bool iwl_is_channel_extension(struct iwl_priv *priv,
231 enum ieee80211_band band,
232 u16 channel, u8 extension_chan_offset)
233{
234 const struct iwl_channel_info *ch_info;
235
236 ch_info = iwl_get_channel_info(priv, band, channel);
237 if (!is_channel_valid(ch_info))
238 return false;
239
240 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
241 return !(ch_info->ht40_extension_channel &
242 IEEE80211_CHAN_NO_HT40PLUS);
243 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
244 return !(ch_info->ht40_extension_channel &
245 IEEE80211_CHAN_NO_HT40MINUS);
246
247 return false;
248}
249
/*
 * iwl_is_ht40_tx_allowed - check whether HT40 transmission may be used
 *
 * Requires HT and 40 MHz enabled on @ctx, the peer (when @ht_cap is
 * non-NULL) to support HT, and the extension channel of the current
 * staging channel to be usable. Can be force-disabled from debugfs.
 */
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
	 * the bit will not set if it is pure 40MHz case
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (priv->disable_ht40)
		return false;
#endif

	return iwl_is_channel_extension(priv, priv->band,
			le16_to_cpu(ctx->staging.channel),
			ctx->ht.extension_chan_offset);
}
273
274static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
275{
276 u16 new_val;
277 u16 beacon_factor;
278
279 /*
280 * If mac80211 hasn't given us a beacon interval, program
281 * the default into the device (not checking this here
282 * would cause the adjustment below to return the maximum
283 * value, which may break PAN.)
284 */
285 if (!beacon_val)
286 return DEFAULT_BEACON_INTERVAL;
287
288 /*
289 * If the beacon interval we obtained from the peer
290 * is too large, we'll have to wake up more often
291 * (and in IBSS case, we'll beacon too much)
292 *
293 * For example, if max_beacon_val is 4096, and the
294 * requested beacon interval is 7000, we'll have to
295 * use 3500 to be able to wake up on the beacons.
296 *
297 * This could badly influence beacon detection stats.
298 */
299
300 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
301 new_val = beacon_val / beacon_factor;
302
303 if (!new_val)
304 new_val = max_beacon_val;
305
306 return new_val;
307}
308
/*
 * iwl_send_rxon_timing - build and send the beacon-timing command for @ctx
 *
 * Fills ctx->timing (timestamp, listen interval, beacon interval, beacon
 * timer init value, DTIM period) and sends it synchronously. When BSS and
 * PAN contexts coexist they must share a beacon interval: an associated
 * context dictates the value for the other; otherwise the interval
 * requested by mac80211 is clamped to the uCode maximum.
 *
 * Caller must hold priv->mutex. Returns 0 or the command-send error.
 */
int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &priv->hw->conf;

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
		/* non-station PAN inherits the associated BSS interval */
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
		    !ctx->vif->bss_conf.beacon_int)) {
		/* unassociated BSS inherits the associated PAN interval */
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else {
		/* clamp the mac80211-requested interval to the uCode max */
		beacon_int = iwl_adjust_beacon_interval(beacon_int,
			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
	}

	ctx->beacon_int = beacon_int;

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	/* time remaining until the next beacon boundary */
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* DTIM period defaults to 1 when unknown or unset */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
}
375
376void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
377 int hw_decrypt)
378{
379 struct iwl_rxon_cmd *rxon = &ctx->staging;
380
381 if (hw_decrypt)
382 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
383 else
384 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
385
386}
387
/*
 * iwl_check_rxon_cmd - validate the staging RXON command of @ctx
 *
 * Runs consistency checks on ctx->staging (band vs. flag combinations,
 * unicast MAC/BSSID, basic rates, AID range, non-zero channel). Each
 * failed check sets one bit in an error mask reported via WARN().
 * Returns 0 if valid, -EINVAL otherwise.
 */
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;
	u32 errors = 0;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		/* 2.4 GHz must not use narrow-band or radar detection */
		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
			errors |= BIT(0);
		}
		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong radar\n");
			errors |= BIT(1);
		}
	} else {
		/* 5.2 GHz requires short slot and forbids CCK */
		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
			IWL_WARN(priv, "check 5.2G: not short slot!\n");
			errors |= BIT(2);
		}
		if (rxon->flags & RXON_FLG_CCK_MSK) {
			IWL_WARN(priv, "check 5.2G: CCK!\n");
			errors |= BIT(3);
		}
	}
	/* neither our MAC nor the BSSID may be a multicast address */
	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
		IWL_WARN(priv, "mac/bssid mcast!\n");
		errors |= BIT(4);
	}

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
		errors |= BIT(5);
	}

	/* 802.11 AIDs are limited to 2007 */
	if (le16_to_cpu(rxon->assoc_id) > 2007) {
		IWL_WARN(priv, "aid > 2007\n");
		errors |= BIT(6);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
		IWL_WARN(priv, "CCK and short slot\n");
		errors |= BIT(7);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
		IWL_WARN(priv, "CCK and auto detect");
		errors |= BIT(8);
	}

	/* TGg protection requires auto-detect to be enabled too */
	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			    RXON_FLG_TGG_PROTECT_MSK)) ==
			    RXON_FLG_TGG_PROTECT_MSK) {
		IWL_WARN(priv, "TGg but no auto-detect\n");
		errors |= BIT(9);
	}

	if (rxon->channel == 0) {
		IWL_WARN(priv, "zero channel is invalid\n");
		errors |= BIT(10);
	}

	WARN(errors, "Invalid RXON (%#x), channel %d",
	     errors, le16_to_cpu(rxon->channel));

	return errors ? -EINVAL : 0;
}
459
/**
 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int iwl_full_rxon_required(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx)
{
	const struct iwl_rxon_cmd *staging = &ctx->staging;
	const struct iwl_rxon_cmd *active = &ctx->active;

/* CHK: full RXON needed whenever the condition holds */
#define CHK(cond)							\
	if ((cond)) {							\
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
		return 1;						\
	}

/* CHK_NEQ: full RXON needed whenever the two fields differ */
#define CHK_NEQ(c1, c2)						\
	if ((c1) != (c2)) {					\
		IWL_DEBUG_INFO(priv, "need full RXON - "	\
			       #c1 " != " #c2 " - %d != %d\n",	\
			       (c1), (c2));			\
		return 1;					\
	}

	/* These items are only settable from the full RXON command */
	CHK(!iwl_is_associated_ctx(ctx));
	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
	CHK(compare_ether_addr(staging->wlap_bssid_addr,
			       active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
		active->ofdm_ht_triple_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
522
/*
 * _iwl_set_rxon_ht - encode HT configuration into the staging RXON of @ctx
 *
 * Clears every HT-related RXON flag when HT is disabled; otherwise
 * encodes the protection mode and the channel mode (legacy, mixed or
 * pure 40 MHz plus control-channel position) and refreshes the RX
 * chain selection via iwlagn_set_rxon_chain().
 */
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
			     struct iwl_ht_config *ht_conf,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	/* FIXME: if the definition of ht.protection changed, the "translation"
	 * will be needed for rxon->flags
	 */
	rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IWL_ERR(priv, "invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	iwlagn_set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}
589
590void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
591{
592 struct iwl_rxon_context *ctx;
593
594 for_each_context(priv, ctx)
595 _iwl_set_rxon_ht(priv, ht_conf, ctx);
596}
597
598/* Return valid, unused, channel for a passive scan to reset the RF */
599u8 iwl_get_single_channel_number(struct iwl_priv *priv,
600 enum ieee80211_band band)
601{
602 const struct iwl_channel_info *ch_info;
603 int i;
604 u8 channel = 0;
605 u8 min, max;
606 struct iwl_rxon_context *ctx;
607
608 if (band == IEEE80211_BAND_5GHZ) {
609 min = 14;
610 max = priv->channel_count;
611 } else {
612 min = 0;
613 max = 14;
614 }
615
616 for (i = min; i < max; i++) {
617 bool busy = false;
618
619 for_each_context(priv, ctx) {
620 busy = priv->channel_info[i].channel ==
621 le16_to_cpu(ctx->staging.channel);
622 if (busy)
623 break;
624 }
625
626 if (busy)
627 continue;
628
629 channel = priv->channel_info[i].channel;
630 ch_info = iwl_get_channel_info(priv, band, channel);
631 if (is_channel_valid(ch_info))
632 break;
633 }
634
635 return channel;
636}
637
638/**
639 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
640 * @ch: requested channel as a pointer to struct ieee80211_channel
641
642 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
643 * in the staging RXON flag structure based on the ch->band
644 */
645void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
646 struct iwl_rxon_context *ctx)
647{
648 enum ieee80211_band band = ch->band;
649 u16 channel = ch->hw_value;
650
651 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
652 (priv->band == band))
653 return;
654
655 ctx->staging.channel = cpu_to_le16(channel);
656 if (band == IEEE80211_BAND_5GHZ)
657 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
658 else
659 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
660
661 priv->band = band;
662
663 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
664
665}
666
667void iwl_set_flags_for_band(struct iwl_priv *priv,
668 struct iwl_rxon_context *ctx,
669 enum ieee80211_band band,
670 struct ieee80211_vif *vif)
671{
672 if (band == IEEE80211_BAND_5GHZ) {
673 ctx->staging.flags &=
674 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
675 | RXON_FLG_CCK_MSK);
676 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
677 } else {
678 /* Copied from iwl_post_associate() */
679 if (vif && vif->bss_conf.use_short_slot)
680 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
681 else
682 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
683
684 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
685 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
686 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
687 }
688}
689
/*
 * iwl_connection_init_rx_config - initialize staging RXON with defaults
 *
 * Resets ctx->staging and fills it from the interface type (device type
 * and filter flags), the EEPROM channel matching the active channel
 * (falling back to the first known channel), the band-dependent flags,
 * and default legacy/HT basic rates.
 */
void iwl_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	const struct iwl_channel_info *ch_info;

	memset(&ctx->staging, 0, sizeof(ctx->staging));

	/* device type and filters depend on the interface type */
	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else switch (ctx->vif->type) {
	case NL80211_IFTYPE_AP:
		ctx->staging.dev_type = ctx->ap_devtype;
		break;

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* start from the currently active channel; fall back to the
	 * first known channel if it is unknown */
	ch_info = iwl_get_channel_info(priv, priv->band,
				       le16_to_cpu(ctx->active.channel));

	if (!ch_info)
		ch_info = &priv->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	ctx->staging.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}
760
761void iwl_set_rate(struct iwl_priv *priv)
762{
763 const struct ieee80211_supported_band *hw = NULL;
764 struct ieee80211_rate *rate;
765 struct iwl_rxon_context *ctx;
766 int i;
767
768 hw = iwl_get_hw_mode(priv, priv->band);
769 if (!hw) {
770 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
771 return;
772 }
773
774 priv->active_rate = 0;
775
776 for (i = 0; i < hw->n_bitrates; i++) {
777 rate = &(hw->bitrates[i]);
778 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
779 priv->active_rate |= (1 << rate->hw_value);
780 }
781
782 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
783
784 for_each_context(priv, ctx) {
785 ctx->staging.cck_basic_rates =
786 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
787
788 ctx->staging.ofdm_basic_rates =
789 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
790 }
791}
792
793void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
794{
795 /*
796 * MULTI-FIXME
797 * See iwlagn_mac_channel_switch.
798 */
799 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
800
801 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
802 return;
803
804 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
805 ieee80211_chswitch_done(ctx->vif, is_success);
806}
807
808#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * iwl_print_rx_config_cmd - dump the staging RXON of @ctxid to the debug log
 *
 * Hex-dumps the whole command and then prints each field individually.
 * Debug-build helper (compiled under CONFIG_IWLWIFI_DEBUG only).
 */
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
			     enum iwl_rxon_context_id ctxid)
{
	struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
829#endif
830
/*
 * iwlagn_fw_error - handle a uCode error, optionally restarting the firmware
 * @ondemand: true when the error was triggered deliberately (e.g. from
 *	debugfs); skips the reload-rate throttling in that case
 *
 * Marks the firmware dead (STATUS_FW_ERROR set, STATUS_READY/HCMD_ACTIVE
 * cleared), aborts notification waiters and wakes command waiters, then
 * queues the restart work unless restarts are disabled or the device is
 * going down. Unsolicited errors arriving faster than
 * IWL_MIN_RELOAD_DURATION are counted and restarting stops after
 * IWL_MAX_CONTINUE_RELOAD_CNT consecutive rapid failures.
 */
static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
	unsigned int reload_msec;
	unsigned long reload_jiffies;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
		iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
#endif

	/* uCode is no longer loaded. */
	priv->ucode_loaded = false;

	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->shrd->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);

	iwl_abort_notification_waits(&priv->notif_wait);

	/* Keep the restart process from trying to send host
	 * commands by clearing the ready bit */
	clear_bit(STATUS_READY, &priv->status);

	wake_up(&trans(priv)->wait_command_queue);

	if (!ondemand) {
		/*
		 * If firmware keep reloading, then it indicate something
		 * serious wrong and firmware having problem to recover
		 * from it. Instead of keep trying which will fill the syslog
		 * and hang the system, let's just stop it
		 */
		reload_jiffies = jiffies;
		reload_msec = jiffies_to_msecs((long) reload_jiffies -
					(long) priv->reload_jiffies);
		priv->reload_jiffies = reload_jiffies;
		if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
			priv->reload_count++;
			if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
				IWL_ERR(priv, "BUG_ON, Stop restarting\n");
				return;
			}
		} else
			priv->reload_count = 0;
	}

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		if (iwlagn_mod_params.restart_fw) {
			IWL_DEBUG_FW_ERRORS(priv,
				  "Restarting adapter due to uCode error.\n");
			queue_work(priv->workqueue, &priv->restart);
		} else
			IWL_DEBUG_FW_ERRORS(priv,
				  "Detected FW error, but not restarting\n");
	}
}
889
/*
 * iwl_set_tx_power - set the user TX power limit
 * @tx_power: requested limit (dBm)
 * @force: apply even if unchanged and even while a deferral condition holds
 *
 * Validates @tx_power against the device's lower/upper limits, records it
 * as tx_power_next, and sends it to the uCode unless a scan is running or
 * an RXON change is pending (in which case it is deferred and picked up
 * later from tx_power_next). Restores the previous value if the send
 * fails. Caller must hold priv->mutex. Returns 0, -EINVAL or -EIO.
 */
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below lower limit %d.\n",
			 tx_power,
			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
		return -EINVAL;
	}

	if (tx_power > priv->tx_power_device_lmt) {
		IWL_WARN(priv,
			"Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	priv->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = iwlagn_send_tx_power(priv);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
944
945void iwl_send_bt_config(struct iwl_priv *priv)
946{
947 struct iwl_bt_cmd bt_cmd = {
948 .lead_time = BT_LEAD_TIME_DEF,
949 .max_kill = BT_MAX_KILL_DEF,
950 .kill_ack_mask = 0,
951 .kill_cts_mask = 0,
952 };
953
954 if (!iwlagn_mod_params.bt_coex_active)
955 bt_cmd.flags = BT_COEX_DISABLE;
956 else
957 bt_cmd.flags = BT_COEX_ENABLE;
958
959 priv->bt_enable_flag = bt_cmd.flags;
960 IWL_DEBUG_INFO(priv, "BT coex %s\n",
961 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
962
963 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
964 CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
965 IWL_ERR(priv, "failed to send BT Coex Config\n");
966}
967
968int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
969{
970 struct iwl_statistics_cmd statistics_cmd = {
971 .configuration_flags =
972 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
973 };
974
975 if (flags & CMD_ASYNC)
976 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
977 CMD_ASYNC,
978 sizeof(struct iwl_statistics_cmd),
979 &statistics_cmd);
980 else
981 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
982 CMD_SYNC,
983 sizeof(struct iwl_statistics_cmd),
984 &statistics_cmd);
985}
986
987
988
989
990#ifdef CONFIG_IWLWIFI_DEBUGFS
991
992#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
993
994void iwl_reset_traffic_log(struct iwl_priv *priv)
995{
996 priv->tx_traffic_idx = 0;
997 priv->rx_traffic_idx = 0;
998 if (priv->tx_traffic)
999 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1000 if (priv->rx_traffic)
1001 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1002}
1003
1004int iwl_alloc_traffic_mem(struct iwl_priv *priv)
1005{
1006 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1007
1008 if (iwl_have_debug_level(IWL_DL_TX)) {
1009 if (!priv->tx_traffic) {
1010 priv->tx_traffic =
1011 kzalloc(traffic_size, GFP_KERNEL);
1012 if (!priv->tx_traffic)
1013 return -ENOMEM;
1014 }
1015 }
1016 if (iwl_have_debug_level(IWL_DL_RX)) {
1017 if (!priv->rx_traffic) {
1018 priv->rx_traffic =
1019 kzalloc(traffic_size, GFP_KERNEL);
1020 if (!priv->rx_traffic)
1021 return -ENOMEM;
1022 }
1023 }
1024 iwl_reset_traffic_log(priv);
1025 return 0;
1026}
1027
1028void iwl_free_traffic_mem(struct iwl_priv *priv)
1029{
1030 kfree(priv->tx_traffic);
1031 priv->tx_traffic = NULL;
1032
1033 kfree(priv->rx_traffic);
1034 priv->rx_traffic = NULL;
1035}
1036
1037void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1038 u16 length, struct ieee80211_hdr *header)
1039{
1040 __le16 fc;
1041 u16 len;
1042
1043 if (likely(!iwl_have_debug_level(IWL_DL_TX)))
1044 return;
1045
1046 if (!priv->tx_traffic)
1047 return;
1048
1049 fc = header->frame_control;
1050 if (ieee80211_is_data(fc)) {
1051 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1052 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1053 memcpy((priv->tx_traffic +
1054 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1055 header, len);
1056 priv->tx_traffic_idx =
1057 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1058 }
1059}
1060
1061void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1062 u16 length, struct ieee80211_hdr *header)
1063{
1064 __le16 fc;
1065 u16 len;
1066
1067 if (likely(!iwl_have_debug_level(IWL_DL_RX)))
1068 return;
1069
1070 if (!priv->rx_traffic)
1071 return;
1072
1073 fc = header->frame_control;
1074 if (ieee80211_is_data(fc)) {
1075 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1076 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1077 memcpy((priv->rx_traffic +
1078 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1079 header, len);
1080 priv->rx_traffic_idx =
1081 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1082 }
1083}
1084
/*
 * get_mgmt_string - name of a MANAGEMENT_* statistics index for debugfs
 *
 * NOTE(review): IWL_CMD presumably expands each entry to
 * "case X: return #X;" so the string matches the enum name -- confirm
 * against the macro definition.  Unknown indices yield "UNKNOWN".
 */
const char *get_mgmt_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(MANAGEMENT_ASSOC_REQ);
		IWL_CMD(MANAGEMENT_ASSOC_RESP);
		IWL_CMD(MANAGEMENT_REASSOC_REQ);
		IWL_CMD(MANAGEMENT_REASSOC_RESP);
		IWL_CMD(MANAGEMENT_PROBE_REQ);
		IWL_CMD(MANAGEMENT_PROBE_RESP);
		IWL_CMD(MANAGEMENT_BEACON);
		IWL_CMD(MANAGEMENT_ATIM);
		IWL_CMD(MANAGEMENT_DISASSOC);
		IWL_CMD(MANAGEMENT_AUTH);
		IWL_CMD(MANAGEMENT_DEAUTH);
		IWL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
1105
/*
 * get_ctrl_string - name of a CONTROL_* statistics index for debugfs
 *
 * NOTE(review): IWL_CMD presumably expands each entry to
 * "case X: return #X;" -- confirm against the macro definition.
 * Unknown indices yield "UNKNOWN".
 */
const char *get_ctrl_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(CONTROL_BACK_REQ);
		IWL_CMD(CONTROL_BACK);
		IWL_CMD(CONTROL_PSPOLL);
		IWL_CMD(CONTROL_RTS);
		IWL_CMD(CONTROL_CTS);
		IWL_CMD(CONTROL_ACK);
		IWL_CMD(CONTROL_CFEND);
		IWL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
1122
1123void iwl_clear_traffic_stats(struct iwl_priv *priv)
1124{
1125 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1126 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1127}
1128
/*
 * iwl_update_stats - account one TX or RX frame in the traffic statistics
 * @is_tx: true for a transmitted frame, false for a received one
 * @fc: the 802.11 frame_control field (little-endian, as on the wire)
 * @len: frame length in bytes (only used for data frames)
 *
 * With CONFIG_IWLWIFI_DEBUGFS defined this records management, control
 * and data frames for both TX and RX; use debugfs to display the
 * tx/rx statistics.  Without CONFIG_IWLWIFI_DEBUGFS no MGMT or CTRL
 * information is recorded, but data frames are still meant to be
 * counted because iwl-led.c drives LED blinking from the tx/rx data
 * counts (NOTE(review): the non-debugfs stub appears empty -- verify).
 */
void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	/* pick the per-direction counter block */
	if (is_tx)
		stats = &priv->tx_stats;
	else
		stats = &priv->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		/* bump the counter matching the management subtype */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		/* bump the counter matching the control subtype */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data: only count and byte totals are kept */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
1220#endif
1221
/*
 * iwl_force_rf_reset - force the radio to reset and retune
 *
 * Only acts when the driver is not shutting down and at least one
 * context is associated; otherwise the request is silently (or with a
 * debug message) rejected.
 */
static void iwl_force_rf_reset(struct iwl_priv *priv)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!iwl_is_any_associated(priv)) {
		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
		return;
	}
	/*
	 * There is no easy and better way to force reset the radio:
	 * the only known method is switching channels, which forces the
	 * radio to reset and retune.  An internal short scan (single
	 * channel) is used to achieve this.  The driver should reset the
	 * radio on consecutive missed beacons or any other detected
	 * uCode error condition.
	 */
	IWL_DEBUG_INFO(priv, "perform radio reset.\n");
	iwl_internal_short_hw_scan(priv);
}
1243
1244
/*
 * iwl_force_reset - perform (or reject) a radio or firmware reset
 * @mode: IWL_RF_RESET or IWL_FW_RESET
 * @external: true when the request comes from outside the driver
 *	(e.g. debugfs); external requests bypass the rate limiting and,
 *	for IWL_FW_RESET, the restart_fw module-parameter check
 *
 * Return: 0 on success, -EINVAL for a bad mode or when the driver is
 * exiting, -EAGAIN when an internal request arrives within the
 * rate-limit window (reset_duration) of the previous reset.
 */
int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
{
	struct iwl_force_reset *force_reset;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	if (mode >= IWL_MAX_FORCE_RESET) {
		IWL_DEBUG_INFO(priv, "invalid reset request.\n");
		return -EINVAL;
	}
	/* per-mode bookkeeping: requests, rejects, successes */
	force_reset = &priv->force_reset[mode];
	force_reset->reset_request_count++;
	if (!external) {
		/* rate-limit internally generated resets */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
			       force_reset->reset_duration, jiffies)) {
			IWL_DEBUG_INFO(priv, "force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;
	IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
	switch (mode) {
	case IWL_RF_RESET:
		iwl_force_rf_reset(priv);
		break;
	case IWL_FW_RESET:
		/*
		 * if the request is from external(ex: debugfs),
		 * then always perform the request in regardless the module
		 * parameter setting
		 * if the request is from internal (uCode error or driver
		 * detect failure), then fw_restart module parameter
		 * need to be check before performing firmware reload
		 */
		if (!external && !iwlagn_mod_params.restart_fw) {
			IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
				       "module parameter setting\n");
			break;
		}
		IWL_ERR(priv, "On demand firmware reload\n");
		iwlagn_fw_error(priv, true);
		break;
	}
	return 0;
}
1294
1295
1296int iwl_cmd_echo_test(struct iwl_priv *priv)
1297{
1298 int ret;
1299 struct iwl_host_cmd cmd = {
1300 .id = REPLY_ECHO,
1301 .len = { 0 },
1302 .flags = CMD_SYNC,
1303 };
1304
1305 ret = iwl_dvm_send_cmd(priv, &cmd);
1306 if (ret)
1307 IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
1308 else
1309 IWL_DEBUG_INFO(priv, "echo testing pass\n");
1310 return ret;
1311}
1312
1313static inline int iwl_check_stuck_queue(struct iwl_priv *priv, int txq)
1314{
1315 if (iwl_trans_check_stuck_queue(trans(priv), txq)) {
1316 int ret;
1317 ret = iwl_force_reset(priv, IWL_FW_RESET, false);
1318 return (ret == -EAGAIN) ? 0 : 1;
1319 }
1320 return 0;
1321}
1322
/*
 * Make the watchdog tick a quarter of the timeout so that a hung queue
 * is discovered between 1.0 and 1.25 times the timeout.
 */
1327#define IWL_WD_TICK(timeout) ((timeout) / 4)
1328
1329/*
1330 * Watchdog timer callback, we check each tx queue for stuck, if if hung
1331 * we reset the firmware. If everything is fine just rearm the timer.
1332 */
1333void iwl_bg_watchdog(unsigned long data)
1334{
1335 struct iwl_priv *priv = (struct iwl_priv *)data;
1336 int cnt;
1337 unsigned long timeout;
1338
1339 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1340 return;
1341
1342 if (iwl_is_rfkill(priv))
1343 return;
1344
1345 timeout = hw_params(priv).wd_timeout;
1346 if (timeout == 0)
1347 return;
1348
1349 /* monitor and check for stuck queues */
1350 for (cnt = 0; cnt < cfg(priv)->base_params->num_of_queues; cnt++)
1351 if (iwl_check_stuck_queue(priv, cnt))
1352 return;
1353
1354 mod_timer(&priv->watchdog, jiffies +
1355 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1356}
1357
1358void iwl_setup_watchdog(struct iwl_priv *priv)
1359{
1360 unsigned int timeout = hw_params(priv).wd_timeout;
1361
1362 if (!iwlagn_mod_params.wd_disable) {
1363 /* use system default */
1364 if (timeout && !cfg(priv)->base_params->wd_disable)
1365 mod_timer(&priv->watchdog,
1366 jiffies +
1367 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1368 else
1369 del_timer(&priv->watchdog);
1370 } else {
1371 /* module parameter overwrite default configuration */
1372 if (timeout && iwlagn_mod_params.wd_disable == 2)
1373 mod_timer(&priv->watchdog,
1374 jiffies +
1375 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1376 else
1377 del_timer(&priv->watchdog);
1378 }
1379}
1380
1381/**
1382 * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
1383 * @priv -- pointer to iwl_priv data structure
1384 * @tsf_bits -- number of bits need to shift for masking)
1385 */
1386static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
1387 u16 tsf_bits)
1388{
1389 return (1 << tsf_bits) - 1;
1390}
1391
1392/**
1393 * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
1394 * @priv -- pointer to iwl_priv data structure
1395 * @tsf_bits -- number of bits need to shift for masking)
1396 */
1397static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
1398 u16 tsf_bits)
1399{
1400 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
1401}
1402
/*
 * iwl_usecs_to_beacons - convert usec to the extended beacon time format
 *
 * A time in usec is folded into a single 32-bit value in
 * extended:internal form: the extended (high) part is a beacon count
 * and the internal (low) part is the time in usec within one beacon
 * interval.  IWLAGN_EXT_BEACON_TIME_POS is the bit position where the
 * extended part starts.
 */
u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	/* degenerate input (no interval or no time) maps to 0 */
	if (!interval || !usec)
		return 0;

	/* whole beacon intervals, clipped to the width of the high field */
	quot = (usec / interval) &
		(iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
		IWLAGN_EXT_BEACON_TIME_POS);
	/* remainder in usec, clipped to the width of the low field */
	rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
				IWLAGN_EXT_BEACON_TIME_POS);

	return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
}
1426
/*
 * iwl_add_beacon_time - combine two beacon-time values (extended format)
 *
 * @base is usually what we get from the uCode with each received frame;
 * it is the same as the HW timer counter counting down.  Both @base and
 * @addon are in the extended:internal format produced by
 * iwl_usecs_to_beacons(); the result is returned little-endian for the
 * uCode.
 */
__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval)
{
	/* split out the usec (low) parts of both operands */
	u32 base_low = base & iwl_beacon_time_mask_low(priv,
				IWLAGN_EXT_BEACON_TIME_POS);
	u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
				IWLAGN_EXT_BEACON_TIME_POS);
	u32 interval = beacon_interval * TIME_UNIT;
	/* sum of the beacon-count (high) parts */
	u32 res = (base & iwl_beacon_time_mask_high(priv,
				IWLAGN_EXT_BEACON_TIME_POS)) +
				(addon & iwl_beacon_time_mask_high(priv,
				IWLAGN_EXT_BEACON_TIME_POS));

	/*
	 * Combine the usec (low) parts of the down-counting timer,
	 * carrying one beacon interval into the high field when the
	 * subtraction wraps or the low parts cancel exactly.
	 */
	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
	} else
		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);

	return cpu_to_le32(res);
}
1453
1454void iwl_nic_error(struct iwl_op_mode *op_mode)
1455{
1456 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1457
1458 iwlagn_fw_error(priv, false);
1459}
1460
/*
 * iwl_set_hw_rfkill_state - op_mode hook for HW rfkill switch changes
 * @state: true when the hardware kill switch is asserted
 *
 * Mirrors the switch state into the STATUS_RF_KILL_HW status bit
 * (atomic bit ops) and forwards it to the wiphy rfkill subsystem.
 */
void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);

	if (state)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
}
1472
1473void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
1474{
1475 struct ieee80211_tx_info *info;
1476
1477 info = IEEE80211_SKB_CB(skb);
1478 kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
1479 dev_kfree_skb_any(skb);
1480}
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
deleted file mode 100644
index 635eb685edeb..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ /dev/null
@@ -1,234 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_core_h__
64#define __iwl_core_h__
65
66#include "iwl-dev.h"
67#include "iwl-io.h"
68
69/************************
70 * forward declarations *
71 ************************/
72struct iwl_host_cmd;
73struct iwl_cmd;
74
75#define TIME_UNIT 1024
76
77struct iwl_lib_ops {
78 /* set hw dependent parameters */
79 void (*set_hw_params)(struct iwl_priv *priv);
80 int (*set_channel_switch)(struct iwl_priv *priv,
81 struct ieee80211_channel_switch *ch_switch);
82 /* device specific configuration */
83 void (*nic_config)(struct iwl_priv *priv);
84
85 /* eeprom operations (as defined in iwl-eeprom.h) */
86 struct iwl_eeprom_ops eeprom_ops;
87
88 /* temperature */
89 void (*temperature)(struct iwl_priv *priv);
90};
91
92/***************************
93 * L i b *
94 ***************************/
95
96void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
97 int hw_decrypt);
98int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
99int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
100void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
101 struct iwl_rxon_context *ctx);
102void iwl_set_flags_for_band(struct iwl_priv *priv,
103 struct iwl_rxon_context *ctx,
104 enum ieee80211_band band,
105 struct ieee80211_vif *vif);
106u8 iwl_get_single_channel_number(struct iwl_priv *priv,
107 enum ieee80211_band band);
108void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
109bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
110 struct iwl_rxon_context *ctx,
111 struct ieee80211_sta_ht_cap *ht_cap);
112void iwl_connection_init_rx_config(struct iwl_priv *priv,
113 struct iwl_rxon_context *ctx);
114void iwl_set_rate(struct iwl_priv *priv);
115int iwl_cmd_echo_test(struct iwl_priv *priv);
116#ifdef CONFIG_IWLWIFI_DEBUGFS
117int iwl_alloc_traffic_mem(struct iwl_priv *priv);
118void iwl_free_traffic_mem(struct iwl_priv *priv);
119void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
120 u16 length, struct ieee80211_hdr *header);
121void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
122 u16 length, struct ieee80211_hdr *header);
123const char *get_mgmt_string(int cmd);
124const char *get_ctrl_string(int cmd);
125void iwl_clear_traffic_stats(struct iwl_priv *priv);
126void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
127 u16 len);
128void iwl_reset_traffic_log(struct iwl_priv *priv);
129
130#else
131static inline int iwl_alloc_traffic_mem(struct iwl_priv *priv)
132{
133 return 0;
134}
135static inline void iwl_free_traffic_mem(struct iwl_priv *priv)
136{
137}
138static inline void iwl_reset_traffic_log(struct iwl_priv *priv)
139{
140}
141static inline void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
142 u16 length, struct ieee80211_hdr *header)
143{
144}
145static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
146 u16 length, struct ieee80211_hdr *header)
147{
148}
149static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
150 __le16 fc, u16 len)
151{
152}
153#endif
154
155/*****************************************************
156* RX
157******************************************************/
158void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
159
160void iwl_setup_watchdog(struct iwl_priv *priv);
161/*****************************************************
162 * TX power
163 ****************************************************/
164int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
165
166/*******************************************************************************
167 * Scanning
168 ******************************************************************************/
169void iwl_init_scan_params(struct iwl_priv *priv);
170int iwl_scan_cancel(struct iwl_priv *priv);
171void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
172void iwl_force_scan_end(struct iwl_priv *priv);
173void iwl_internal_short_hw_scan(struct iwl_priv *priv);
174int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
175void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
176void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
177void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
178int __must_check iwl_scan_initiate(struct iwl_priv *priv,
179 struct ieee80211_vif *vif,
180 enum iwl_scan_type scan_type,
181 enum ieee80211_band band);
182
183/* For faster active scanning, scan will move to the next channel if fewer than
184 * PLCP_QUIET_THRESH packets are heard on this channel within
185 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
186 * time if it's a quiet channel (nothing responded to our probe, and there's
187 * no other traffic).
188 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
189#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
190#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
191
192#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
193
194/* traffic log definitions */
195#define IWL_TRAFFIC_ENTRIES (256)
196#define IWL_TRAFFIC_ENTRY_SIZE (64)
197
198/*****************************************************
199 * S e n d i n g H o s t C o m m a n d s *
200 *****************************************************/
201
202void iwl_bg_watchdog(unsigned long data);
203u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
204__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
205 u32 addon, u32 beacon_interval);
206
207
208/*****************************************************
209* GEOS
210******************************************************/
211int iwl_init_geos(struct iwl_priv *priv);
212void iwl_free_geos(struct iwl_priv *priv);
213
214extern void iwl_send_bt_config(struct iwl_priv *priv);
215extern int iwl_send_statistics_request(struct iwl_priv *priv,
216 u8 flags, bool clear);
217
218int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
219
220static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
221 struct iwl_priv *priv, enum ieee80211_band band)
222{
223 return priv->hw->wiphy->bands[band];
224}
225
226static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
227{
228 return cfg(priv)->bt_params &&
229 cfg(priv)->bt_params->advanced_bt_coexist;
230}
231
232extern bool bt_siso_mode;
233
234#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 5f96ce105f08..59750543fce7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -430,6 +430,9 @@
430#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) 430#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
431#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) 431#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
432 432
433/* Used to enable DBGM */
434#define HBUS_TARG_TEST_REG (HBUS_BASE+0x05c)
435
433/* 436/*
434 * Per-Tx-queue write pointer (index, really!) 437 * Per-Tx-queue write pointer (index, really!)
435 * Indicates index to next TFD that driver will fill (1 past latest filled). 438 * Indicates index to next TFD that driver will fill (1 past latest filled).
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 059efabda184..2d1b42847b9b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -63,6 +63,7 @@
63 63
64#include <linux/interrupt.h> 64#include <linux/interrupt.h>
65#include "iwl-debug.h" 65#include "iwl-debug.h"
66#include "iwl-devtrace.h"
66 67
67#define __iwl_fn(fn) \ 68#define __iwl_fn(fn) \
68void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \ 69void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index a6b32a11e103..8376b842bdba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -29,10 +29,13 @@
29#ifndef __iwl_debug_h__ 29#ifndef __iwl_debug_h__
30#define __iwl_debug_h__ 30#define __iwl_debug_h__
31 31
32#include "iwl-shared.h" 32#include "iwl-modparams.h"
33#include "iwl-devtrace.h"
34 33
35struct iwl_priv; 34
35static inline bool iwl_have_debug_level(u32 level)
36{
37 return iwlwifi_mod_params.debug_level & level;
38}
36 39
37void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, 40void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
38 const char *fmt, ...); 41 const char *fmt, ...);
@@ -41,10 +44,10 @@ void __iwl_info(struct device *dev, const char *fmt, ...);
41void __iwl_crit(struct device *dev, const char *fmt, ...); 44void __iwl_crit(struct device *dev, const char *fmt, ...);
42 45
43/* No matter what is m (priv, bus, trans), this will work */ 46/* No matter what is m (priv, bus, trans), this will work */
44#define IWL_ERR(m, f, a...) __iwl_err(trans(m)->dev, false, false, f, ## a) 47#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
45#define IWL_WARN(m, f, a...) __iwl_warn(trans(m)->dev, f, ## a) 48#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
46#define IWL_INFO(m, f, a...) __iwl_info(trans(m)->dev, f, ## a) 49#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
47#define IWL_CRIT(m, f, a...) __iwl_crit(trans(m)->dev, f, ## a) 50#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
48 51
49#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) 52#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
50void __iwl_dbg(struct device *dev, 53void __iwl_dbg(struct device *dev,
@@ -65,9 +68,9 @@ do { \
65} while (0) 68} while (0)
66 69
67#define IWL_DEBUG(m, level, fmt, args...) \ 70#define IWL_DEBUG(m, level, fmt, args...) \
68 __iwl_dbg(trans(m)->dev, level, false, __func__, fmt, ##args) 71 __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
69#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \ 72#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
70 __iwl_dbg(trans(m)->dev, level, true, __func__, fmt, ##args) 73 __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
71 74
72#ifdef CONFIG_IWLWIFI_DEBUG 75#ifdef CONFIG_IWLWIFI_DEBUG
73#define iwl_print_hex_dump(m, level, p, len) \ 76#define iwl_print_hex_dump(m, level, p, len) \
@@ -80,19 +83,6 @@ do { \
80#define iwl_print_hex_dump(m, level, p, len) 83#define iwl_print_hex_dump(m, level, p, len)
81#endif /* CONFIG_IWLWIFI_DEBUG */ 84#endif /* CONFIG_IWLWIFI_DEBUG */
82 85
83#ifdef CONFIG_IWLWIFI_DEBUGFS
84int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
85void iwl_dbgfs_unregister(struct iwl_priv *priv);
86#else
87static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
88{
89 return 0;
90}
91static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
92{
93}
94#endif /* CONFIG_IWLWIFI_DEBUGFS */
95
96/* 86/*
97 * To use the debug system: 87 * To use the debug system:
98 * 88 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 2bbaebd99ad4..e7c157e5ebeb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -37,9 +37,9 @@
37 37
38#include "iwl-dev.h" 38#include "iwl-dev.h"
39#include "iwl-debug.h" 39#include "iwl-debug.h"
40#include "iwl-core.h"
41#include "iwl-io.h" 40#include "iwl-io.h"
42#include "iwl-agn.h" 41#include "iwl-agn.h"
42#include "iwl-modparams.h"
43 43
44/* create and remove of files */ 44/* create and remove of files */
45#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 45#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
@@ -111,105 +111,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
111 .llseek = generic_file_llseek, \ 111 .llseek = generic_file_llseek, \
112}; 112};
113 113
114static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
115 char __user *user_buf,
116 size_t count, loff_t *ppos) {
117
118 struct iwl_priv *priv = file->private_data;
119 char *buf;
120 int pos = 0;
121
122 int cnt;
123 ssize_t ret;
124 const size_t bufsz = 100 +
125 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
126 buf = kzalloc(bufsz, GFP_KERNEL);
127 if (!buf)
128 return -ENOMEM;
129 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
130 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
131 pos += scnprintf(buf + pos, bufsz - pos,
132 "\t%25s\t\t: %u\n",
133 get_mgmt_string(cnt),
134 priv->tx_stats.mgmt[cnt]);
135 }
136 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
137 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
138 pos += scnprintf(buf + pos, bufsz - pos,
139 "\t%25s\t\t: %u\n",
140 get_ctrl_string(cnt),
141 priv->tx_stats.ctrl[cnt]);
142 }
143 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
144 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
145 priv->tx_stats.data_cnt);
146 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
147 priv->tx_stats.data_bytes);
148 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
149 kfree(buf);
150 return ret;
151}
152
153static ssize_t iwl_dbgfs_clear_traffic_statistics_write(struct file *file,
154 const char __user *user_buf,
155 size_t count, loff_t *ppos)
156{
157 struct iwl_priv *priv = file->private_data;
158 u32 clear_flag;
159 char buf[8];
160 int buf_size;
161
162 memset(buf, 0, sizeof(buf));
163 buf_size = min(count, sizeof(buf) - 1);
164 if (copy_from_user(buf, user_buf, buf_size))
165 return -EFAULT;
166 if (sscanf(buf, "%x", &clear_flag) != 1)
167 return -EFAULT;
168 iwl_clear_traffic_stats(priv);
169
170 return count;
171}
172
173static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
174 char __user *user_buf,
175 size_t count, loff_t *ppos) {
176
177 struct iwl_priv *priv = file->private_data;
178 char *buf;
179 int pos = 0;
180 int cnt;
181 ssize_t ret;
182 const size_t bufsz = 100 +
183 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
184 buf = kzalloc(bufsz, GFP_KERNEL);
185 if (!buf)
186 return -ENOMEM;
187
188 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
189 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
190 pos += scnprintf(buf + pos, bufsz - pos,
191 "\t%25s\t\t: %u\n",
192 get_mgmt_string(cnt),
193 priv->rx_stats.mgmt[cnt]);
194 }
195 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
196 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
197 pos += scnprintf(buf + pos, bufsz - pos,
198 "\t%25s\t\t: %u\n",
199 get_ctrl_string(cnt),
200 priv->rx_stats.ctrl[cnt]);
201 }
202 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
203 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
204 priv->rx_stats.data_cnt);
205 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
206 priv->rx_stats.data_bytes);
207
208 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
209 kfree(buf);
210 return ret;
211}
212
213static ssize_t iwl_dbgfs_sram_read(struct file *file, 114static ssize_t iwl_dbgfs_sram_read(struct file *file,
214 char __user *user_buf, 115 char __user *user_buf,
215 size_t count, loff_t *ppos) 116 size_t count, loff_t *ppos)
@@ -230,11 +131,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
230 /* default is to dump the entire data segment */ 131 /* default is to dump the entire data segment */
231 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { 132 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
232 priv->dbgfs_sram_offset = 0x800000; 133 priv->dbgfs_sram_offset = 0x800000;
233 if (!priv->ucode_loaded) { 134 if (!priv->ucode_loaded)
234 IWL_ERR(priv, "No uCode has been loadded.\n");
235 return -EINVAL; 135 return -EINVAL;
236 } 136 img = &priv->fw->img[priv->cur_ucode];
237 img = &priv->fw->img[priv->shrd->ucode_type];
238 priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; 137 priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
239 } 138 }
240 len = priv->dbgfs_sram_len; 139 len = priv->dbgfs_sram_len;
@@ -259,7 +158,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
259 sram = priv->dbgfs_sram_offset & ~0x3; 158 sram = priv->dbgfs_sram_offset & ~0x3;
260 159
261 /* read the first u32 from sram */ 160 /* read the first u32 from sram */
262 val = iwl_read_targ_mem(trans(priv), sram); 161 val = iwl_read_targ_mem(priv->trans, sram);
263 162
264 for (; len; len--) { 163 for (; len; len--) {
265 /* put the address at the start of every line */ 164 /* put the address at the start of every line */
@@ -278,7 +177,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
278 if (++offset == 4) { 177 if (++offset == 4) {
279 sram += 4; 178 sram += 4;
280 offset = 0; 179 offset = 0;
281 val = iwl_read_targ_mem(trans(priv), sram); 180 val = iwl_read_targ_mem(priv->trans, sram);
282 } 181 }
283 182
284 /* put in extra spaces and split lines for human readability */ 183 /* put in extra spaces and split lines for human readability */
@@ -369,14 +268,19 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
369 i, station->sta.sta.addr, 268 i, station->sta.sta.addr,
370 station->sta.station_flags_msk); 269 station->sta.station_flags_msk);
371 pos += scnprintf(buf + pos, bufsz - pos, 270 pos += scnprintf(buf + pos, bufsz - pos,
372 "TID\tseq_num\trate_n_flags\n"); 271 "TID seqno next_rclmd "
272 "rate_n_flags state txq\n");
373 273
374 for (j = 0; j < IWL_MAX_TID_COUNT; j++) { 274 for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
375 tid_data = &priv->tid_data[i][j]; 275 tid_data = &priv->tid_data[i][j];
376 pos += scnprintf(buf + pos, bufsz - pos, 276 pos += scnprintf(buf + pos, bufsz - pos,
377 "%d:\t%#x\t%#x", 277 "%d: 0x%.4x 0x%.4x 0x%.8x "
278 "%d %.2d",
378 j, tid_data->seq_number, 279 j, tid_data->seq_number,
379 tid_data->agg.rate_n_flags); 280 tid_data->next_reclaimed,
281 tid_data->agg.rate_n_flags,
282 tid_data->agg.state,
283 tid_data->agg.txq_id);
380 284
381 if (tid_data->agg.wait_for_ba) 285 if (tid_data->agg.wait_for_ba)
382 pos += scnprintf(buf + pos, bufsz - pos, 286 pos += scnprintf(buf + pos, bufsz - pos,
@@ -403,30 +307,25 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
403 const u8 *ptr; 307 const u8 *ptr;
404 char *buf; 308 char *buf;
405 u16 eeprom_ver; 309 u16 eeprom_ver;
406 size_t eeprom_len = cfg(priv)->base_params->eeprom_size; 310 size_t eeprom_len = priv->cfg->base_params->eeprom_size;
407 buf_size = 4 * eeprom_len + 256; 311 buf_size = 4 * eeprom_len + 256;
408 312
409 if (eeprom_len % 16) { 313 if (eeprom_len % 16)
410 IWL_ERR(priv, "NVM size is not multiple of 16.\n");
411 return -ENODATA; 314 return -ENODATA;
412 }
413 315
414 ptr = priv->shrd->eeprom; 316 ptr = priv->eeprom;
415 if (!ptr) { 317 if (!ptr)
416 IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
417 return -ENOMEM; 318 return -ENOMEM;
418 }
419 319
420 /* 4 characters for byte 0xYY */ 320 /* 4 characters for byte 0xYY */
421 buf = kzalloc(buf_size, GFP_KERNEL); 321 buf = kzalloc(buf_size, GFP_KERNEL);
422 if (!buf) { 322 if (!buf)
423 IWL_ERR(priv, "Can not allocate Buffer\n");
424 return -ENOMEM; 323 return -ENOMEM;
425 } 324
426 eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION); 325 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
427 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, " 326 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
428 "version: 0x%x\n", 327 "version: 0x%x\n",
429 (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) 328 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
430 ? "OTP" : "EEPROM", eeprom_ver); 329 ? "OTP" : "EEPROM", eeprom_ver);
431 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { 330 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
432 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); 331 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
@@ -456,10 +355,8 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
456 return -EAGAIN; 355 return -EAGAIN;
457 356
458 buf = kzalloc(bufsz, GFP_KERNEL); 357 buf = kzalloc(bufsz, GFP_KERNEL);
459 if (!buf) { 358 if (!buf)
460 IWL_ERR(priv, "Can not allocate Buffer\n");
461 return -ENOMEM; 359 return -ENOMEM;
462 }
463 360
464 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); 361 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
465 if (supp_band) { 362 if (supp_band) {
@@ -521,8 +418,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
521 int pos = 0; 418 int pos = 0;
522 const size_t bufsz = sizeof(buf); 419 const size_t bufsz = sizeof(buf);
523 420
524 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
525 test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status));
526 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", 421 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
527 test_bit(STATUS_RF_KILL_HW, &priv->status)); 422 test_bit(STATUS_RF_KILL_HW, &priv->status));
528 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", 423 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
@@ -544,9 +439,9 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
544 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", 439 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
545 test_bit(STATUS_SCAN_HW, &priv->status)); 440 test_bit(STATUS_SCAN_HW, &priv->status));
546 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", 441 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
547 test_bit(STATUS_POWER_PMI, &priv->shrd->status)); 442 test_bit(STATUS_POWER_PMI, &priv->status));
548 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", 443 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
549 test_bit(STATUS_FW_ERROR, &priv->shrd->status)); 444 test_bit(STATUS_FW_ERROR, &priv->status));
550 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 445 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
551} 446}
552 447
@@ -563,16 +458,14 @@ static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file,
563 ssize_t ret; 458 ssize_t ret;
564 459
565 buf = kzalloc(bufsz, GFP_KERNEL); 460 buf = kzalloc(bufsz, GFP_KERNEL);
566 if (!buf) { 461 if (!buf)
567 IWL_ERR(priv, "Can not allocate Buffer\n");
568 return -ENOMEM; 462 return -ENOMEM;
569 }
570 463
571 for (cnt = 0; cnt < REPLY_MAX; cnt++) { 464 for (cnt = 0; cnt < REPLY_MAX; cnt++) {
572 if (priv->rx_handlers_stats[cnt] > 0) 465 if (priv->rx_handlers_stats[cnt] > 0)
573 pos += scnprintf(buf + pos, bufsz - pos, 466 pos += scnprintf(buf + pos, bufsz - pos,
574 "\tRx handler[%36s]:\t\t %u\n", 467 "\tRx handler[%36s]:\t\t %u\n",
575 get_cmd_string(cnt), 468 iwl_dvm_get_cmd_string(cnt),
576 priv->rx_handlers_stats[cnt]); 469 priv->rx_handlers_stats[cnt]);
577 } 470 }
578 471
@@ -680,11 +573,8 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
680 return -EFAULT; 573 return -EFAULT;
681 if (!iwl_is_any_associated(priv)) 574 if (!iwl_is_any_associated(priv))
682 priv->disable_ht40 = ht40 ? true : false; 575 priv->disable_ht40 = ht40 ? true : false;
683 else { 576 else
684 IWL_ERR(priv, "Sta associated with AP - "
685 "Change to 40MHz channel support is not allowed\n");
686 return -EINVAL; 577 return -EINVAL;
687 }
688 578
689 return count; 579 return count;
690} 580}
@@ -816,87 +706,6 @@ DEBUGFS_READ_FILE_OPS(temperature);
816DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); 706DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
817DEBUGFS_READ_FILE_OPS(current_sleep_command); 707DEBUGFS_READ_FILE_OPS(current_sleep_command);
818 708
819static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
820 char __user *user_buf,
821 size_t count, loff_t *ppos)
822{
823 struct iwl_priv *priv = file->private_data;
824 int pos = 0, ofs = 0;
825 int cnt = 0, entry;
826
827 char *buf;
828 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
829 (cfg(priv)->base_params->num_of_queues * 32 * 8) + 400;
830 const u8 *ptr;
831 ssize_t ret;
832
833 buf = kzalloc(bufsz, GFP_KERNEL);
834 if (!buf) {
835 IWL_ERR(priv, "Can not allocate buffer\n");
836 return -ENOMEM;
837 }
838 if (priv->tx_traffic && iwl_have_debug_level(IWL_DL_TX)) {
839 ptr = priv->tx_traffic;
840 pos += scnprintf(buf + pos, bufsz - pos,
841 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
842 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
843 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
844 entry++, ofs += 16) {
845 pos += scnprintf(buf + pos, bufsz - pos,
846 "0x%.4x ", ofs);
847 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
848 buf + pos, bufsz - pos, 0);
849 pos += strlen(buf + pos);
850 if (bufsz - pos > 0)
851 buf[pos++] = '\n';
852 }
853 }
854 }
855
856 if (priv->rx_traffic && iwl_have_debug_level(IWL_DL_RX)) {
857 ptr = priv->rx_traffic;
858 pos += scnprintf(buf + pos, bufsz - pos,
859 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
860 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
861 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
862 entry++, ofs += 16) {
863 pos += scnprintf(buf + pos, bufsz - pos,
864 "0x%.4x ", ofs);
865 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
866 buf + pos, bufsz - pos, 0);
867 pos += strlen(buf + pos);
868 if (bufsz - pos > 0)
869 buf[pos++] = '\n';
870 }
871 }
872 }
873
874 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
875 kfree(buf);
876 return ret;
877}
878
879static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
880 const char __user *user_buf,
881 size_t count, loff_t *ppos)
882{
883 struct iwl_priv *priv = file->private_data;
884 char buf[8];
885 int buf_size;
886 int traffic_log;
887
888 memset(buf, 0, sizeof(buf));
889 buf_size = min(count, sizeof(buf) - 1);
890 if (copy_from_user(buf, user_buf, buf_size))
891 return -EFAULT;
892 if (sscanf(buf, "%d", &traffic_log) != 1)
893 return -EFAULT;
894 if (traffic_log == 0)
895 iwl_reset_traffic_log(priv);
896
897 return count;
898}
899
900static const char *fmt_value = " %-30s %10u\n"; 709static const char *fmt_value = " %-30s %10u\n";
901static const char *fmt_hex = " %-30s 0x%02X\n"; 710static const char *fmt_hex = " %-30s 0x%02X\n";
902static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; 711static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
@@ -947,10 +756,8 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
947 return -EAGAIN; 756 return -EAGAIN;
948 757
949 buf = kzalloc(bufsz, GFP_KERNEL); 758 buf = kzalloc(bufsz, GFP_KERNEL);
950 if (!buf) { 759 if (!buf)
951 IWL_ERR(priv, "Can not allocate Buffer\n");
952 return -ENOMEM; 760 return -ENOMEM;
953 }
954 761
955 /* 762 /*
956 * the statistic information display here is based on 763 * the statistic information display here is based on
@@ -1376,10 +1183,8 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1376 return -EAGAIN; 1183 return -EAGAIN;
1377 1184
1378 buf = kzalloc(bufsz, GFP_KERNEL); 1185 buf = kzalloc(bufsz, GFP_KERNEL);
1379 if (!buf) { 1186 if (!buf)
1380 IWL_ERR(priv, "Can not allocate Buffer\n");
1381 return -ENOMEM; 1187 return -ENOMEM;
1382 }
1383 1188
1384 /* the statistic information display here is based on 1189 /* the statistic information display here is based on
1385 * the last statistics notification from uCode 1190 * the last statistics notification from uCode
@@ -1536,17 +1341,17 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1536 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { 1341 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
1537 pos += scnprintf(buf + pos, bufsz - pos, 1342 pos += scnprintf(buf + pos, bufsz - pos,
1538 "tx power: (1/2 dB step)\n"); 1343 "tx power: (1/2 dB step)\n");
1539 if ((hw_params(priv).valid_tx_ant & ANT_A) && 1344 if ((priv->hw_params.valid_tx_ant & ANT_A) &&
1540 tx->tx_power.ant_a) 1345 tx->tx_power.ant_a)
1541 pos += scnprintf(buf + pos, bufsz - pos, 1346 pos += scnprintf(buf + pos, bufsz - pos,
1542 fmt_hex, "antenna A:", 1347 fmt_hex, "antenna A:",
1543 tx->tx_power.ant_a); 1348 tx->tx_power.ant_a);
1544 if ((hw_params(priv).valid_tx_ant & ANT_B) && 1349 if ((priv->hw_params.valid_tx_ant & ANT_B) &&
1545 tx->tx_power.ant_b) 1350 tx->tx_power.ant_b)
1546 pos += scnprintf(buf + pos, bufsz - pos, 1351 pos += scnprintf(buf + pos, bufsz - pos,
1547 fmt_hex, "antenna B:", 1352 fmt_hex, "antenna B:",
1548 tx->tx_power.ant_b); 1353 tx->tx_power.ant_b);
1549 if ((hw_params(priv).valid_tx_ant & ANT_C) && 1354 if ((priv->hw_params.valid_tx_ant & ANT_C) &&
1550 tx->tx_power.ant_c) 1355 tx->tx_power.ant_c)
1551 pos += scnprintf(buf + pos, bufsz - pos, 1356 pos += scnprintf(buf + pos, bufsz - pos,
1552 fmt_hex, "antenna C:", 1357 fmt_hex, "antenna C:",
@@ -1578,10 +1383,8 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1578 return -EAGAIN; 1383 return -EAGAIN;
1579 1384
1580 buf = kzalloc(bufsz, GFP_KERNEL); 1385 buf = kzalloc(bufsz, GFP_KERNEL);
1581 if (!buf) { 1386 if (!buf)
1582 IWL_ERR(priv, "Can not allocate Buffer\n");
1583 return -ENOMEM; 1387 return -ENOMEM;
1584 }
1585 1388
1586 /* the statistic information display here is based on 1389 /* the statistic information display here is based on
1587 * the last statistics notification from uCode 1390 * the last statistics notification from uCode
@@ -1704,16 +1507,11 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1704 ret = iwl_send_statistics_request(priv, CMD_SYNC, false); 1507 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1705 mutex_unlock(&priv->mutex); 1508 mutex_unlock(&priv->mutex);
1706 1509
1707 if (ret) { 1510 if (ret)
1708 IWL_ERR(priv,
1709 "Error sending statistics request: %zd\n", ret);
1710 return -EAGAIN; 1511 return -EAGAIN;
1711 }
1712 buf = kzalloc(bufsz, GFP_KERNEL); 1512 buf = kzalloc(bufsz, GFP_KERNEL);
1713 if (!buf) { 1513 if (!buf)
1714 IWL_ERR(priv, "Can not allocate Buffer\n");
1715 return -ENOMEM; 1514 return -ENOMEM;
1716 }
1717 1515
1718 /* 1516 /*
1719 * the statistic information display here is based on 1517 * the statistic information display here is based on
@@ -1790,10 +1588,8 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
1790 return -EAGAIN; 1588 return -EAGAIN;
1791 1589
1792 buf = kzalloc(bufsz, GFP_KERNEL); 1590 buf = kzalloc(bufsz, GFP_KERNEL);
1793 if (!buf) { 1591 if (!buf)
1794 IWL_ERR(priv, "Can not allocate Buffer\n");
1795 return -ENOMEM; 1592 return -ENOMEM;
1796 }
1797 1593
1798 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n"); 1594 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
1799 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n", 1595 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
@@ -1933,10 +1729,8 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
1933 1729
1934 data = &priv->sensitivity_data; 1730 data = &priv->sensitivity_data;
1935 buf = kzalloc(bufsz, GFP_KERNEL); 1731 buf = kzalloc(bufsz, GFP_KERNEL);
1936 if (!buf) { 1732 if (!buf)
1937 IWL_ERR(priv, "Can not allocate Buffer\n");
1938 return -ENOMEM; 1733 return -ENOMEM;
1939 }
1940 1734
1941 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", 1735 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
1942 data->auto_corr_ofdm); 1736 data->auto_corr_ofdm);
@@ -2014,10 +1808,8 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
2014 1808
2015 data = &priv->chain_noise_data; 1809 data = &priv->chain_noise_data;
2016 buf = kzalloc(bufsz, GFP_KERNEL); 1810 buf = kzalloc(bufsz, GFP_KERNEL);
2017 if (!buf) { 1811 if (!buf)
2018 IWL_ERR(priv, "Can not allocate Buffer\n");
2019 return -ENOMEM; 1812 return -ENOMEM;
2020 }
2021 1813
2022 pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", 1814 pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
2023 data->active_chains); 1815 data->active_chains);
@@ -2068,7 +1860,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
2068 const size_t bufsz = sizeof(buf); 1860 const size_t bufsz = sizeof(buf);
2069 u32 pwrsave_status; 1861 u32 pwrsave_status;
2070 1862
2071 pwrsave_status = iwl_read32(trans(priv), CSR_GP_CNTRL) & 1863 pwrsave_status = iwl_read32(priv->trans, CSR_GP_CNTRL) &
2072 CSR_GP_REG_POWER_SAVE_STATUS_MSK; 1864 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
2073 1865
2074 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); 1866 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
@@ -2262,59 +2054,39 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
2262 return count; 2054 return count;
2263} 2055}
2264 2056
2265static ssize_t iwl_dbgfs_force_reset_read(struct file *file, 2057static ssize_t iwl_dbgfs_rf_reset_read(struct file *file,
2266 char __user *user_buf, 2058 char __user *user_buf,
2267 size_t count, loff_t *ppos) 2059 size_t count, loff_t *ppos)
2268{ 2060{
2269 struct iwl_priv *priv = file->private_data; 2061 struct iwl_priv *priv = file->private_data;
2270 int i, pos = 0; 2062 int pos = 0;
2271 char buf[300]; 2063 char buf[300];
2272 const size_t bufsz = sizeof(buf); 2064 const size_t bufsz = sizeof(buf);
2273 struct iwl_force_reset *force_reset; 2065 struct iwl_rf_reset *rf_reset = &priv->rf_reset;
2066
2067 pos += scnprintf(buf + pos, bufsz - pos,
2068 "RF reset statistics\n");
2069 pos += scnprintf(buf + pos, bufsz - pos,
2070 "\tnumber of reset request: %d\n",
2071 rf_reset->reset_request_count);
2072 pos += scnprintf(buf + pos, bufsz - pos,
2073 "\tnumber of reset request success: %d\n",
2074 rf_reset->reset_success_count);
2075 pos += scnprintf(buf + pos, bufsz - pos,
2076 "\tnumber of reset request reject: %d\n",
2077 rf_reset->reset_reject_count);
2274 2078
2275 for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
2276 force_reset = &priv->force_reset[i];
2277 pos += scnprintf(buf + pos, bufsz - pos,
2278 "Force reset method %d\n", i);
2279 pos += scnprintf(buf + pos, bufsz - pos,
2280 "\tnumber of reset request: %d\n",
2281 force_reset->reset_request_count);
2282 pos += scnprintf(buf + pos, bufsz - pos,
2283 "\tnumber of reset request success: %d\n",
2284 force_reset->reset_success_count);
2285 pos += scnprintf(buf + pos, bufsz - pos,
2286 "\tnumber of reset request reject: %d\n",
2287 force_reset->reset_reject_count);
2288 pos += scnprintf(buf + pos, bufsz - pos,
2289 "\treset duration: %lu\n",
2290 force_reset->reset_duration);
2291 }
2292 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2079 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2293} 2080}
2294 2081
2295static ssize_t iwl_dbgfs_force_reset_write(struct file *file, 2082static ssize_t iwl_dbgfs_rf_reset_write(struct file *file,
2296 const char __user *user_buf, 2083 const char __user *user_buf,
2297 size_t count, loff_t *ppos) { 2084 size_t count, loff_t *ppos) {
2298 2085
2299 struct iwl_priv *priv = file->private_data; 2086 struct iwl_priv *priv = file->private_data;
2300 char buf[8]; 2087 int ret;
2301 int buf_size;
2302 int reset, ret;
2303 2088
2304 memset(buf, 0, sizeof(buf)); 2089 ret = iwl_force_rf_reset(priv, true);
2305 buf_size = min(count, sizeof(buf) - 1);
2306 if (copy_from_user(buf, user_buf, buf_size))
2307 return -EFAULT;
2308 if (sscanf(buf, "%d", &reset) != 1)
2309 return -EINVAL;
2310 switch (reset) {
2311 case IWL_RF_RESET:
2312 case IWL_FW_RESET:
2313 ret = iwl_force_reset(priv, reset, true);
2314 break;
2315 default:
2316 return -EINVAL;
2317 }
2318 return ret ? ret : count; 2090 return ret ? ret : count;
2319} 2091}
2320 2092
@@ -2342,29 +2114,6 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
2342 return count; 2114 return count;
2343} 2115}
2344 2116
2345static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
2346 const char __user *user_buf,
2347 size_t count, loff_t *ppos)
2348{
2349 struct iwl_priv *priv = file->private_data;
2350 char buf[8];
2351 int buf_size;
2352 int timeout;
2353
2354 memset(buf, 0, sizeof(buf));
2355 buf_size = min(count, sizeof(buf) - 1);
2356 if (copy_from_user(buf, user_buf, buf_size))
2357 return -EFAULT;
2358 if (sscanf(buf, "%d", &timeout) != 1)
2359 return -EINVAL;
2360 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
2361 timeout = IWL_DEF_WD_TIMEOUT;
2362
2363 hw_params(priv).wd_timeout = timeout;
2364 iwl_setup_watchdog(priv);
2365 return count;
2366}
2367
2368static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file, 2117static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
2369 char __user *user_buf, 2118 char __user *user_buf,
2370 size_t count, loff_t *ppos) { 2119 size_t count, loff_t *ppos) {
@@ -2420,10 +2169,10 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
2420 char buf[40]; 2169 char buf[40];
2421 const size_t bufsz = sizeof(buf); 2170 const size_t bufsz = sizeof(buf);
2422 2171
2423 if (cfg(priv)->ht_params) 2172 if (priv->cfg->ht_params)
2424 pos += scnprintf(buf + pos, bufsz - pos, 2173 pos += scnprintf(buf + pos, bufsz - pos,
2425 "use %s for aggregation\n", 2174 "use %s for aggregation\n",
2426 (hw_params(priv).use_rts_for_aggregation) ? 2175 (priv->hw_params.use_rts_for_aggregation) ?
2427 "rts/cts" : "cts-to-self"); 2176 "rts/cts" : "cts-to-self");
2428 else 2177 else
2429 pos += scnprintf(buf + pos, bufsz - pos, "N/A"); 2178 pos += scnprintf(buf + pos, bufsz - pos, "N/A");
@@ -2440,7 +2189,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
2440 int buf_size; 2189 int buf_size;
2441 int rts; 2190 int rts;
2442 2191
2443 if (!cfg(priv)->ht_params) 2192 if (!priv->cfg->ht_params)
2444 return -EINVAL; 2193 return -EINVAL;
2445 2194
2446 memset(buf, 0, sizeof(buf)); 2195 memset(buf, 0, sizeof(buf));
@@ -2450,12 +2199,29 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
2450 if (sscanf(buf, "%d", &rts) != 1) 2199 if (sscanf(buf, "%d", &rts) != 1)
2451 return -EINVAL; 2200 return -EINVAL;
2452 if (rts) 2201 if (rts)
2453 hw_params(priv).use_rts_for_aggregation = true; 2202 priv->hw_params.use_rts_for_aggregation = true;
2454 else 2203 else
2455 hw_params(priv).use_rts_for_aggregation = false; 2204 priv->hw_params.use_rts_for_aggregation = false;
2456 return count; 2205 return count;
2457} 2206}
2458 2207
2208static int iwl_cmd_echo_test(struct iwl_priv *priv)
2209{
2210 int ret;
2211 struct iwl_host_cmd cmd = {
2212 .id = REPLY_ECHO,
2213 .len = { 0 },
2214 .flags = CMD_SYNC,
2215 };
2216
2217 ret = iwl_dvm_send_cmd(priv, &cmd);
2218 if (ret)
2219 IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
2220 else
2221 IWL_DEBUG_INFO(priv, "echo testing pass\n");
2222 return ret;
2223}
2224
2459static ssize_t iwl_dbgfs_echo_test_write(struct file *file, 2225static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
2460 const char __user *user_buf, 2226 const char __user *user_buf,
2461 size_t count, loff_t *ppos) 2227 size_t count, loff_t *ppos)
@@ -2473,9 +2239,93 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
2473 return count; 2239 return count;
2474} 2240}
2475 2241
2476DEBUGFS_READ_FILE_OPS(rx_statistics); 2242static ssize_t iwl_dbgfs_log_event_read(struct file *file,
2477DEBUGFS_READ_FILE_OPS(tx_statistics); 2243 char __user *user_buf,
2478DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 2244 size_t count, loff_t *ppos)
2245{
2246 struct iwl_priv *priv = file->private_data;
2247 char *buf;
2248 int pos = 0;
2249 ssize_t ret = -ENOMEM;
2250
2251 ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
2252 if (buf) {
2253 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2254 kfree(buf);
2255 }
2256 return ret;
2257}
2258
2259static ssize_t iwl_dbgfs_log_event_write(struct file *file,
2260 const char __user *user_buf,
2261 size_t count, loff_t *ppos)
2262{
2263 struct iwl_priv *priv = file->private_data;
2264 u32 event_log_flag;
2265 char buf[8];
2266 int buf_size;
2267
2268 memset(buf, 0, sizeof(buf));
2269 buf_size = min(count, sizeof(buf) - 1);
2270 if (copy_from_user(buf, user_buf, buf_size))
2271 return -EFAULT;
2272 if (sscanf(buf, "%d", &event_log_flag) != 1)
2273 return -EFAULT;
2274 if (event_log_flag == 1)
2275 iwl_dump_nic_event_log(priv, true, NULL, false);
2276
2277 return count;
2278}
2279
2280static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
2281 char __user *user_buf,
2282 size_t count, loff_t *ppos)
2283{
2284 struct iwl_priv *priv = file->private_data;
2285 char buf[120];
2286 int pos = 0;
2287 const size_t bufsz = sizeof(buf);
2288
2289 pos += scnprintf(buf + pos, bufsz - pos,
2290 "Sensitivity calibrations %s\n",
2291 (priv->calib_disabled &
2292 IWL_SENSITIVITY_CALIB_DISABLED) ?
2293 "DISABLED" : "ENABLED");
2294 pos += scnprintf(buf + pos, bufsz - pos,
2295 "Chain noise calibrations %s\n",
2296 (priv->calib_disabled &
2297 IWL_CHAIN_NOISE_CALIB_DISABLED) ?
2298 "DISABLED" : "ENABLED");
2299 pos += scnprintf(buf + pos, bufsz - pos,
2300 "Tx power calibrations %s\n",
2301 (priv->calib_disabled &
2302 IWL_TX_POWER_CALIB_DISABLED) ?
2303 "DISABLED" : "ENABLED");
2304
2305 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2306}
2307
2308static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
2309 const char __user *user_buf,
2310 size_t count, loff_t *ppos)
2311{
2312 struct iwl_priv *priv = file->private_data;
2313 char buf[8];
2314 u32 calib_disabled;
2315 int buf_size;
2316
2317 memset(buf, 0, sizeof(buf));
2318 buf_size = min(count, sizeof(buf) - 1);
2319 if (copy_from_user(buf, user_buf, buf_size))
2320 return -EFAULT;
2321 if (sscanf(buf, "%x", &calib_disabled) != 1)
2322 return -EFAULT;
2323
2324 priv->calib_disabled = calib_disabled;
2325
2326 return count;
2327}
2328
2479DEBUGFS_READ_FILE_OPS(ucode_rx_stats); 2329DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
2480DEBUGFS_READ_FILE_OPS(ucode_tx_stats); 2330DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
2481DEBUGFS_READ_FILE_OPS(ucode_general_stats); 2331DEBUGFS_READ_FILE_OPS(ucode_general_stats);
@@ -2483,20 +2333,20 @@ DEBUGFS_READ_FILE_OPS(sensitivity);
2483DEBUGFS_READ_FILE_OPS(chain_noise); 2333DEBUGFS_READ_FILE_OPS(chain_noise);
2484DEBUGFS_READ_FILE_OPS(power_save_status); 2334DEBUGFS_READ_FILE_OPS(power_save_status);
2485DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); 2335DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
2486DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
2487DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); 2336DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
2488DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); 2337DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
2489DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); 2338DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
2490DEBUGFS_READ_WRITE_FILE_OPS(force_reset); 2339DEBUGFS_READ_WRITE_FILE_OPS(rf_reset);
2491DEBUGFS_READ_FILE_OPS(rxon_flags); 2340DEBUGFS_READ_FILE_OPS(rxon_flags);
2492DEBUGFS_READ_FILE_OPS(rxon_filter_flags); 2341DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
2493DEBUGFS_WRITE_FILE_OPS(txfifo_flush); 2342DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
2494DEBUGFS_READ_FILE_OPS(ucode_bt_stats); 2343DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
2495DEBUGFS_WRITE_FILE_OPS(wd_timeout);
2496DEBUGFS_READ_FILE_OPS(bt_traffic); 2344DEBUGFS_READ_FILE_OPS(bt_traffic);
2497DEBUGFS_READ_WRITE_FILE_OPS(protection_mode); 2345DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
2498DEBUGFS_READ_FILE_OPS(reply_tx_error); 2346DEBUGFS_READ_FILE_OPS(reply_tx_error);
2499DEBUGFS_WRITE_FILE_OPS(echo_test); 2347DEBUGFS_WRITE_FILE_OPS(echo_test);
2348DEBUGFS_READ_WRITE_FILE_OPS(log_event);
2349DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
2500 2350
2501/* 2351/*
2502 * Create the debugfs files and directories 2352 * Create the debugfs files and directories
@@ -2537,15 +2387,11 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2537 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); 2387 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
2538 DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR); 2388 DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR);
2539 2389
2540 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
2541 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
2542 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
2543 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); 2390 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
2544 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); 2391 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
2545 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
2546 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); 2392 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
2547 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); 2393 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
2548 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); 2394 DEBUGFS_ADD_FILE(rf_reset, dir_debug, S_IWUSR | S_IRUSR);
2549 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); 2395 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
2550 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); 2396 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
2551 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); 2397 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
@@ -2558,17 +2404,16 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2558 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); 2404 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
2559 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 2405 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
2560 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 2406 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
2561 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
2562 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR); 2407 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
2408 DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
2409
2563 if (iwl_advanced_bt_coexist(priv)) 2410 if (iwl_advanced_bt_coexist(priv))
2564 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); 2411 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
2565 2412
2566 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, 2413 /* Calibrations disabled/enabled status*/
2567 &priv->disable_sens_cal); 2414 DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
2568 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
2569 &priv->disable_chain_noise_cal);
2570 2415
2571 if (iwl_trans_dbgfs_register(trans(priv), dir_debug)) 2416 if (iwl_trans_dbgfs_register(priv->trans, dir_debug))
2572 goto err; 2417 goto err;
2573 return 0; 2418 return 0;
2574 2419
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 16956b777f96..70062379d0ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40 40
41#include "iwl-fw.h"
41#include "iwl-eeprom.h" 42#include "iwl-eeprom.h"
42#include "iwl-csr.h" 43#include "iwl-csr.h"
43#include "iwl-debug.h" 44#include "iwl-debug.h"
@@ -47,12 +48,9 @@
47#include "iwl-agn-rs.h" 48#include "iwl-agn-rs.h"
48#include "iwl-agn-tt.h" 49#include "iwl-agn-tt.h"
49#include "iwl-trans.h" 50#include "iwl-trans.h"
50#include "iwl-shared.h"
51#include "iwl-op-mode.h" 51#include "iwl-op-mode.h"
52#include "iwl-notif-wait.h" 52#include "iwl-notif-wait.h"
53 53
54struct iwl_tx_queue;
55
56/* CT-KILL constants */ 54/* CT-KILL constants */
57#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ 55#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
58#define CT_KILL_THRESHOLD 114 /* in Celsius */ 56#define CT_KILL_THRESHOLD 114 /* in Celsius */
@@ -196,6 +194,7 @@ struct iwl_qos_info {
196 * These states relate to a specific RA / TID. 194 * These states relate to a specific RA / TID.
197 * 195 *
198 * @IWL_AGG_OFF: aggregation is not used 196 * @IWL_AGG_OFF: aggregation is not used
197 * @IWL_AGG_STARTING: aggregation are starting (between start and oper)
199 * @IWL_AGG_ON: aggregation session is up 198 * @IWL_AGG_ON: aggregation session is up
200 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the 199 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
201 * HW queue to be empty from packets for this RA /TID. 200 * HW queue to be empty from packets for this RA /TID.
@@ -204,6 +203,7 @@ struct iwl_qos_info {
204 */ 203 */
205enum iwl_agg_state { 204enum iwl_agg_state {
206 IWL_AGG_OFF = 0, 205 IWL_AGG_OFF = 0,
206 IWL_AGG_STARTING,
207 IWL_AGG_ON, 207 IWL_AGG_ON,
208 IWL_EMPTYING_HW_QUEUE_ADDBA, 208 IWL_EMPTYING_HW_QUEUE_ADDBA,
209 IWL_EMPTYING_HW_QUEUE_DELBA, 209 IWL_EMPTYING_HW_QUEUE_DELBA,
@@ -220,8 +220,7 @@ enum iwl_agg_state {
220 * Tx response (REPLY_TX), and the block ack notification 220 * Tx response (REPLY_TX), and the block ack notification
221 * (REPLY_COMPRESSED_BA). 221 * (REPLY_COMPRESSED_BA).
222 * @state: state of the BA agreement establishment / tear down. 222 * @state: state of the BA agreement establishment / tear down.
223 * @txq_id: Tx queue used by the BA session - used by the transport layer. 223 * @txq_id: Tx queue used by the BA session
224 * Needed by the upper layer for debugfs only.
225 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or 224 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
226 * the first packet to be sent in legacy HW queue in Tx AGG stop flow. 225 * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
227 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that 226 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -507,44 +506,6 @@ struct reply_agg_tx_error_statistics {
507 u32 unknown; 506 u32 unknown;
508}; 507};
509 508
510/* management statistics */
511enum iwl_mgmt_stats {
512 MANAGEMENT_ASSOC_REQ = 0,
513 MANAGEMENT_ASSOC_RESP,
514 MANAGEMENT_REASSOC_REQ,
515 MANAGEMENT_REASSOC_RESP,
516 MANAGEMENT_PROBE_REQ,
517 MANAGEMENT_PROBE_RESP,
518 MANAGEMENT_BEACON,
519 MANAGEMENT_ATIM,
520 MANAGEMENT_DISASSOC,
521 MANAGEMENT_AUTH,
522 MANAGEMENT_DEAUTH,
523 MANAGEMENT_ACTION,
524 MANAGEMENT_MAX,
525};
526/* control statistics */
527enum iwl_ctrl_stats {
528 CONTROL_BACK_REQ = 0,
529 CONTROL_BACK,
530 CONTROL_PSPOLL,
531 CONTROL_RTS,
532 CONTROL_CTS,
533 CONTROL_ACK,
534 CONTROL_CFEND,
535 CONTROL_CFENDACK,
536 CONTROL_MAX,
537};
538
539struct traffic_stats {
540#ifdef CONFIG_IWLWIFI_DEBUGFS
541 u32 mgmt[MANAGEMENT_MAX];
542 u32 ctrl[CONTROL_MAX];
543 u32 data_cnt;
544 u64 data_bytes;
545#endif
546};
547
548/* 509/*
549 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds 510 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
550 * to perform continuous uCode event logging operation if enabled 511 * to perform continuous uCode event logging operation if enabled
@@ -571,24 +532,7 @@ struct iwl_event_log {
571 int wraps_more_count; 532 int wraps_more_count;
572}; 533};
573 534
574/*
575 * This is the threshold value of plcp error rate per 100mSecs. It is
576 * used to set and check for the validity of plcp_delta.
577 */
578#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
579#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
580#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
581#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
582#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
583#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
584
585#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) 535#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
586#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
587
588/* TX queue watchdog timeouts in mSecs */
589#define IWL_DEF_WD_TIMEOUT (2000)
590#define IWL_LONG_WD_TIMEOUT (10000)
591#define IWL_MAX_WD_TIMEOUT (120000)
592 536
593/* BT Antenna Coupling Threshold (dB) */ 537/* BT Antenna Coupling Threshold (dB) */
594#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35) 538#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
@@ -598,18 +542,18 @@ struct iwl_event_log {
598#define IWL_MAX_CONTINUE_RELOAD_CNT 4 542#define IWL_MAX_CONTINUE_RELOAD_CNT 4
599 543
600 544
601enum iwl_reset { 545struct iwl_rf_reset {
602 IWL_RF_RESET = 0,
603 IWL_FW_RESET,
604 IWL_MAX_FORCE_RESET,
605};
606
607struct iwl_force_reset {
608 int reset_request_count; 546 int reset_request_count;
609 int reset_success_count; 547 int reset_success_count;
610 int reset_reject_count; 548 int reset_reject_count;
611 unsigned long reset_duration; 549 unsigned long last_reset_jiffies;
612 unsigned long last_force_reset_jiffies; 550};
551
552enum iwl_rxon_context_id {
553 IWL_RXON_CTX_BSS,
554 IWL_RXON_CTX_PAN,
555
556 NUM_IWL_RXON_CTX
613}; 557};
614 558
615/* extend beacon time format bit shifting */ 559/* extend beacon time format bit shifting */
@@ -623,6 +567,10 @@ struct iwl_force_reset {
623struct iwl_rxon_context { 567struct iwl_rxon_context {
624 struct ieee80211_vif *vif; 568 struct ieee80211_vif *vif;
625 569
570 u8 mcast_queue;
571 u8 ac_to_queue[IEEE80211_NUM_ACS];
572 u8 ac_to_fifo[IEEE80211_NUM_ACS];
573
626 /* 574 /*
627 * We could use the vif to indicate active, but we 575 * We could use the vif to indicate active, but we
628 * also need it to be active during disabling when 576 * also need it to be active during disabling when
@@ -677,6 +625,52 @@ enum iwl_scan_type {
677 IWL_SCAN_ROC, 625 IWL_SCAN_ROC,
678}; 626};
679 627
628/**
629 * struct iwl_hw_params
630 *
631 * Holds the module parameters
632 *
633 * @tx_chains_num: Number of TX chains
634 * @rx_chains_num: Number of RX chains
635 * @valid_tx_ant: usable antennas for TX
636 * @valid_rx_ant: usable antennas for RX
637 * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
638 * @sku: sku read from EEPROM
639 * @ct_kill_threshold: temperature threshold - in hw dependent unit
640 * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
641 * relevant for 1000, 6000 and up
642 * @struct iwl_sensitivity_ranges: range of sensitivity values
643 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
644 */
645struct iwl_hw_params {
646 u8 tx_chains_num;
647 u8 rx_chains_num;
648 u8 valid_tx_ant;
649 u8 valid_rx_ant;
650 u8 ht40_channel;
651 bool use_rts_for_aggregation;
652 u16 sku;
653 u32 ct_kill_threshold;
654 u32 ct_kill_exit_threshold;
655
656 const struct iwl_sensitivity_ranges *sens;
657};
658
659struct iwl_lib_ops {
660 /* set hw dependent parameters */
661 void (*set_hw_params)(struct iwl_priv *priv);
662 int (*set_channel_switch)(struct iwl_priv *priv,
663 struct ieee80211_channel_switch *ch_switch);
664 /* device specific configuration */
665 void (*nic_config)(struct iwl_priv *priv);
666
667 /* eeprom operations (as defined in iwl-eeprom.h) */
668 struct iwl_eeprom_ops eeprom_ops;
669
670 /* temperature */
671 void (*temperature)(struct iwl_priv *priv);
672};
673
680#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 674#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
681struct iwl_testmode_trace { 675struct iwl_testmode_trace {
682 u32 buff_size; 676 u32 buff_size;
@@ -701,6 +695,17 @@ struct iwl_wipan_noa_data {
701 u8 data[]; 695 u8 data[];
702}; 696};
703 697
698/* Calibration disabling bit mask */
699enum {
700 IWL_CALIB_ENABLE_ALL = 0,
701
702 IWL_SENSITIVITY_CALIB_DISABLED = BIT(0),
703 IWL_CHAIN_NOISE_CALIB_DISABLED = BIT(1),
704 IWL_TX_POWER_CALIB_DISABLED = BIT(2),
705
706 IWL_CALIB_DISABLE_ALL = 0xFFFFFFFF,
707};
708
704#define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \ 709#define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \
705 ((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific)) 710 ((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific))
706 711
@@ -710,9 +715,11 @@ struct iwl_wipan_noa_data {
710 715
711struct iwl_priv { 716struct iwl_priv {
712 717
713 /*data shared among all the driver's layers */ 718 struct iwl_trans *trans;
714 struct iwl_shared *shrd; 719 struct device *dev; /* for debug prints only */
720 const struct iwl_cfg *cfg;
715 const struct iwl_fw *fw; 721 const struct iwl_fw *fw;
722 const struct iwl_lib_ops *lib;
716 unsigned long status; 723 unsigned long status;
717 724
718 spinlock_t sta_lock; 725 spinlock_t sta_lock;
@@ -720,6 +727,11 @@ struct iwl_priv {
720 727
721 unsigned long transport_queue_stop; 728 unsigned long transport_queue_stop;
722 bool passive_no_rx; 729 bool passive_no_rx;
730#define IWL_INVALID_MAC80211_QUEUE 0xff
731 u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
732 atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
733
734 unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
723 735
724 /* ieee device used by generic ieee processing code */ 736 /* ieee device used by generic ieee processing code */
725 struct ieee80211_hw *hw; 737 struct ieee80211_hw *hw;
@@ -730,7 +742,10 @@ struct iwl_priv {
730 742
731 struct workqueue_struct *workqueue; 743 struct workqueue_struct *workqueue;
732 744
745 struct iwl_hw_params hw_params;
746
733 enum ieee80211_band band; 747 enum ieee80211_band band;
748 u8 valid_contexts;
734 749
735 void (*pre_rx_handler)(struct iwl_priv *priv, 750 void (*pre_rx_handler)(struct iwl_priv *priv,
736 struct iwl_rx_cmd_buffer *rxb); 751 struct iwl_rx_cmd_buffer *rxb);
@@ -763,8 +778,8 @@ struct iwl_priv {
763 /*counters */ 778 /*counters */
764 u32 rx_handlers_stats[REPLY_MAX]; 779 u32 rx_handlers_stats[REPLY_MAX];
765 780
766 /* force reset */ 781 /* rf reset */
767 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; 782 struct iwl_rf_reset rf_reset;
768 783
769 /* firmware reload counter and timestamp */ 784 /* firmware reload counter and timestamp */
770 unsigned long reload_jiffies; 785 unsigned long reload_jiffies;
@@ -810,8 +825,6 @@ struct iwl_priv {
810 825
811 __le16 switch_channel; 826 __le16 switch_channel;
812 827
813 u16 active_rate;
814
815 u8 start_calib; 828 u8 start_calib;
816 struct iwl_sensitivity_data sensitivity_data; 829 struct iwl_sensitivity_data sensitivity_data;
817 struct iwl_chain_noise_data chain_noise_data; 830 struct iwl_chain_noise_data chain_noise_data;
@@ -825,10 +838,6 @@ struct iwl_priv {
825 838
826 int activity_timer_active; 839 int activity_timer_active;
827 840
828 /* counts mgmt, ctl, and data packets */
829 struct traffic_stats tx_stats;
830 struct traffic_stats rx_stats;
831
832 struct iwl_power_mgr power_data; 841 struct iwl_power_mgr power_data;
833 struct iwl_tt_mgmt thermal_throttle; 842 struct iwl_tt_mgmt thermal_throttle;
834 843
@@ -912,6 +921,7 @@ struct iwl_priv {
912 __le32 kill_ack_mask; 921 __le32 kill_ack_mask;
913 __le32 kill_cts_mask; 922 __le32 kill_cts_mask;
914 __le16 bt_valid; 923 __le16 bt_valid;
924 bool reduced_txpower;
915 u16 bt_on_thresh; 925 u16 bt_on_thresh;
916 u16 bt_duration; 926 u16 bt_duration;
917 u16 dynamic_frag_thresh; 927 u16 dynamic_frag_thresh;
@@ -948,23 +958,21 @@ struct iwl_priv {
948 958
949#ifdef CONFIG_IWLWIFI_DEBUGFS 959#ifdef CONFIG_IWLWIFI_DEBUGFS
950 /* debugfs */ 960 /* debugfs */
951 u16 tx_traffic_idx;
952 u16 rx_traffic_idx;
953 u8 *tx_traffic;
954 u8 *rx_traffic;
955 struct dentry *debugfs_dir; 961 struct dentry *debugfs_dir;
956 u32 dbgfs_sram_offset, dbgfs_sram_len; 962 u32 dbgfs_sram_offset, dbgfs_sram_len;
957 bool disable_ht40; 963 bool disable_ht40;
958 void *wowlan_sram; 964 void *wowlan_sram;
959#endif /* CONFIG_IWLWIFI_DEBUGFS */ 965#endif /* CONFIG_IWLWIFI_DEBUGFS */
960 966
967 /* eeprom -- this is in the card's little endian byte order */
968 u8 *eeprom;
969 enum iwl_nvm_type nvm_device_type;
970
961 struct work_struct txpower_work; 971 struct work_struct txpower_work;
962 u32 disable_sens_cal; 972 u32 calib_disabled;
963 u32 disable_chain_noise_cal;
964 struct work_struct run_time_calib_work; 973 struct work_struct run_time_calib_work;
965 struct timer_list statistics_periodic; 974 struct timer_list statistics_periodic;
966 struct timer_list ucode_trace; 975 struct timer_list ucode_trace;
967 struct timer_list watchdog;
968 976
969 struct iwl_event_log event_log; 977 struct iwl_event_log event_log;
970 978
@@ -982,10 +990,18 @@ struct iwl_priv {
982 __le64 replay_ctr; 990 __le64 replay_ctr;
983 __le16 last_seq_ctl; 991 __le16 last_seq_ctl;
984 bool have_rekey_data; 992 bool have_rekey_data;
993
994 /* device_pointers: pointers to ucode event tables */
995 struct {
996 u32 error_event_table;
997 u32 log_event_table;
998 } device_pointers;
999
1000 /* indicator of loaded ucode image */
1001 enum iwl_ucode_type cur_ucode;
985}; /*iwl_priv */ 1002}; /*iwl_priv */
986 1003
987extern struct kmem_cache *iwl_tx_cmd_pool; 1004extern struct kmem_cache *iwl_tx_cmd_pool;
988extern struct iwl_mod_params iwlagn_mod_params;
989 1005
990static inline struct iwl_rxon_context * 1006static inline struct iwl_rxon_context *
991iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) 1007iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
@@ -998,7 +1014,7 @@ iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
998#define for_each_context(priv, ctx) \ 1014#define for_each_context(priv, ctx) \
999 for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \ 1015 for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
1000 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \ 1016 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
1001 if (priv->shrd->valid_contexts & BIT(ctx->ctxid)) 1017 if (priv->valid_contexts & BIT(ctx->ctxid))
1002 1018
1003static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx) 1019static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
1004{ 1020{
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 6f312c77af5e..3c72bad0ae56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -66,10 +66,13 @@
66#include <linux/module.h> 66#include <linux/module.h>
67 67
68#include "iwl-drv.h" 68#include "iwl-drv.h"
69#include "iwl-debug.h"
69#include "iwl-trans.h" 70#include "iwl-trans.h"
70#include "iwl-shared.h"
71#include "iwl-op-mode.h" 71#include "iwl-op-mode.h"
72#include "iwl-agn-hw.h" 72#include "iwl-agn-hw.h"
73#include "iwl-fw.h"
74#include "iwl-config.h"
75#include "iwl-modparams.h"
73 76
74/* private includes */ 77/* private includes */
75#include "iwl-fw-file.h" 78#include "iwl-fw-file.h"
@@ -77,8 +80,10 @@
77/** 80/**
78 * struct iwl_drv - drv common data 81 * struct iwl_drv - drv common data
79 * @fw: the iwl_fw structure 82 * @fw: the iwl_fw structure
80 * @shrd: pointer to common shared structure
81 * @op_mode: the running op_mode 83 * @op_mode: the running op_mode
84 * @trans: transport layer
85 * @dev: for debug prints only
86 * @cfg: configuration struct
82 * @fw_index: firmware revision to try loading 87 * @fw_index: firmware revision to try loading
83 * @firmware_name: composite filename of ucode file to load 88 * @firmware_name: composite filename of ucode file to load
84 * @request_firmware_complete: the firmware has been obtained from user space 89 * @request_firmware_complete: the firmware has been obtained from user space
@@ -86,8 +91,10 @@
86struct iwl_drv { 91struct iwl_drv {
87 struct iwl_fw fw; 92 struct iwl_fw fw;
88 93
89 struct iwl_shared *shrd;
90 struct iwl_op_mode *op_mode; 94 struct iwl_op_mode *op_mode;
95 struct iwl_trans *trans;
96 struct device *dev;
97 const struct iwl_cfg *cfg;
91 98
92 int fw_index; /* firmware we're trying to load */ 99 int fw_index; /* firmware we're trying to load */
93 char firmware_name[25]; /* name of firmware file to load */ 100 char firmware_name[25]; /* name of firmware file to load */
@@ -110,7 +117,7 @@ struct fw_sec {
110static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) 117static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
111{ 118{
112 if (desc->v_addr) 119 if (desc->v_addr)
113 dma_free_coherent(trans(drv)->dev, desc->len, 120 dma_free_coherent(drv->trans->dev, desc->len,
114 desc->v_addr, desc->p_addr); 121 desc->v_addr, desc->p_addr);
115 desc->v_addr = NULL; 122 desc->v_addr = NULL;
116 desc->len = 0; 123 desc->len = 0;
@@ -138,7 +145,7 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
138 return -EINVAL; 145 return -EINVAL;
139 } 146 }
140 147
141 desc->v_addr = dma_alloc_coherent(trans(drv)->dev, sec->size, 148 desc->v_addr = dma_alloc_coherent(drv->trans->dev, sec->size,
142 &desc->p_addr, GFP_KERNEL); 149 &desc->p_addr, GFP_KERNEL);
143 if (!desc->v_addr) 150 if (!desc->v_addr)
144 return -ENOMEM; 151 return -ENOMEM;
@@ -156,8 +163,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
156 163
157static int iwl_request_firmware(struct iwl_drv *drv, bool first) 164static int iwl_request_firmware(struct iwl_drv *drv, bool first)
158{ 165{
159 const struct iwl_cfg *cfg = cfg(drv); 166 const char *name_pre = drv->cfg->fw_name_pre;
160 const char *name_pre = cfg->fw_name_pre;
161 char tag[8]; 167 char tag[8];
162 168
163 if (first) { 169 if (first) {
@@ -166,14 +172,14 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
166 strcpy(tag, UCODE_EXPERIMENTAL_TAG); 172 strcpy(tag, UCODE_EXPERIMENTAL_TAG);
167 } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) { 173 } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
168#endif 174#endif
169 drv->fw_index = cfg->ucode_api_max; 175 drv->fw_index = drv->cfg->ucode_api_max;
170 sprintf(tag, "%d", drv->fw_index); 176 sprintf(tag, "%d", drv->fw_index);
171 } else { 177 } else {
172 drv->fw_index--; 178 drv->fw_index--;
173 sprintf(tag, "%d", drv->fw_index); 179 sprintf(tag, "%d", drv->fw_index);
174 } 180 }
175 181
176 if (drv->fw_index < cfg->ucode_api_min) { 182 if (drv->fw_index < drv->cfg->ucode_api_min) {
177 IWL_ERR(drv, "no suitable firmware found!\n"); 183 IWL_ERR(drv, "no suitable firmware found!\n");
178 return -ENOENT; 184 return -ENOENT;
179 } 185 }
@@ -186,7 +192,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
186 drv->firmware_name); 192 drv->firmware_name);
187 193
188 return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, 194 return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
189 trans(drv)->dev, 195 drv->trans->dev,
190 GFP_KERNEL, drv, iwl_ucode_callback); 196 GFP_KERNEL, drv, iwl_ucode_callback);
191} 197}
192 198
@@ -284,6 +290,7 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
284 290
285 sec->offset = le32_to_cpu(sec_parse->offset); 291 sec->offset = le32_to_cpu(sec_parse->offset);
286 sec->data = sec_parse->data; 292 sec->data = sec_parse->data;
293 sec->size = size - sizeof(sec_parse->offset);
287 294
288 ++img->sec_counter; 295 ++img->sec_counter;
289 296
@@ -414,9 +421,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
414 struct iwl_ucode_tlv *tlv; 421 struct iwl_ucode_tlv *tlv;
415 size_t len = ucode_raw->size; 422 size_t len = ucode_raw->size;
416 const u8 *data; 423 const u8 *data;
417 int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
418 int tmp;
419 u64 alternatives;
420 u32 tlv_len; 424 u32 tlv_len;
421 enum iwl_ucode_tlv_type tlv_type; 425 enum iwl_ucode_tlv_type tlv_type;
422 const u8 *tlv_data; 426 const u8 *tlv_data;
@@ -434,23 +438,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
434 return -EINVAL; 438 return -EINVAL;
435 } 439 }
436 440
437 /*
438 * Check which alternatives are present, and "downgrade"
439 * when the chosen alternative is not present, warning
440 * the user when that happens. Some files may not have
441 * any alternatives, so don't warn in that case.
442 */
443 alternatives = le64_to_cpu(ucode->alternatives);
444 tmp = wanted_alternative;
445 if (wanted_alternative > 63)
446 wanted_alternative = 63;
447 while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
448 wanted_alternative--;
449 if (wanted_alternative && wanted_alternative != tmp)
450 IWL_WARN(drv,
451 "uCode alternative %d not available, choosing %d\n",
452 tmp, wanted_alternative);
453
454 drv->fw.ucode_ver = le32_to_cpu(ucode->ver); 441 drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
455 build = le32_to_cpu(ucode->build); 442 build = le32_to_cpu(ucode->build);
456 443
@@ -475,14 +462,11 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
475 len -= sizeof(*ucode); 462 len -= sizeof(*ucode);
476 463
477 while (len >= sizeof(*tlv)) { 464 while (len >= sizeof(*tlv)) {
478 u16 tlv_alt;
479
480 len -= sizeof(*tlv); 465 len -= sizeof(*tlv);
481 tlv = (void *)data; 466 tlv = (void *)data;
482 467
483 tlv_len = le32_to_cpu(tlv->length); 468 tlv_len = le32_to_cpu(tlv->length);
484 tlv_type = le16_to_cpu(tlv->type); 469 tlv_type = le32_to_cpu(tlv->type);
485 tlv_alt = le16_to_cpu(tlv->alternative);
486 tlv_data = tlv->data; 470 tlv_data = tlv->data;
487 471
488 if (len < tlv_len) { 472 if (len < tlv_len) {
@@ -493,14 +477,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
493 len -= ALIGN(tlv_len, 4); 477 len -= ALIGN(tlv_len, 4);
494 data += sizeof(*tlv) + ALIGN(tlv_len, 4); 478 data += sizeof(*tlv) + ALIGN(tlv_len, 4);
495 479
496 /*
497 * Alternative 0 is always valid.
498 *
499 * Skip alternative TLVs that are not selected.
500 */
501 if (tlv_alt != 0 && tlv_alt != wanted_alternative)
502 continue;
503
504 switch (tlv_type) { 480 switch (tlv_type) {
505 case IWL_UCODE_TLV_INST: 481 case IWL_UCODE_TLV_INST:
506 set_sec_data(pieces, IWL_UCODE_REGULAR, 482 set_sec_data(pieces, IWL_UCODE_REGULAR,
@@ -755,14 +731,13 @@ static int validate_sec_sizes(struct iwl_drv *drv,
755static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) 731static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
756{ 732{
757 struct iwl_drv *drv = context; 733 struct iwl_drv *drv = context;
758 const struct iwl_cfg *cfg = cfg(drv);
759 struct iwl_fw *fw = &drv->fw; 734 struct iwl_fw *fw = &drv->fw;
760 struct iwl_ucode_header *ucode; 735 struct iwl_ucode_header *ucode;
761 int err; 736 int err;
762 struct iwl_firmware_pieces pieces; 737 struct iwl_firmware_pieces pieces;
763 const unsigned int api_max = cfg->ucode_api_max; 738 const unsigned int api_max = drv->cfg->ucode_api_max;
764 unsigned int api_ok = cfg->ucode_api_ok; 739 unsigned int api_ok = drv->cfg->ucode_api_ok;
765 const unsigned int api_min = cfg->ucode_api_min; 740 const unsigned int api_min = drv->cfg->ucode_api_min;
766 u32 api_ver; 741 u32 api_ver;
767 int i; 742 int i;
768 743
@@ -838,46 +813,10 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
838 IWL_INFO(drv, "loaded firmware version %s", drv->fw.fw_version); 813 IWL_INFO(drv, "loaded firmware version %s", drv->fw.fw_version);
839 814
840 /* 815 /*
841 * For any of the failures below (before allocating pci memory)
842 * we will try to load a version with a smaller API -- maybe the
843 * user just got a corrupted version of the latest API.
844 */
845
846 IWL_DEBUG_INFO(drv, "f/w package hdr ucode version raw = 0x%x\n",
847 drv->fw.ucode_ver);
848 IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n",
849 get_sec_size(&pieces, IWL_UCODE_REGULAR,
850 IWL_UCODE_SECTION_INST));
851 IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n",
852 get_sec_size(&pieces, IWL_UCODE_REGULAR,
853 IWL_UCODE_SECTION_DATA));
854 IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n",
855 get_sec_size(&pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
856 IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n",
857 get_sec_size(&pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
858
859 /* Verify that uCode images will fit in card's SRAM */
860 if (get_sec_size(&pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
861 cfg->max_inst_size) {
862 IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
863 get_sec_size(&pieces, IWL_UCODE_REGULAR,
864 IWL_UCODE_SECTION_INST));
865 goto try_again;
866 }
867
868 if (get_sec_size(&pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
869 cfg->max_data_size) {
870 IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
871 get_sec_size(&pieces, IWL_UCODE_REGULAR,
872 IWL_UCODE_SECTION_DATA));
873 goto try_again;
874 }
875
876 /*
877 * In mvm uCode there is no difference between data and instructions 816 * In mvm uCode there is no difference between data and instructions
878 * sections. 817 * sections.
879 */ 818 */
880 if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, cfg)) 819 if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, drv->cfg))
881 goto try_again; 820 goto try_again;
882 821
883 /* Allocate ucode buffers for card's bus-master loading ... */ 822 /* Allocate ucode buffers for card's bus-master loading ... */
@@ -901,14 +840,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
901 fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12; 840 fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
902 else 841 else
903 fw->init_evtlog_size = 842 fw->init_evtlog_size =
904 cfg->base_params->max_event_log_size; 843 drv->cfg->base_params->max_event_log_size;
905 fw->init_errlog_ptr = pieces.init_errlog_ptr; 844 fw->init_errlog_ptr = pieces.init_errlog_ptr;
906 fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr; 845 fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
907 if (pieces.inst_evtlog_size) 846 if (pieces.inst_evtlog_size)
908 fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; 847 fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
909 else 848 else
910 fw->inst_evtlog_size = 849 fw->inst_evtlog_size =
911 cfg->base_params->max_event_log_size; 850 drv->cfg->base_params->max_event_log_size;
912 fw->inst_errlog_ptr = pieces.inst_errlog_ptr; 851 fw->inst_errlog_ptr = pieces.inst_errlog_ptr;
913 852
914 /* 853 /*
@@ -924,7 +863,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
924 release_firmware(ucode_raw); 863 release_firmware(ucode_raw);
925 complete(&drv->request_firmware_complete); 864 complete(&drv->request_firmware_complete);
926 865
927 drv->op_mode = iwl_dvm_ops.start(drv->shrd->trans, &drv->fw); 866 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
928 867
929 if (!drv->op_mode) 868 if (!drv->op_mode)
930 goto out_unbind; 869 goto out_unbind;
@@ -944,42 +883,38 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
944 release_firmware(ucode_raw); 883 release_firmware(ucode_raw);
945 out_unbind: 884 out_unbind:
946 complete(&drv->request_firmware_complete); 885 complete(&drv->request_firmware_complete);
947 device_release_driver(trans(drv)->dev); 886 device_release_driver(drv->trans->dev);
948} 887}
949 888
950int iwl_drv_start(struct iwl_shared *shrd, 889struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
951 struct iwl_trans *trans, const struct iwl_cfg *cfg) 890 const struct iwl_cfg *cfg)
952{ 891{
953 struct iwl_drv *drv; 892 struct iwl_drv *drv;
954 int ret; 893 int ret;
955 894
956 shrd->cfg = cfg;
957
958 drv = kzalloc(sizeof(*drv), GFP_KERNEL); 895 drv = kzalloc(sizeof(*drv), GFP_KERNEL);
959 if (!drv) { 896 if (!drv)
960 dev_printk(KERN_ERR, trans->dev, "Couldn't allocate iwl_drv"); 897 return NULL;
961 return -ENOMEM; 898
962 } 899 drv->trans = trans;
963 drv->shrd = shrd; 900 drv->dev = trans->dev;
964 shrd->drv = drv; 901 drv->cfg = cfg;
965 902
966 init_completion(&drv->request_firmware_complete); 903 init_completion(&drv->request_firmware_complete);
967 904
968 ret = iwl_request_firmware(drv, true); 905 ret = iwl_request_firmware(drv, true);
969 906
970 if (ret) { 907 if (ret) {
971 dev_printk(KERN_ERR, trans->dev, "Couldn't request the fw"); 908 IWL_ERR(trans, "Couldn't request the fw\n");
972 kfree(drv); 909 kfree(drv);
973 shrd->drv = NULL; 910 drv = NULL;
974 } 911 }
975 912
976 return ret; 913 return drv;
977} 914}
978 915
979void iwl_drv_stop(struct iwl_shared *shrd) 916void iwl_drv_stop(struct iwl_drv *drv)
980{ 917{
981 struct iwl_drv *drv = shrd->drv;
982
983 wait_for_completion(&drv->request_firmware_complete); 918 wait_for_completion(&drv->request_firmware_complete);
984 919
985 /* op_mode can be NULL if its start failed */ 920 /* op_mode can be NULL if its start failed */
@@ -989,5 +924,95 @@ void iwl_drv_stop(struct iwl_shared *shrd)
989 iwl_dealloc_ucode(drv); 924 iwl_dealloc_ucode(drv);
990 925
991 kfree(drv); 926 kfree(drv);
992 shrd->drv = NULL;
993} 927}
928
929
930/* shared module parameters */
931struct iwl_mod_params iwlwifi_mod_params = {
932 .amsdu_size_8K = 1,
933 .restart_fw = 1,
934 .plcp_check = true,
935 .bt_coex_active = true,
936 .power_level = IWL_POWER_INDEX_1,
937 .bt_ch_announce = true,
938 .auto_agg = true,
939 /* the rest are 0 by default */
940};
941
942#ifdef CONFIG_IWLWIFI_DEBUG
943module_param_named(debug, iwlwifi_mod_params.debug_level, uint,
944 S_IRUGO | S_IWUSR);
945MODULE_PARM_DESC(debug, "debug output mask");
946#endif
947
948module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
949MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
950module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
951MODULE_PARM_DESC(11n_disable,
952 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
953module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
954 int, S_IRUGO);
955MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
956module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
957MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
958
959module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
960 int, S_IRUGO);
961MODULE_PARM_DESC(antenna_coupling,
962 "specify antenna coupling in dB (defualt: 0 dB)");
963
964module_param_named(bt_ch_inhibition, iwlwifi_mod_params.bt_ch_announce,
965 bool, S_IRUGO);
966MODULE_PARM_DESC(bt_ch_inhibition,
967 "Enable BT channel inhibition (default: enable)");
968
969module_param_named(plcp_check, iwlwifi_mod_params.plcp_check, bool, S_IRUGO);
970MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
971
972module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
973MODULE_PARM_DESC(wd_disable,
974 "Disable stuck queue watchdog timer 0=system default, "
975 "1=disable, 2=enable (default: 0)");
976
977/*
978 * set bt_coex_active to true, uCode will do kill/defer
979 * every time the priority line is asserted (BT is sending signals on the
980 * priority line in the PCIx).
981 * set bt_coex_active to false, uCode will ignore the BT activity and
982 * perform the normal operation
983 *
984 * User might experience transmit issue on some platform due to WiFi/BT
985 * co-exist problem. The possible behaviors are:
986 * Able to scan and finding all the available AP
987 * Not able to associate with any AP
988 * On those platforms, WiFi communication can be restored by set
989 * "bt_coex_active" module parameter to "false"
990 *
991 * default: bt_coex_active = true (BT_COEX_ENABLE)
992 */
993module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active,
994 bool, S_IRUGO);
995MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
996
997module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, S_IRUGO);
998MODULE_PARM_DESC(led_mode, "0=system default, "
999 "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
1000
1001module_param_named(power_save, iwlwifi_mod_params.power_save,
1002 bool, S_IRUGO);
1003MODULE_PARM_DESC(power_save,
1004 "enable WiFi power management (default: disable)");
1005
1006module_param_named(power_level, iwlwifi_mod_params.power_level,
1007 int, S_IRUGO);
1008MODULE_PARM_DESC(power_level,
1009 "default power save level (range from 1 - 5, default: 1)");
1010
1011module_param_named(auto_agg, iwlwifi_mod_params.auto_agg,
1012 bool, S_IRUGO);
1013MODULE_PARM_DESC(auto_agg,
1014 "enable agg w/o check traffic load (default: enable)");
1015
1016module_param_named(5ghz_disable, iwlwifi_mod_params.disable_5ghz,
1017 bool, S_IRUGO);
1018MODULE_PARM_DESC(5ghz_disable, "disable 5GHz band (default: 0 [enabled])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 3b771c1d9096..2cbf137b25bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -63,7 +63,12 @@
63#ifndef __iwl_drv_h__ 63#ifndef __iwl_drv_h__
64#define __iwl_drv_h__ 64#define __iwl_drv_h__
65 65
66#include "iwl-shared.h" 66/* for all modules */
67#define DRV_NAME "iwlwifi"
68#define IWLWIFI_VERSION "in-tree:"
69#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation"
70#define DRV_AUTHOR "<ilw@linux.intel.com>"
71
67 72
68/** 73/**
69 * DOC: Driver system flows - drv component 74 * DOC: Driver system flows - drv component
@@ -90,34 +95,32 @@
90 * 8) iwl_ucode_callback starts the wifi implementation to matches the fw 95 * 8) iwl_ucode_callback starts the wifi implementation to matches the fw
91 */ 96 */
92 97
98struct iwl_drv;
99struct iwl_trans;
100struct iwl_cfg;
93/** 101/**
94 * iwl_drv_start - start the drv 102 * iwl_drv_start - start the drv
95 * 103 *
96 * @shrd: the shrd area
97 * @trans_ops: the ops of the transport 104 * @trans_ops: the ops of the transport
98 * @cfg: device specific constants / virtual functions 105 * @cfg: device specific constants / virtual functions
99 * 106 *
100 * TODO: review the parameters given to this function
101 *
102 * starts the driver: fetches the firmware. This should be called by bus 107 * starts the driver: fetches the firmware. This should be called by bus
103 * specific system flows implementations. For example, the bus specific probe 108 * specific system flows implementations. For example, the bus specific probe
104 * function should do bus related operations only, and then call to this 109 * function should do bus related operations only, and then call to this
105 * function. 110 * function. It returns the driver object or %NULL if an error occured.
106 */ 111 */
107int iwl_drv_start(struct iwl_shared *shrd, 112struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
108 struct iwl_trans *trans, const struct iwl_cfg *cfg); 113 const struct iwl_cfg *cfg);
109 114
110/** 115/**
111 * iwl_drv_stop - stop the drv 116 * iwl_drv_stop - stop the drv
112 * 117 *
113 * @shrd: the shrd area 118 * @drv:
114 *
115 * TODO: review the parameters given to this function
116 * 119 *
117 * Stop the driver. This should be called by bus specific system flows 120 * Stop the driver. This should be called by bus specific system flows
118 * implementations. For example, the bus specific remove function should first 121 * implementations. For example, the bus specific remove function should first
119 * call this function and then do the bus related operations only. 122 * call this function and then do the bus related operations only.
120 */ 123 */
121void iwl_drv_stop(struct iwl_shared *shrd); 124void iwl_drv_stop(struct iwl_drv *drv);
122 125
123#endif /* __iwl_drv_h__ */ 126#endif /* __iwl_drv_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 23cea42b9495..50c58911e718 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -68,9 +68,7 @@
68 68
69#include <net/mac80211.h> 69#include <net/mac80211.h>
70 70
71#include "iwl-commands.h"
72#include "iwl-dev.h" 71#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h" 72#include "iwl-debug.h"
75#include "iwl-agn.h" 73#include "iwl-agn.h"
76#include "iwl-eeprom.h" 74#include "iwl-eeprom.h"
@@ -187,33 +185,33 @@ static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
187 185
188} 186}
189 187
190static int iwl_eeprom_verify_signature(struct iwl_trans *trans) 188static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
191{ 189{
192 u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & 190 u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP) &
193 CSR_EEPROM_GP_VALID_MSK; 191 CSR_EEPROM_GP_VALID_MSK;
194 int ret = 0; 192 int ret = 0;
195 193
196 IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp); 194 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
197 switch (gp) { 195 switch (gp) {
198 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: 196 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
199 if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) { 197 if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
200 IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", 198 IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
201 gp); 199 gp);
202 ret = -ENOENT; 200 ret = -ENOENT;
203 } 201 }
204 break; 202 break;
205 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: 203 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
206 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: 204 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
207 if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) { 205 if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
208 IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); 206 IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
209 ret = -ENOENT; 207 ret = -ENOENT;
210 } 208 }
211 break; 209 break;
212 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: 210 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
213 default: 211 default:
214 IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, " 212 IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
215 "EEPROM_GP=0x%08x\n", 213 "EEPROM_GP=0x%08x\n",
216 (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) 214 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
217 ? "OTP" : "EEPROM", gp); 215 ? "OTP" : "EEPROM", gp);
218 ret = -ENOENT; 216 ret = -ENOENT;
219 break; 217 break;
@@ -221,11 +219,11 @@ static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
221 return ret; 219 return ret;
222} 220}
223 221
224u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset) 222u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset)
225{ 223{
226 if (!shrd->eeprom) 224 if (!priv->eeprom)
227 return 0; 225 return 0;
228 return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8); 226 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
229} 227}
230 228
231int iwl_eeprom_check_version(struct iwl_priv *priv) 229int iwl_eeprom_check_version(struct iwl_priv *priv)
@@ -233,11 +231,11 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
233 u16 eeprom_ver; 231 u16 eeprom_ver;
234 u16 calib_ver; 232 u16 calib_ver;
235 233
236 eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION); 234 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
237 calib_ver = iwl_eeprom_calib_version(priv->shrd); 235 calib_ver = iwl_eeprom_calib_version(priv);
238 236
239 if (eeprom_ver < cfg(priv)->eeprom_ver || 237 if (eeprom_ver < priv->cfg->eeprom_ver ||
240 calib_ver < cfg(priv)->eeprom_calib_ver) 238 calib_ver < priv->cfg->eeprom_calib_ver)
241 goto err; 239 goto err;
242 240
243 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", 241 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
@@ -247,58 +245,115 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
247err: 245err:
248 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x " 246 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
249 "CALIB=0x%x < 0x%x\n", 247 "CALIB=0x%x < 0x%x\n",
250 eeprom_ver, cfg(priv)->eeprom_ver, 248 eeprom_ver, priv->cfg->eeprom_ver,
251 calib_ver, cfg(priv)->eeprom_calib_ver); 249 calib_ver, priv->cfg->eeprom_calib_ver);
252 return -EINVAL; 250 return -EINVAL;
253 251
254} 252}
255 253
256int iwl_eeprom_init_hw_params(struct iwl_priv *priv) 254int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
257{ 255{
258 struct iwl_shared *shrd = priv->shrd;
259 u16 radio_cfg; 256 u16 radio_cfg;
260 257
261 hw_params(priv).sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP); 258 priv->hw_params.sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
262 if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE && 259 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE &&
263 !cfg(priv)->ht_params) { 260 !priv->cfg->ht_params) {
264 IWL_ERR(priv, "Invalid 11n configuration\n"); 261 IWL_ERR(priv, "Invalid 11n configuration\n");
265 return -EINVAL; 262 return -EINVAL;
266 } 263 }
267 264
268 if (!hw_params(priv).sku) { 265 if (!priv->hw_params.sku) {
269 IWL_ERR(priv, "Invalid device sku\n"); 266 IWL_ERR(priv, "Invalid device sku\n");
270 return -EINVAL; 267 return -EINVAL;
271 } 268 }
272 269
273 IWL_INFO(priv, "Device SKU: 0x%X\n", hw_params(priv).sku); 270 IWL_INFO(priv, "Device SKU: 0x%X\n", priv->hw_params.sku);
274 271
275 radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG); 272 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
276 273
277 hw_params(priv).valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); 274 priv->hw_params.valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
278 hw_params(priv).valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); 275 priv->hw_params.valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
279 276
280 /* check overrides (some devices have wrong EEPROM) */ 277 /* check overrides (some devices have wrong EEPROM) */
281 if (cfg(priv)->valid_tx_ant) 278 if (priv->cfg->valid_tx_ant)
282 hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; 279 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
283 if (cfg(priv)->valid_rx_ant) 280 if (priv->cfg->valid_rx_ant)
284 hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; 281 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
285 282
286 if (!hw_params(priv).valid_tx_ant || !hw_params(priv).valid_rx_ant) { 283 if (!priv->hw_params.valid_tx_ant || !priv->hw_params.valid_rx_ant) {
287 IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n", 284 IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
288 hw_params(priv).valid_tx_ant, 285 priv->hw_params.valid_tx_ant,
289 hw_params(priv).valid_rx_ant); 286 priv->hw_params.valid_rx_ant);
290 return -EINVAL; 287 return -EINVAL;
291 } 288 }
292 289
293 IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n", 290 IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
294 hw_params(priv).valid_tx_ant, hw_params(priv).valid_rx_ant); 291 priv->hw_params.valid_tx_ant, priv->hw_params.valid_rx_ant);
295 292
296 return 0; 293 return 0;
297} 294}
298 295
299void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac) 296u16 iwl_eeprom_calib_version(struct iwl_priv *priv)
300{ 297{
301 const u8 *addr = iwl_eeprom_query_addr(shrd, 298 struct iwl_eeprom_calib_hdr *hdr;
299
300 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
301 EEPROM_CALIB_ALL);
302 return hdr->version;
303}
304
305static u32 eeprom_indirect_address(struct iwl_priv *priv, u32 address)
306{
307 u16 offset = 0;
308
309 if ((address & INDIRECT_ADDRESS) == 0)
310 return address;
311
312 switch (address & INDIRECT_TYPE_MSK) {
313 case INDIRECT_HOST:
314 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
315 break;
316 case INDIRECT_GENERAL:
317 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
318 break;
319 case INDIRECT_REGULATORY:
320 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
321 break;
322 case INDIRECT_TXP_LIMIT:
323 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
324 break;
325 case INDIRECT_TXP_LIMIT_SIZE:
326 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
327 break;
328 case INDIRECT_CALIBRATION:
329 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
330 break;
331 case INDIRECT_PROCESS_ADJST:
332 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
333 break;
334 case INDIRECT_OTHERS:
335 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
336 break;
337 default:
338 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
339 address & INDIRECT_TYPE_MSK);
340 break;
341 }
342
343 /* translate the offset from words to byte */
344 return (address & ADDRESS_MSK) + (offset << 1);
345}
346
347const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset)
348{
349 u32 address = eeprom_indirect_address(priv, offset);
350 BUG_ON(address >= priv->cfg->base_params->eeprom_size);
351 return &priv->eeprom[address];
352}
353
354void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac)
355{
356 const u8 *addr = iwl_eeprom_query_addr(priv,
302 EEPROM_MAC_ADDRESS); 357 EEPROM_MAC_ADDRESS);
303 memcpy(mac, addr, ETH_ALEN); 358 memcpy(mac, addr, ETH_ALEN);
304} 359}
@@ -376,7 +431,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
376 * CSR auto clock gate disable bit - 431 * CSR auto clock gate disable bit -
377 * this is only applicable for HW with OTP shadow RAM 432 * this is only applicable for HW with OTP shadow RAM
378 */ 433 */
379 if (cfg(trans)->base_params->shadow_ram_support) 434 if (trans->cfg->base_params->shadow_ram_support)
380 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 435 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
381 CSR_RESET_LINK_PWR_MGMT_DISABLED); 436 CSR_RESET_LINK_PWR_MGMT_DISABLED);
382 } 437 }
@@ -497,7 +552,7 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
497 } 552 }
498 /* more in the link list, continue */ 553 /* more in the link list, continue */
499 usedblocks++; 554 usedblocks++;
500 } while (usedblocks <= cfg(trans)->base_params->max_ll_items); 555 } while (usedblocks <= trans->cfg->base_params->max_ll_items);
501 556
502 /* OTP has no valid blocks */ 557 /* OTP has no valid blocks */
503 IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n"); 558 IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n");
@@ -591,7 +646,6 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
591 646
592static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) 647static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
593{ 648{
594 struct iwl_shared *shrd = priv->shrd;
595 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; 649 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
596 int idx, entries; 650 int idx, entries;
597 __le16 *txp_len; 651 __le16 *txp_len;
@@ -600,10 +654,10 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
600 BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); 654 BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
601 655
602 /* the length is in 16-bit words, but we want entries */ 656 /* the length is in 16-bit words, but we want entries */
603 txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS); 657 txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
604 entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; 658 entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
605 659
606 txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS); 660 txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
607 661
608 for (idx = 0; idx < entries; idx++) { 662 for (idx = 0; idx < entries; idx++) {
609 txp = &txp_array[idx]; 663 txp = &txp_array[idx];
@@ -637,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
637 ((txp->delta_20_in_40 & 0xf0) >> 4), 691 ((txp->delta_20_in_40 & 0xf0) >> 4),
638 (txp->delta_20_in_40 & 0x0f)); 692 (txp->delta_20_in_40 & 0x0f));
639 693
640 max_txp_avg = iwl_get_max_txpower_avg(cfg(priv), txp_array, idx, 694 max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
641 &max_txp_avg_halfdbm); 695 &max_txp_avg_halfdbm);
642 696
643 /* 697 /*
@@ -656,66 +710,66 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
656/** 710/**
657 * iwl_eeprom_init - read EEPROM contents 711 * iwl_eeprom_init - read EEPROM contents
658 * 712 *
659 * Load the EEPROM contents from adapter into shrd->eeprom 713 * Load the EEPROM contents from adapter into priv->eeprom
660 * 714 *
661 * NOTE: This routine uses the non-debug IO access functions. 715 * NOTE: This routine uses the non-debug IO access functions.
662 */ 716 */
663int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev) 717int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
664{ 718{
665 __le16 *e; 719 __le16 *e;
666 u32 gp = iwl_read32(trans, CSR_EEPROM_GP); 720 u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP);
667 int sz; 721 int sz;
668 int ret; 722 int ret;
669 u16 addr; 723 u16 addr;
670 u16 validblockaddr = 0; 724 u16 validblockaddr = 0;
671 u16 cache_addr = 0; 725 u16 cache_addr = 0;
672 726
673 trans->nvm_device_type = iwl_get_nvm_type(trans, hw_rev); 727 priv->nvm_device_type = iwl_get_nvm_type(priv->trans, hw_rev);
674 if (trans->nvm_device_type == -ENOENT) 728 if (priv->nvm_device_type == -ENOENT)
675 return -ENOENT; 729 return -ENOENT;
676 /* allocate eeprom */ 730 /* allocate eeprom */
677 sz = cfg(trans)->base_params->eeprom_size; 731 sz = priv->cfg->base_params->eeprom_size;
678 IWL_DEBUG_EEPROM(trans, "NVM size = %d\n", sz); 732 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
679 trans->shrd->eeprom = kzalloc(sz, GFP_KERNEL); 733 priv->eeprom = kzalloc(sz, GFP_KERNEL);
680 if (!trans->shrd->eeprom) { 734 if (!priv->eeprom) {
681 ret = -ENOMEM; 735 ret = -ENOMEM;
682 goto alloc_err; 736 goto alloc_err;
683 } 737 }
684 e = (__le16 *)trans->shrd->eeprom; 738 e = (__le16 *)priv->eeprom;
685 739
686 ret = iwl_eeprom_verify_signature(trans); 740 ret = iwl_eeprom_verify_signature(priv);
687 if (ret < 0) { 741 if (ret < 0) {
688 IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); 742 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
689 ret = -ENOENT; 743 ret = -ENOENT;
690 goto err; 744 goto err;
691 } 745 }
692 746
693 /* Make sure driver (instead of uCode) is allowed to read EEPROM */ 747 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
694 ret = iwl_eeprom_acquire_semaphore(trans); 748 ret = iwl_eeprom_acquire_semaphore(priv->trans);
695 if (ret < 0) { 749 if (ret < 0) {
696 IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n"); 750 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
697 ret = -ENOENT; 751 ret = -ENOENT;
698 goto err; 752 goto err;
699 } 753 }
700 754
701 if (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) { 755 if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
702 756
703 ret = iwl_init_otp_access(trans); 757 ret = iwl_init_otp_access(priv->trans);
704 if (ret) { 758 if (ret) {
705 IWL_ERR(trans, "Failed to initialize OTP access.\n"); 759 IWL_ERR(priv, "Failed to initialize OTP access.\n");
706 ret = -ENOENT; 760 ret = -ENOENT;
707 goto done; 761 goto done;
708 } 762 }
709 iwl_write32(trans, CSR_EEPROM_GP, 763 iwl_write32(priv->trans, CSR_EEPROM_GP,
710 iwl_read32(trans, CSR_EEPROM_GP) & 764 iwl_read32(priv->trans, CSR_EEPROM_GP) &
711 ~CSR_EEPROM_GP_IF_OWNER_MSK); 765 ~CSR_EEPROM_GP_IF_OWNER_MSK);
712 766
713 iwl_set_bit(trans, CSR_OTP_GP_REG, 767 iwl_set_bit(priv->trans, CSR_OTP_GP_REG,
714 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | 768 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
715 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); 769 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
716 /* traversing the linked list if no shadow ram supported */ 770 /* traversing the linked list if no shadow ram supported */
717 if (!cfg(trans)->base_params->shadow_ram_support) { 771 if (!priv->cfg->base_params->shadow_ram_support) {
718 if (iwl_find_otp_image(trans, &validblockaddr)) { 772 if (iwl_find_otp_image(priv->trans, &validblockaddr)) {
719 ret = -ENOENT; 773 ret = -ENOENT;
720 goto done; 774 goto done;
721 } 775 }
@@ -724,7 +778,8 @@ int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev)
724 addr += sizeof(u16)) { 778 addr += sizeof(u16)) {
725 __le16 eeprom_data; 779 __le16 eeprom_data;
726 780
727 ret = iwl_read_otp_word(trans, addr, &eeprom_data); 781 ret = iwl_read_otp_word(priv->trans, addr,
782 &eeprom_data);
728 if (ret) 783 if (ret)
729 goto done; 784 goto done;
730 e[cache_addr / 2] = eeprom_data; 785 e[cache_addr / 2] = eeprom_data;
@@ -735,94 +790,93 @@ int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev)
735 for (addr = 0; addr < sz; addr += sizeof(u16)) { 790 for (addr = 0; addr < sz; addr += sizeof(u16)) {
736 u32 r; 791 u32 r;
737 792
738 iwl_write32(trans, CSR_EEPROM_REG, 793 iwl_write32(priv->trans, CSR_EEPROM_REG,
739 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 794 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
740 795
741 ret = iwl_poll_bit(trans, CSR_EEPROM_REG, 796 ret = iwl_poll_bit(priv->trans, CSR_EEPROM_REG,
742 CSR_EEPROM_REG_READ_VALID_MSK, 797 CSR_EEPROM_REG_READ_VALID_MSK,
743 CSR_EEPROM_REG_READ_VALID_MSK, 798 CSR_EEPROM_REG_READ_VALID_MSK,
744 IWL_EEPROM_ACCESS_TIMEOUT); 799 IWL_EEPROM_ACCESS_TIMEOUT);
745 if (ret < 0) { 800 if (ret < 0) {
746 IWL_ERR(trans, 801 IWL_ERR(priv,
747 "Time out reading EEPROM[%d]\n", addr); 802 "Time out reading EEPROM[%d]\n", addr);
748 goto done; 803 goto done;
749 } 804 }
750 r = iwl_read32(trans, CSR_EEPROM_REG); 805 r = iwl_read32(priv->trans, CSR_EEPROM_REG);
751 e[addr / 2] = cpu_to_le16(r >> 16); 806 e[addr / 2] = cpu_to_le16(r >> 16);
752 } 807 }
753 } 808 }
754 809
755 IWL_DEBUG_EEPROM(trans, "NVM Type: %s, version: 0x%x\n", 810 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
756 (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) 811 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
757 ? "OTP" : "EEPROM", 812 ? "OTP" : "EEPROM",
758 iwl_eeprom_query16(trans->shrd, EEPROM_VERSION)); 813 iwl_eeprom_query16(priv, EEPROM_VERSION));
759 814
760 ret = 0; 815 ret = 0;
761done: 816done:
762 iwl_eeprom_release_semaphore(trans); 817 iwl_eeprom_release_semaphore(priv->trans);
763 818
764err: 819err:
765 if (ret) 820 if (ret)
766 iwl_eeprom_free(trans->shrd); 821 iwl_eeprom_free(priv);
767alloc_err: 822alloc_err:
768 return ret; 823 return ret;
769} 824}
770 825
771void iwl_eeprom_free(struct iwl_shared *shrd) 826void iwl_eeprom_free(struct iwl_priv *priv)
772{ 827{
773 kfree(shrd->eeprom); 828 kfree(priv->eeprom);
774 shrd->eeprom = NULL; 829 priv->eeprom = NULL;
775} 830}
776 831
777static void iwl_init_band_reference(const struct iwl_priv *priv, 832static void iwl_init_band_reference(struct iwl_priv *priv,
778 int eep_band, int *eeprom_ch_count, 833 int eep_band, int *eeprom_ch_count,
779 const struct iwl_eeprom_channel **eeprom_ch_info, 834 const struct iwl_eeprom_channel **eeprom_ch_info,
780 const u8 **eeprom_ch_index) 835 const u8 **eeprom_ch_index)
781{ 836{
782 struct iwl_shared *shrd = priv->shrd; 837 u32 offset = priv->lib->
783 u32 offset = cfg(priv)->lib->
784 eeprom_ops.regulatory_bands[eep_band - 1]; 838 eeprom_ops.regulatory_bands[eep_band - 1];
785 switch (eep_band) { 839 switch (eep_band) {
786 case 1: /* 2.4GHz band */ 840 case 1: /* 2.4GHz band */
787 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); 841 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
788 *eeprom_ch_info = (struct iwl_eeprom_channel *) 842 *eeprom_ch_info = (struct iwl_eeprom_channel *)
789 iwl_eeprom_query_addr(shrd, offset); 843 iwl_eeprom_query_addr(priv, offset);
790 *eeprom_ch_index = iwl_eeprom_band_1; 844 *eeprom_ch_index = iwl_eeprom_band_1;
791 break; 845 break;
792 case 2: /* 4.9GHz band */ 846 case 2: /* 4.9GHz band */
793 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); 847 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
794 *eeprom_ch_info = (struct iwl_eeprom_channel *) 848 *eeprom_ch_info = (struct iwl_eeprom_channel *)
795 iwl_eeprom_query_addr(shrd, offset); 849 iwl_eeprom_query_addr(priv, offset);
796 *eeprom_ch_index = iwl_eeprom_band_2; 850 *eeprom_ch_index = iwl_eeprom_band_2;
797 break; 851 break;
798 case 3: /* 5.2GHz band */ 852 case 3: /* 5.2GHz band */
799 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); 853 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
800 *eeprom_ch_info = (struct iwl_eeprom_channel *) 854 *eeprom_ch_info = (struct iwl_eeprom_channel *)
801 iwl_eeprom_query_addr(shrd, offset); 855 iwl_eeprom_query_addr(priv, offset);
802 *eeprom_ch_index = iwl_eeprom_band_3; 856 *eeprom_ch_index = iwl_eeprom_band_3;
803 break; 857 break;
804 case 4: /* 5.5GHz band */ 858 case 4: /* 5.5GHz band */
805 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); 859 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
806 *eeprom_ch_info = (struct iwl_eeprom_channel *) 860 *eeprom_ch_info = (struct iwl_eeprom_channel *)
807 iwl_eeprom_query_addr(shrd, offset); 861 iwl_eeprom_query_addr(priv, offset);
808 *eeprom_ch_index = iwl_eeprom_band_4; 862 *eeprom_ch_index = iwl_eeprom_band_4;
809 break; 863 break;
810 case 5: /* 5.7GHz band */ 864 case 5: /* 5.7GHz band */
811 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); 865 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
812 *eeprom_ch_info = (struct iwl_eeprom_channel *) 866 *eeprom_ch_info = (struct iwl_eeprom_channel *)
813 iwl_eeprom_query_addr(shrd, offset); 867 iwl_eeprom_query_addr(priv, offset);
814 *eeprom_ch_index = iwl_eeprom_band_5; 868 *eeprom_ch_index = iwl_eeprom_band_5;
815 break; 869 break;
816 case 6: /* 2.4GHz ht40 channels */ 870 case 6: /* 2.4GHz ht40 channels */
817 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); 871 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
818 *eeprom_ch_info = (struct iwl_eeprom_channel *) 872 *eeprom_ch_info = (struct iwl_eeprom_channel *)
819 iwl_eeprom_query_addr(shrd, offset); 873 iwl_eeprom_query_addr(priv, offset);
820 *eeprom_ch_index = iwl_eeprom_band_6; 874 *eeprom_ch_index = iwl_eeprom_band_6;
821 break; 875 break;
822 case 7: /* 5 GHz ht40 channels */ 876 case 7: /* 5 GHz ht40 channels */
823 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); 877 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
824 *eeprom_ch_info = (struct iwl_eeprom_channel *) 878 *eeprom_ch_info = (struct iwl_eeprom_channel *)
825 iwl_eeprom_query_addr(shrd, offset); 879 iwl_eeprom_query_addr(priv, offset);
826 *eeprom_ch_index = iwl_eeprom_band_7; 880 *eeprom_ch_index = iwl_eeprom_band_7;
827 break; 881 break;
828 default: 882 default:
@@ -987,9 +1041,9 @@ int iwl_init_channel_map(struct iwl_priv *priv)
987 } 1041 }
988 1042
989 /* Check if we do have HT40 channels */ 1043 /* Check if we do have HT40 channels */
990 if (cfg(priv)->lib->eeprom_ops.regulatory_bands[5] == 1044 if (priv->lib->eeprom_ops.regulatory_bands[5] ==
991 EEPROM_REGULATORY_BAND_NO_HT40 && 1045 EEPROM_REGULATORY_BAND_NO_HT40 &&
992 cfg(priv)->lib->eeprom_ops.regulatory_bands[6] == 1046 priv->lib->eeprom_ops.regulatory_bands[6] ==
993 EEPROM_REGULATORY_BAND_NO_HT40) 1047 EEPROM_REGULATORY_BAND_NO_HT40)
994 return 0; 1048 return 0;
995 1049
@@ -1025,7 +1079,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
1025 * driver need to process addition information 1079 * driver need to process addition information
1026 * to determine the max channel tx power limits 1080 * to determine the max channel tx power limits
1027 */ 1081 */
1028 if (cfg(priv)->lib->eeprom_ops.enhanced_txpower) 1082 if (priv->lib->eeprom_ops.enhanced_txpower)
1029 iwl_eeprom_enhanced_txpower(priv); 1083 iwl_eeprom_enhanced_txpower(priv);
1030 1084
1031 return 0; 1085 return 0;
@@ -1072,11 +1126,11 @@ void iwl_rf_config(struct iwl_priv *priv)
1072{ 1126{
1073 u16 radio_cfg; 1127 u16 radio_cfg;
1074 1128
1075 radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG); 1129 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
1076 1130
1077 /* write radio config values to register */ 1131 /* write radio config values to register */
1078 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) { 1132 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
1079 iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG, 1133 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
1080 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | 1134 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
1081 EEPROM_RF_CFG_STEP_MSK(radio_cfg) | 1135 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
1082 EEPROM_RF_CFG_DASH_MSK(radio_cfg)); 1136 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
@@ -1088,7 +1142,7 @@ void iwl_rf_config(struct iwl_priv *priv)
1088 WARN_ON(1); 1142 WARN_ON(1);
1089 1143
1090 /* set CSR_HW_CONFIG_REG for uCode use */ 1144 /* set CSR_HW_CONFIG_REG for uCode use */
1091 iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG, 1145 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
1092 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 1146 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
1093 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); 1147 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
1094} 1148}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index e4a758340996..64bfd947caeb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -66,8 +66,6 @@
66#include <net/mac80211.h> 66#include <net/mac80211.h>
67 67
68struct iwl_priv; 68struct iwl_priv;
69struct iwl_shared;
70struct iwl_trans;
71 69
72/* 70/*
73 * EEPROM access time values: 71 * EEPROM access time values:
@@ -208,59 +206,6 @@ struct iwl_eeprom_calib_hdr {
208/* 6000 regulatory - indirect access */ 206/* 6000 regulatory - indirect access */
209#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\ 207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
210 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ 208 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
211
212/* 5000 Specific */
213#define EEPROM_5000_TX_POWER_VERSION (4)
214#define EEPROM_5000_EEPROM_VERSION (0x11A)
215
216/* 5050 Specific */
217#define EEPROM_5050_TX_POWER_VERSION (4)
218#define EEPROM_5050_EEPROM_VERSION (0x21E)
219
220/* 1000 Specific */
221#define EEPROM_1000_TX_POWER_VERSION (4)
222#define EEPROM_1000_EEPROM_VERSION (0x15C)
223
224/* 6x00 Specific */
225#define EEPROM_6000_TX_POWER_VERSION (4)
226#define EEPROM_6000_EEPROM_VERSION (0x423)
227
228/* 6x50 Specific */
229#define EEPROM_6050_TX_POWER_VERSION (4)
230#define EEPROM_6050_EEPROM_VERSION (0x532)
231
232/* 6150 Specific */
233#define EEPROM_6150_TX_POWER_VERSION (6)
234#define EEPROM_6150_EEPROM_VERSION (0x553)
235
236/* 6x05 Specific */
237#define EEPROM_6005_TX_POWER_VERSION (6)
238#define EEPROM_6005_EEPROM_VERSION (0x709)
239
240/* 6x30 Specific */
241#define EEPROM_6030_TX_POWER_VERSION (6)
242#define EEPROM_6030_EEPROM_VERSION (0x709)
243
244/* 2x00 Specific */
245#define EEPROM_2000_TX_POWER_VERSION (6)
246#define EEPROM_2000_EEPROM_VERSION (0x805)
247
248/* 6x35 Specific */
249#define EEPROM_6035_TX_POWER_VERSION (6)
250#define EEPROM_6035_EEPROM_VERSION (0x753)
251
252
253/* OTP */
254/* lower blocks contain EEPROM image and calibration data */
255#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
256/* high blocks contain PAPD data */
257#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */
258#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */
259#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
260#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
261#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
262#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
263
264/* 2.4 GHz */ 209/* 2.4 GHz */
265extern const u8 iwl_eeprom_band_1[14]; 210extern const u8 iwl_eeprom_band_1[14];
266 211
@@ -306,12 +251,14 @@ struct iwl_eeprom_ops {
306}; 251};
307 252
308 253
309int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev); 254int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
310void iwl_eeprom_free(struct iwl_shared *shrd); 255void iwl_eeprom_free(struct iwl_priv *priv);
311int iwl_eeprom_check_version(struct iwl_priv *priv); 256int iwl_eeprom_check_version(struct iwl_priv *priv);
312int iwl_eeprom_init_hw_params(struct iwl_priv *priv); 257int iwl_eeprom_init_hw_params(struct iwl_priv *priv);
313const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset); 258u16 iwl_eeprom_calib_version(struct iwl_priv *priv);
314u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset); 259const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset);
260u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset);
261void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac);
315int iwl_init_channel_map(struct iwl_priv *priv); 262int iwl_init_channel_map(struct iwl_priv *priv);
316void iwl_free_channel_map(struct iwl_priv *priv); 263void iwl_free_channel_map(struct iwl_priv *priv);
317const struct iwl_channel_info *iwl_get_channel_info( 264const struct iwl_channel_info *iwl_get_channel_info(
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 90208094b8eb..74bce97a8600 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -104,15 +104,29 @@
104 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 104 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
105 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte 105 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
106 * aligned (address bits 0-7 must be 0). 106 * aligned (address bits 0-7 must be 0).
107 * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
108 * for them are in different places.
107 * 109 *
108 * Bit fields in each pointer register: 110 * Bit fields in each pointer register:
109 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned 111 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
110 */ 112 */
111#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) 113#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
112#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) 114#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
113 115#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0)
114/* Find TFD CB base pointer for given queue (range 0-15). */ 116#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
115#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4) 117#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
118#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
119
120/* Find TFD CB base pointer for given queue */
121static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
122{
123 if (chnl < 16)
124 return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
125 if (chnl < 20)
126 return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
127 WARN_ON_ONCE(chnl >= 32);
128 return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
129}
116 130
117 131
118/** 132/**
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index c924ccb93c8c..e71564053e7f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -93,15 +93,7 @@ struct iwl_ucode_header {
93 * new TLV uCode file layout 93 * new TLV uCode file layout
94 * 94 *
95 * The new TLV file format contains TLVs, that each specify 95 * The new TLV file format contains TLVs, that each specify
96 * some piece of data. To facilitate "groups", for example 96 * some piece of data.
97 * different instruction image with different capabilities,
98 * bundled with the same init image, an alternative mechanism
99 * is provided:
100 * When the alternative field is 0, that means that the item
101 * is always valid. When it is non-zero, then it is only
102 * valid in conjunction with items of the same alternative,
103 * in which case the driver (user) selects one alternative
104 * to use.
105 */ 97 */
106 98
107enum iwl_ucode_tlv_type { 99enum iwl_ucode_tlv_type {
@@ -132,8 +124,7 @@ enum iwl_ucode_tlv_type {
132}; 124};
133 125
134struct iwl_ucode_tlv { 126struct iwl_ucode_tlv {
135 __le16 type; /* see above */ 127 __le32 type; /* see above */
136 __le16 alternative; /* see comment */
137 __le32 length; /* not including type/length fields */ 128 __le32 length; /* not including type/length fields */
138 u8 data[0]; 129 u8 data[0];
139}; 130};
@@ -152,7 +143,7 @@ struct iwl_tlv_ucode_header {
152 u8 human_readable[64]; 143 u8 human_readable[64];
153 __le32 ver; /* major/minor/API/serial */ 144 __le32 ver; /* major/minor/API/serial */
154 __le32 build; 145 __le32 build;
155 __le64 alternatives; /* bitmask of valid alternatives */ 146 __le64 ignore;
156 /* 147 /*
157 * The data contained herein has a TLV layout, 148 * The data contained herein has a TLV layout,
158 * see above for the TLV header and types. 149 * see above for the TLV header and types.
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 8e36bdc1e522..2153e4cc5572 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -63,6 +63,7 @@
63#ifndef __iwl_fw_h__ 63#ifndef __iwl_fw_h__
64#define __iwl_fw_h__ 64#define __iwl_fw_h__
65#include <linux/types.h> 65#include <linux/types.h>
66#include <net/mac80211.h>
66 67
67/** 68/**
68 * enum iwl_ucode_tlv_flag - ucode API flags 69 * enum iwl_ucode_tlv_flag - ucode API flags
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 09b856768f62..abb3250164ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -30,7 +30,6 @@
30#define __iwl_io_h__ 30#define __iwl_io_h__
31 31
32#include "iwl-devtrace.h" 32#include "iwl-devtrace.h"
33#include "iwl-shared.h"
34#include "iwl-trans.h" 33#include "iwl-trans.h"
35 34
36static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) 35static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 1993a2b7ae63..47000419f916 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -36,11 +36,10 @@
36#include <asm/unaligned.h> 36#include <asm/unaligned.h>
37 37
38#include "iwl-dev.h" 38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-agn.h" 39#include "iwl-agn.h"
41#include "iwl-io.h" 40#include "iwl-io.h"
42#include "iwl-trans.h" 41#include "iwl-trans.h"
43#include "iwl-shared.h" 42#include "iwl-modparams.h"
44 43
45/* Throughput OFF time(ms) ON time (ms) 44/* Throughput OFF time(ms) ON time (ms)
46 * >300 25 25 45 * >300 25 25
@@ -71,7 +70,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
71/* Set led register off */ 70/* Set led register off */
72void iwlagn_led_enable(struct iwl_priv *priv) 71void iwlagn_led_enable(struct iwl_priv *priv)
73{ 72{
74 iwl_write32(trans(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON); 73 iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
75} 74}
76 75
77/* 76/*
@@ -107,9 +106,9 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
107 }; 106 };
108 u32 reg; 107 u32 reg;
109 108
110 reg = iwl_read32(trans(priv), CSR_LED_REG); 109 reg = iwl_read32(priv->trans, CSR_LED_REG);
111 if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) 110 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
112 iwl_write32(trans(priv), CSR_LED_REG, 111 iwl_write32(priv->trans, CSR_LED_REG,
113 reg & CSR_LED_BSM_CTRL_MSK); 112 reg & CSR_LED_BSM_CTRL_MSK);
114 113
115 return iwl_dvm_send_cmd(priv, &cmd); 114 return iwl_dvm_send_cmd(priv, &cmd);
@@ -138,11 +137,11 @@ static int iwl_led_cmd(struct iwl_priv *priv,
138 } 137 }
139 138
140 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", 139 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
141 cfg(priv)->base_params->led_compensation); 140 priv->cfg->base_params->led_compensation);
142 led_cmd.on = iwl_blink_compensation(priv, on, 141 led_cmd.on = iwl_blink_compensation(priv, on,
143 cfg(priv)->base_params->led_compensation); 142 priv->cfg->base_params->led_compensation);
144 led_cmd.off = iwl_blink_compensation(priv, off, 143 led_cmd.off = iwl_blink_compensation(priv, off,
145 cfg(priv)->base_params->led_compensation); 144 priv->cfg->base_params->led_compensation);
146 145
147 ret = iwl_send_led_cmd(priv, &led_cmd); 146 ret = iwl_send_led_cmd(priv, &led_cmd);
148 if (!ret) { 147 if (!ret) {
@@ -175,7 +174,7 @@ static int iwl_led_blink_set(struct led_classdev *led_cdev,
175 174
176void iwl_leds_init(struct iwl_priv *priv) 175void iwl_leds_init(struct iwl_priv *priv)
177{ 176{
178 int mode = iwlagn_mod_params.led_mode; 177 int mode = iwlwifi_mod_params.led_mode;
179 int ret; 178 int ret;
180 179
181 if (mode == IWL_LED_DISABLE) { 180 if (mode == IWL_LED_DISABLE) {
@@ -183,7 +182,7 @@ void iwl_leds_init(struct iwl_priv *priv)
183 return; 182 return;
184 } 183 }
185 if (mode == IWL_LED_DEFAULT) 184 if (mode == IWL_LED_DEFAULT)
186 mode = cfg(priv)->led_mode; 185 mode = priv->cfg->led_mode;
187 186
188 priv->led.name = kasprintf(GFP_KERNEL, "%s-led", 187 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
189 wiphy_name(priv->hw->wiphy)); 188 wiphy_name(priv->hw->wiphy));
@@ -207,7 +206,7 @@ void iwl_leds_init(struct iwl_priv *priv)
207 break; 206 break;
208 } 207 }
209 208
210 ret = led_classdev_register(trans(priv)->dev, &priv->led); 209 ret = led_classdev_register(priv->trans->dev, &priv->led);
211 if (ret) { 210 if (ret) {
212 kfree(priv->led.name); 211 kfree(priv->led.name);
213 return; 212 return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index b6805f8e9a01..d33cc9cc7d3f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -44,13 +44,12 @@
44 44
45#include "iwl-eeprom.h" 45#include "iwl-eeprom.h"
46#include "iwl-dev.h" 46#include "iwl-dev.h"
47#include "iwl-core.h"
48#include "iwl-io.h" 47#include "iwl-io.h"
49#include "iwl-agn-calib.h" 48#include "iwl-agn-calib.h"
50#include "iwl-agn.h" 49#include "iwl-agn.h"
51#include "iwl-shared.h"
52#include "iwl-trans.h" 50#include "iwl-trans.h"
53#include "iwl-op-mode.h" 51#include "iwl-op-mode.h"
52#include "iwl-modparams.h"
54 53
55/***************************************************************************** 54/*****************************************************************************
56 * 55 *
@@ -147,7 +146,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
147 IEEE80211_HW_AMPDU_AGGREGATION | 146 IEEE80211_HW_AMPDU_AGGREGATION |
148 IEEE80211_HW_NEED_DTIM_PERIOD | 147 IEEE80211_HW_NEED_DTIM_PERIOD |
149 IEEE80211_HW_SPECTRUM_MGMT | 148 IEEE80211_HW_SPECTRUM_MGMT |
150 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 149 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
150 IEEE80211_HW_QUEUE_CONTROL |
151 IEEE80211_HW_SUPPORTS_PS |
152 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
153 IEEE80211_HW_SCAN_WHILE_IDLE;
154
155 hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
151 156
152 /* 157 /*
153 * Including the following line will crash some AP's. This 158 * Including the following line will crash some AP's. This
@@ -156,10 +161,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
156 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 161 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
157 */ 162 */
158 163
159 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 164 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
160 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
161
162 if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
163 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 165 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
164 IEEE80211_HW_SUPPORTS_STATIC_SMPS; 166 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
165 167
@@ -197,13 +199,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
197 WIPHY_FLAG_IBSS_RSN; 199 WIPHY_FLAG_IBSS_RSN;
198 200
199 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && 201 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
200 trans(priv)->ops->wowlan_suspend && 202 priv->trans->ops->wowlan_suspend &&
201 device_can_wakeup(trans(priv)->dev)) { 203 device_can_wakeup(priv->trans->dev)) {
202 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | 204 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
203 WIPHY_WOWLAN_DISCONNECT | 205 WIPHY_WOWLAN_DISCONNECT |
204 WIPHY_WOWLAN_EAP_IDENTITY_REQ | 206 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
205 WIPHY_WOWLAN_RFKILL_RELEASE; 207 WIPHY_WOWLAN_RFKILL_RELEASE;
206 if (!iwlagn_mod_params.sw_crypto) 208 if (!iwlwifi_mod_params.sw_crypto)
207 hw->wiphy->wowlan.flags |= 209 hw->wiphy->wowlan.flags |=
208 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | 210 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
209 WIPHY_WOWLAN_GTK_REKEY_FAILURE; 211 WIPHY_WOWLAN_GTK_REKEY_FAILURE;
@@ -215,7 +217,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
215 IWLAGN_WOWLAN_MAX_PATTERN_LEN; 217 IWLAGN_WOWLAN_MAX_PATTERN_LEN;
216 } 218 }
217 219
218 if (iwlagn_mod_params.power_save) 220 if (iwlwifi_mod_params.power_save)
219 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 221 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
220 else 222 else
221 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 223 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -224,8 +226,11 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
224 /* we create the 802.11 header and a zero-length SSID element */ 226 /* we create the 802.11 header and a zero-length SSID element */
225 hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2; 227 hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
226 228
227 /* Default value; 4 EDCA QOS priorities */ 229 /*
228 hw->queues = 4; 230 * We don't use all queues: 4 and 9 are unused and any
231 * aggregation queue gets mapped down to the AC queue.
232 */
233 hw->queues = IWLAGN_FIRST_AMPDU_QUEUE;
229 234
230 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 235 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
231 236
@@ -236,7 +241,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
236 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 241 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
237 &priv->bands[IEEE80211_BAND_5GHZ]; 242 &priv->bands[IEEE80211_BAND_5GHZ];
238 243
239 hw->wiphy->hw_version = trans(priv)->hw_id; 244 hw->wiphy->hw_version = priv->trans->hw_id;
240 245
241 iwl_leds_init(priv); 246 iwl_leds_init(priv);
242 247
@@ -332,7 +337,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
332 return 0; 337 return 0;
333} 338}
334 339
335static void iwlagn_mac_stop(struct ieee80211_hw *hw) 340void iwlagn_mac_stop(struct ieee80211_hw *hw)
336{ 341{
337 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 342 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
338 343
@@ -355,18 +360,18 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
355 * even if interface is down, trans->down will leave the RF 360 * even if interface is down, trans->down will leave the RF
356 * kill interrupt enabled 361 * kill interrupt enabled
357 */ 362 */
358 iwl_trans_stop_hw(trans(priv)); 363 iwl_trans_stop_hw(priv->trans, false);
359 364
360 IWL_DEBUG_MAC80211(priv, "leave\n"); 365 IWL_DEBUG_MAC80211(priv, "leave\n");
361} 366}
362 367
363static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, 368void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
364 struct ieee80211_vif *vif, 369 struct ieee80211_vif *vif,
365 struct cfg80211_gtk_rekey_data *data) 370 struct cfg80211_gtk_rekey_data *data)
366{ 371{
367 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 372 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
368 373
369 if (iwlagn_mod_params.sw_crypto) 374 if (iwlwifi_mod_params.sw_crypto)
370 return; 375 return;
371 376
372 IWL_DEBUG_MAC80211(priv, "enter\n"); 377 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -388,8 +393,7 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
388 393
389#ifdef CONFIG_PM_SLEEP 394#ifdef CONFIG_PM_SLEEP
390 395
391static int iwlagn_mac_suspend(struct ieee80211_hw *hw, 396int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
392 struct cfg80211_wowlan *wowlan)
393{ 397{
394 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 398 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
395 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 399 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
@@ -412,9 +416,9 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
412 if (ret) 416 if (ret)
413 goto error; 417 goto error;
414 418
415 device_set_wakeup_enable(trans(priv)->dev, true); 419 device_set_wakeup_enable(priv->trans->dev, true);
416 420
417 iwl_trans_wowlan_suspend(trans(priv)); 421 iwl_trans_wowlan_suspend(priv->trans);
418 422
419 goto out; 423 goto out;
420 424
@@ -437,27 +441,28 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
437 unsigned long flags; 441 unsigned long flags;
438 u32 base, status = 0xffffffff; 442 u32 base, status = 0xffffffff;
439 int ret = -EIO; 443 int ret = -EIO;
440 const struct fw_img *img;
441 444
442 IWL_DEBUG_MAC80211(priv, "enter\n"); 445 IWL_DEBUG_MAC80211(priv, "enter\n");
443 mutex_lock(&priv->mutex); 446 mutex_lock(&priv->mutex);
444 447
445 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, 448 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
446 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); 449 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
447 450
448 base = priv->shrd->device_pointers.error_event_table; 451 base = priv->device_pointers.error_event_table;
449 if (iwlagn_hw_valid_rtc_data_addr(base)) { 452 if (iwlagn_hw_valid_rtc_data_addr(base)) {
450 spin_lock_irqsave(&trans(priv)->reg_lock, flags); 453 spin_lock_irqsave(&priv->trans->reg_lock, flags);
451 ret = iwl_grab_nic_access_silent(trans(priv)); 454 ret = iwl_grab_nic_access_silent(priv->trans);
452 if (likely(ret == 0)) { 455 if (likely(ret == 0)) {
453 iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, base); 456 iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
454 status = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 457 status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
455 iwl_release_nic_access(trans(priv)); 458 iwl_release_nic_access(priv->trans);
456 } 459 }
457 spin_unlock_irqrestore(&trans(priv)->reg_lock, flags); 460 spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
458 461
459#ifdef CONFIG_IWLWIFI_DEBUGFS 462#ifdef CONFIG_IWLWIFI_DEBUGFS
460 if (ret == 0) { 463 if (ret == 0) {
464 const struct fw_img *img;
465
461 img = &(priv->fw->img[IWL_UCODE_WOWLAN]); 466 img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
462 if (!priv->wowlan_sram) { 467 if (!priv->wowlan_sram) {
463 priv->wowlan_sram = 468 priv->wowlan_sram =
@@ -467,7 +472,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
467 472
468 if (priv->wowlan_sram) 473 if (priv->wowlan_sram)
469 _iwl_read_targ_mem_words( 474 _iwl_read_targ_mem_words(
470 trans(priv), 0x800000, 475 priv->trans, 0x800000,
471 priv->wowlan_sram, 476 priv->wowlan_sram,
472 img->sec[IWL_UCODE_SECTION_DATA].len / 4); 477 img->sec[IWL_UCODE_SECTION_DATA].len / 4);
473 } 478 }
@@ -479,7 +484,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
479 484
480 priv->wowlan = false; 485 priv->wowlan = false;
481 486
482 device_set_wakeup_enable(trans(priv)->dev, false); 487 device_set_wakeup_enable(priv->trans->dev, false);
483 488
484 iwlagn_prepare_restart(priv); 489 iwlagn_prepare_restart(priv);
485 490
@@ -497,7 +502,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
497 502
498#endif 503#endif
499 504
500static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 505void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
501{ 506{
502 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 507 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
503 508
@@ -508,21 +513,21 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
508 dev_kfree_skb_any(skb); 513 dev_kfree_skb_any(skb);
509} 514}
510 515
511static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, 516void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
512 struct ieee80211_vif *vif, 517 struct ieee80211_vif *vif,
513 struct ieee80211_key_conf *keyconf, 518 struct ieee80211_key_conf *keyconf,
514 struct ieee80211_sta *sta, 519 struct ieee80211_sta *sta,
515 u32 iv32, u16 *phase1key) 520 u32 iv32, u16 *phase1key)
516{ 521{
517 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 522 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
518 523
519 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); 524 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
520} 525}
521 526
522static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 527int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
523 struct ieee80211_vif *vif, 528 struct ieee80211_vif *vif,
524 struct ieee80211_sta *sta, 529 struct ieee80211_sta *sta,
525 struct ieee80211_key_conf *key) 530 struct ieee80211_key_conf *key)
526{ 531{
527 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 532 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
528 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 533 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -532,7 +537,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
532 537
533 IWL_DEBUG_MAC80211(priv, "enter\n"); 538 IWL_DEBUG_MAC80211(priv, "enter\n");
534 539
535 if (iwlagn_mod_params.sw_crypto) { 540 if (iwlwifi_mod_params.sw_crypto) {
536 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); 541 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
537 return -EOPNOTSUPP; 542 return -EOPNOTSUPP;
538 } 543 }
@@ -622,11 +627,11 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
622 return ret; 627 return ret;
623} 628}
624 629
625static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 630int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
626 struct ieee80211_vif *vif, 631 struct ieee80211_vif *vif,
627 enum ieee80211_ampdu_mlme_action action, 632 enum ieee80211_ampdu_mlme_action action,
628 struct ieee80211_sta *sta, u16 tid, u16 *ssn, 633 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
629 u8 buf_size) 634 u8 buf_size)
630{ 635{
631 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 636 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
632 int ret = -EINVAL; 637 int ret = -EINVAL;
@@ -635,7 +640,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
635 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 640 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
636 sta->addr, tid); 641 sta->addr, tid);
637 642
638 if (!(hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)) 643 if (!(priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE))
639 return -EACCES; 644 return -EACCES;
640 645
641 IWL_DEBUG_MAC80211(priv, "enter\n"); 646 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -643,7 +648,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
643 648
644 switch (action) { 649 switch (action) {
645 case IEEE80211_AMPDU_RX_START: 650 case IEEE80211_AMPDU_RX_START:
646 if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 651 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
647 break; 652 break;
648 IWL_DEBUG_HT(priv, "start Rx\n"); 653 IWL_DEBUG_HT(priv, "start Rx\n");
649 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); 654 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -653,7 +658,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
653 ret = iwl_sta_rx_agg_stop(priv, sta, tid); 658 ret = iwl_sta_rx_agg_stop(priv, sta, tid);
654 break; 659 break;
655 case IEEE80211_AMPDU_TX_START: 660 case IEEE80211_AMPDU_TX_START:
656 if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 661 if (!priv->trans->ops->tx_agg_setup)
662 break;
663 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
657 break; 664 break;
658 IWL_DEBUG_HT(priv, "start Tx\n"); 665 IWL_DEBUG_HT(priv, "start Tx\n");
659 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); 666 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
@@ -667,7 +674,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
667 priv->agg_tids_count); 674 priv->agg_tids_count);
668 } 675 }
669 if (!priv->agg_tids_count && 676 if (!priv->agg_tids_count &&
670 hw_params(priv).use_rts_for_aggregation) { 677 priv->hw_params.use_rts_for_aggregation) {
671 /* 678 /*
672 * switch off RTS/CTS if it was previously enabled 679 * switch off RTS/CTS if it was previously enabled
673 */ 680 */
@@ -746,11 +753,11 @@ static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
746 return ret; 753 return ret;
747} 754}
748 755
749static int iwlagn_mac_sta_state(struct ieee80211_hw *hw, 756int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
750 struct ieee80211_vif *vif, 757 struct ieee80211_vif *vif,
751 struct ieee80211_sta *sta, 758 struct ieee80211_sta *sta,
752 enum ieee80211_sta_state old_state, 759 enum ieee80211_sta_state old_state,
753 enum ieee80211_sta_state new_state) 760 enum ieee80211_sta_state new_state)
754{ 761{
755 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 762 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
756 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 763 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -829,8 +836,8 @@ static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
829 return ret; 836 return ret;
830} 837}
831 838
832static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, 839void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
833 struct ieee80211_channel_switch *ch_switch) 840 struct ieee80211_channel_switch *ch_switch)
834{ 841{
835 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 842 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
836 const struct iwl_channel_info *ch_info; 843 const struct iwl_channel_info *ch_info;
@@ -863,7 +870,7 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
863 if (!iwl_is_associated_ctx(ctx)) 870 if (!iwl_is_associated_ctx(ctx))
864 goto out; 871 goto out;
865 872
866 if (!cfg(priv)->lib->set_channel_switch) 873 if (!priv->lib->set_channel_switch)
867 goto out; 874 goto out;
868 875
869 ch = channel->hw_value; 876 ch = channel->hw_value;
@@ -892,14 +899,13 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
892 iwl_set_rxon_ht(priv, ht_conf); 899 iwl_set_rxon_ht(priv, ht_conf);
893 iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); 900 iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
894 901
895 iwl_set_rate(priv);
896 /* 902 /*
897 * at this point, staging_rxon has the 903 * at this point, staging_rxon has the
898 * configuration for channel switch 904 * configuration for channel switch
899 */ 905 */
900 set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); 906 set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
901 priv->switch_channel = cpu_to_le16(ch); 907 priv->switch_channel = cpu_to_le16(ch);
902 if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) { 908 if (priv->lib->set_channel_switch(priv, ch_switch)) {
903 clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); 909 clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
904 priv->switch_channel = 0; 910 priv->switch_channel = 0;
905 ieee80211_chswitch_done(ctx->vif, false); 911 ieee80211_chswitch_done(ctx->vif, false);
@@ -910,10 +916,25 @@ out:
910 IWL_DEBUG_MAC80211(priv, "leave\n"); 916 IWL_DEBUG_MAC80211(priv, "leave\n");
911} 917}
912 918
913static void iwlagn_configure_filter(struct ieee80211_hw *hw, 919void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
914 unsigned int changed_flags, 920{
915 unsigned int *total_flags, 921 /*
916 u64 multicast) 922 * MULTI-FIXME
923 * See iwlagn_mac_channel_switch.
924 */
925 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
926
927 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
928 return;
929
930 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
931 ieee80211_chswitch_done(ctx->vif, is_success);
932}
933
934void iwlagn_configure_filter(struct ieee80211_hw *hw,
935 unsigned int changed_flags,
936 unsigned int *total_flags,
937 u64 multicast)
917{ 938{
918 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 939 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
919 __le32 filter_or = 0, filter_nand = 0; 940 __le32 filter_or = 0, filter_nand = 0;
@@ -960,7 +981,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
960 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 981 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
961} 982}
962 983
963static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) 984void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
964{ 985{
965 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 986 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
966 987
@@ -988,7 +1009,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
988 } 1009 }
989 } 1010 }
990 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); 1011 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
991 iwl_trans_wait_tx_queue_empty(trans(priv)); 1012 iwl_trans_wait_tx_queue_empty(priv->trans);
992done: 1013done:
993 mutex_unlock(&priv->mutex); 1014 mutex_unlock(&priv->mutex);
994 IWL_DEBUG_MAC80211(priv, "leave\n"); 1015 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1003,7 +1024,7 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1003 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; 1024 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
1004 int err = 0; 1025 int err = 0;
1005 1026
1006 if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) 1027 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
1007 return -EOPNOTSUPP; 1028 return -EOPNOTSUPP;
1008 1029
1009 if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT))) 1030 if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
@@ -1087,11 +1108,11 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1087 return err; 1108 return err;
1088} 1109}
1089 1110
1090static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) 1111int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
1091{ 1112{
1092 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1113 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1093 1114
1094 if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) 1115 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
1095 return -EOPNOTSUPP; 1116 return -EOPNOTSUPP;
1096 1117
1097 IWL_DEBUG_MAC80211(priv, "enter\n"); 1118 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -1104,16 +1125,16 @@ static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
1104 return 0; 1125 return 0;
1105} 1126}
1106 1127
1107static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, 1128void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1108 enum ieee80211_rssi_event rssi_event) 1129 enum ieee80211_rssi_event rssi_event)
1109{ 1130{
1110 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1131 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1111 1132
1112 IWL_DEBUG_MAC80211(priv, "enter\n"); 1133 IWL_DEBUG_MAC80211(priv, "enter\n");
1113 mutex_lock(&priv->mutex); 1134 mutex_lock(&priv->mutex);
1114 1135
1115 if (cfg(priv)->bt_params && 1136 if (priv->cfg->bt_params &&
1116 cfg(priv)->bt_params->advanced_bt_coexist) { 1137 priv->cfg->bt_params->advanced_bt_coexist) {
1117 if (rssi_event == RSSI_EVENT_LOW) 1138 if (rssi_event == RSSI_EVENT_LOW)
1118 priv->bt_enable_pspoll = true; 1139 priv->bt_enable_pspoll = true;
1119 else if (rssi_event == RSSI_EVENT_HIGH) 1140 else if (rssi_event == RSSI_EVENT_HIGH)
@@ -1129,8 +1150,8 @@ static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1129 IWL_DEBUG_MAC80211(priv, "leave\n"); 1150 IWL_DEBUG_MAC80211(priv, "leave\n");
1130} 1151}
1131 1152
1132static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, 1153int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
1133 struct ieee80211_sta *sta, bool set) 1154 struct ieee80211_sta *sta, bool set)
1134{ 1155{
1135 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1156 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1136 1157
@@ -1139,9 +1160,9 @@ static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
1139 return 0; 1160 return 0;
1140} 1161}
1141 1162
1142static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, 1163int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
1143 struct ieee80211_vif *vif, u16 queue, 1164 struct ieee80211_vif *vif, u16 queue,
1144 const struct ieee80211_tx_queue_params *params) 1165 const struct ieee80211_tx_queue_params *params)
1145{ 1166{
1146 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1167 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1147 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1168 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -1183,7 +1204,7 @@ static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
1183 return 0; 1204 return 0;
1184} 1205}
1185 1206
1186static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) 1207int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
1187{ 1208{
1188 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1209 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1189 1210
@@ -1199,11 +1220,10 @@ static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1199 return iwlagn_commit_rxon(priv, ctx); 1220 return iwlagn_commit_rxon(priv, ctx);
1200} 1221}
1201 1222
1202static int iwl_setup_interface(struct iwl_priv *priv, 1223int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1203 struct iwl_rxon_context *ctx)
1204{ 1224{
1205 struct ieee80211_vif *vif = ctx->vif; 1225 struct ieee80211_vif *vif = ctx->vif;
1206 int err; 1226 int err, ac;
1207 1227
1208 lockdep_assert_held(&priv->mutex); 1228 lockdep_assert_held(&priv->mutex);
1209 1229
@@ -1223,7 +1243,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
1223 return err; 1243 return err;
1224 } 1244 }
1225 1245
1226 if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist && 1246 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
1227 vif->type == NL80211_IFTYPE_ADHOC) { 1247 vif->type == NL80211_IFTYPE_ADHOC) {
1228 /* 1248 /*
1229 * pretend to have high BT traffic as long as we 1249 * pretend to have high BT traffic as long as we
@@ -1233,17 +1253,27 @@ static int iwl_setup_interface(struct iwl_priv *priv,
1233 priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH; 1253 priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1234 } 1254 }
1235 1255
1256 /* set up queue mappings */
1257 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
1258 vif->hw_queue[ac] = ctx->ac_to_queue[ac];
1259
1260 if (vif->type == NL80211_IFTYPE_AP)
1261 vif->cab_queue = ctx->mcast_queue;
1262 else
1263 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
1264
1236 return 0; 1265 return 0;
1237} 1266}
1238 1267
1239static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, 1268static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
1240 struct ieee80211_vif *vif) 1269 struct ieee80211_vif *vif)
1241{ 1270{
1242 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1271 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1243 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1272 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1244 struct iwl_rxon_context *tmp, *ctx = NULL; 1273 struct iwl_rxon_context *tmp, *ctx = NULL;
1245 int err; 1274 int err;
1246 enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); 1275 enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
1276 bool reset = false;
1247 1277
1248 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", 1278 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1249 viftype, vif->addr); 1279 viftype, vif->addr);
@@ -1265,6 +1295,13 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
1265 tmp->interface_modes | tmp->exclusive_interface_modes; 1295 tmp->interface_modes | tmp->exclusive_interface_modes;
1266 1296
1267 if (tmp->vif) { 1297 if (tmp->vif) {
1298 /* On reset we need to add the same interface again */
1299 if (tmp->vif == vif) {
1300 reset = true;
1301 ctx = tmp;
1302 break;
1303 }
1304
1268 /* check if this busy context is exclusive */ 1305 /* check if this busy context is exclusive */
1269 if (tmp->exclusive_interface_modes & 1306 if (tmp->exclusive_interface_modes &
1270 BIT(tmp->vif->type)) { 1307 BIT(tmp->vif->type)) {
@@ -1291,7 +1328,7 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
1291 ctx->vif = vif; 1328 ctx->vif = vif;
1292 1329
1293 err = iwl_setup_interface(priv, ctx); 1330 err = iwl_setup_interface(priv, ctx);
1294 if (!err) 1331 if (!err || reset)
1295 goto out; 1332 goto out;
1296 1333
1297 ctx->vif = NULL; 1334 ctx->vif = NULL;
@@ -1303,9 +1340,9 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
1303 return err; 1340 return err;
1304} 1341}
1305 1342
1306static void iwl_teardown_interface(struct iwl_priv *priv, 1343void iwl_teardown_interface(struct iwl_priv *priv,
1307 struct ieee80211_vif *vif, 1344 struct ieee80211_vif *vif,
1308 bool mode_change) 1345 bool mode_change)
1309{ 1346{
1310 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1347 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1311 1348
@@ -1446,9 +1483,9 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
1446 return err; 1483 return err;
1447} 1484}
1448 1485
1449static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, 1486int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
1450 struct ieee80211_vif *vif, 1487 struct ieee80211_vif *vif,
1451 struct cfg80211_scan_request *req) 1488 struct cfg80211_scan_request *req)
1452{ 1489{
1453 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1490 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1454 int ret; 1491 int ret;
@@ -1503,7 +1540,7 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1503 iwl_send_add_sta(priv, &cmd, CMD_ASYNC); 1540 iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
1504} 1541}
1505 1542
1506static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, 1543void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
1507 struct ieee80211_vif *vif, 1544 struct ieee80211_vif *vif,
1508 enum sta_notify_cmd cmd, 1545 enum sta_notify_cmd cmd,
1509 struct ieee80211_sta *sta) 1546 struct ieee80211_sta *sta)
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
new file mode 100644
index 000000000000..d9a86d6b2bd7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -0,0 +1,126 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_modparams_h__
64#define __iwl_modparams_h__
65
66#include <linux/types.h>
67#include <linux/spinlock.h>
68#include <linux/gfp.h>
69#include <net/mac80211.h>
70
71extern struct iwl_mod_params iwlwifi_mod_params;
72
73enum iwl_power_level {
74 IWL_POWER_INDEX_1,
75 IWL_POWER_INDEX_2,
76 IWL_POWER_INDEX_3,
77 IWL_POWER_INDEX_4,
78 IWL_POWER_INDEX_5,
79 IWL_POWER_NUM
80};
81
82#define IWL_DISABLE_HT_ALL BIT(0)
83#define IWL_DISABLE_HT_TXAGG BIT(1)
84#define IWL_DISABLE_HT_RXAGG BIT(2)
85
86/**
87 * struct iwl_mod_params
88 *
89 * Holds the module parameters
90 *
91 * @sw_crypto: using hardware encryption, default = 0
92 * @disable_11n: disable 11n capabilities, default = 0,
93 * use IWL_DISABLE_HT_* constants
94 * @amsdu_size_8K: enable 8K amsdu size, default = 1
95 * @restart_fw: restart firmware, default = 1
96 * @plcp_check: enable plcp health check, default = true
97 * @wd_disable: enable stuck queue check, default = 0
98 * @bt_coex_active: enable bt coex, default = true
99 * @led_mode: system default, default = 0
100 * @power_save: disable power save, default = false
101 * @power_level: power level, default = 1
102 * @debug_level: levels are IWL_DL_*
103 * @ant_coupling: antenna coupling in dB, default = 0
104 * @bt_ch_announce: BT channel inhibition, default = enable
105 * @auto_agg: enable agg. without check, default = true
106 * @disable_5ghz: disable 5GHz capability, default = false
107 */
108struct iwl_mod_params {
109 int sw_crypto;
110 unsigned int disable_11n;
111 int amsdu_size_8K;
112 int restart_fw;
113 bool plcp_check;
114 int wd_disable;
115 bool bt_coex_active;
116 int led_mode;
117 bool power_save;
118 int power_level;
119 u32 debug_level;
120 int ant_coupling;
121 bool bt_ch_announce;
122 bool auto_agg;
123 bool disable_5ghz;
124};
125
126#endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index 88dc4a0f96b4..0066b899fe5c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -75,21 +75,45 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
75void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, 75void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
76 struct iwl_rx_packet *pkt) 76 struct iwl_rx_packet *pkt)
77{ 77{
78 bool triggered = false;
79
78 if (!list_empty(&notif_wait->notif_waits)) { 80 if (!list_empty(&notif_wait->notif_waits)) {
79 struct iwl_notification_wait *w; 81 struct iwl_notification_wait *w;
80 82
81 spin_lock(&notif_wait->notif_wait_lock); 83 spin_lock(&notif_wait->notif_wait_lock);
82 list_for_each_entry(w, &notif_wait->notif_waits, list) { 84 list_for_each_entry(w, &notif_wait->notif_waits, list) {
83 if (w->cmd != pkt->hdr.cmd) 85 int i;
86 bool found = false;
87
88 /*
89 * If it already finished (triggered) or has been
90 * aborted then don't evaluate it again to avoid races,
91 * Otherwise the function could be called again even
92 * though it returned true before
93 */
94 if (w->triggered || w->aborted)
95 continue;
96
97 for (i = 0; i < w->n_cmds; i++) {
98 if (w->cmds[i] == pkt->hdr.cmd) {
99 found = true;
100 break;
101 }
102 }
103 if (!found)
84 continue; 104 continue;
85 w->triggered = true; 105
86 if (w->fn) 106 if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
87 w->fn(notif_wait, pkt, w->fn_data); 107 w->triggered = true;
108 triggered = true;
109 }
88 } 110 }
89 spin_unlock(&notif_wait->notif_wait_lock); 111 spin_unlock(&notif_wait->notif_wait_lock);
90 112
91 wake_up_all(&notif_wait->notif_waitq);
92 } 113 }
114
115 if (triggered)
116 wake_up_all(&notif_wait->notif_waitq);
93} 117}
94 118
95void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) 119void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
@@ -109,14 +133,18 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
109void 133void
110iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, 134iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
111 struct iwl_notification_wait *wait_entry, 135 struct iwl_notification_wait *wait_entry,
112 u8 cmd, 136 const u8 *cmds, int n_cmds,
113 void (*fn)(struct iwl_notif_wait_data *notif_wait, 137 bool (*fn)(struct iwl_notif_wait_data *notif_wait,
114 struct iwl_rx_packet *pkt, void *data), 138 struct iwl_rx_packet *pkt, void *data),
115 void *fn_data) 139 void *fn_data)
116{ 140{
141 if (WARN_ON(n_cmds > MAX_NOTIF_CMDS))
142 n_cmds = MAX_NOTIF_CMDS;
143
117 wait_entry->fn = fn; 144 wait_entry->fn = fn;
118 wait_entry->fn_data = fn_data; 145 wait_entry->fn_data = fn_data;
119 wait_entry->cmd = cmd; 146 wait_entry->n_cmds = n_cmds;
147 memcpy(wait_entry->cmds, cmds, n_cmds);
120 wait_entry->triggered = false; 148 wait_entry->triggered = false;
121 wait_entry->aborted = false; 149 wait_entry->aborted = false;
122 150
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 5e8af957aa7b..821523100cf1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -72,11 +72,19 @@ struct iwl_notif_wait_data {
72 wait_queue_head_t notif_waitq; 72 wait_queue_head_t notif_waitq;
73}; 73};
74 74
75#define MAX_NOTIF_CMDS 5
76
75/** 77/**
76 * struct iwl_notification_wait - notification wait entry 78 * struct iwl_notification_wait - notification wait entry
77 * @list: list head for global list 79 * @list: list head for global list
78 * @fn: function called with the notification 80 * @fn: Function called with the notification. If the function
79 * @cmd: command ID 81 * returns true, the wait is over, if it returns false then
82 * the waiter stays blocked. If no function is given, any
83 * of the listed commands will unblock the waiter.
84 * @cmds: command IDs
85 * @n_cmds: number of command IDs
86 * @triggered: waiter should be woken up
87 * @aborted: wait was aborted
80 * 88 *
81 * This structure is not used directly, to wait for a 89 * This structure is not used directly, to wait for a
82 * notification declare it on the stack, and call 90 * notification declare it on the stack, and call
@@ -93,11 +101,12 @@ struct iwl_notif_wait_data {
93struct iwl_notification_wait { 101struct iwl_notification_wait {
94 struct list_head list; 102 struct list_head list;
95 103
96 void (*fn)(struct iwl_notif_wait_data *notif_data, 104 bool (*fn)(struct iwl_notif_wait_data *notif_data,
97 struct iwl_rx_packet *pkt, void *data); 105 struct iwl_rx_packet *pkt, void *data);
98 void *fn_data; 106 void *fn_data;
99 107
100 u8 cmd; 108 u8 cmds[MAX_NOTIF_CMDS];
109 u8 n_cmds;
101 bool triggered, aborted; 110 bool triggered, aborted;
102}; 111};
103 112
@@ -112,8 +121,8 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
112void __acquires(wait_entry) 121void __acquires(wait_entry)
113iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data, 122iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
114 struct iwl_notification_wait *wait_entry, 123 struct iwl_notification_wait *wait_entry,
115 u8 cmd, 124 const u8 *cmds, int n_cmds,
116 void (*fn)(struct iwl_notif_wait_data *notif_data, 125 bool (*fn)(struct iwl_notif_wait_data *notif_data,
117 struct iwl_rx_packet *pkt, void *data), 126 struct iwl_rx_packet *pkt, void *data),
118 void *fn_data); 127 void *fn_data);
119 128
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 6ea4163ff56a..4ef742b28e08 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -69,6 +69,7 @@ struct sk_buff;
69struct iwl_device_cmd; 69struct iwl_device_cmd;
70struct iwl_rx_cmd_buffer; 70struct iwl_rx_cmd_buffer;
71struct iwl_fw; 71struct iwl_fw;
72struct iwl_cfg;
72 73
73/** 74/**
74 * DOC: Operational mode - what is it ? 75 * DOC: Operational mode - what is it ?
@@ -111,10 +112,10 @@ struct iwl_fw;
111 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the 112 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
112 * HCMD the this Rx responds to. 113 * HCMD the this Rx responds to.
113 * Must be atomic. 114 * Must be atomic.
114 * @queue_full: notifies that a HW queue is full. Ac is the ac of the queue 115 * @queue_full: notifies that a HW queue is full.
115 * Must be atomic 116 * Must be atomic
116 * @queue_not_full: notifies that a HW queue is not full any more. 117 * @queue_not_full: notifies that a HW queue is not full any more.
117 * Ac is the ac of the queue. Must be atomic 118 * Must be atomic
118 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that 119 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
119 * the radio is killed. Must be atomic. 120 * the radio is killed. Must be atomic.
120 * @free_skb: allows the transport layer to free skbs that haven't been 121 * @free_skb: allows the transport layer to free skbs that haven't been
@@ -125,20 +126,23 @@ struct iwl_fw;
125 * @cmd_queue_full: Called when the command queue gets full. Must be atomic. 126 * @cmd_queue_full: Called when the command queue gets full. Must be atomic.
126 * @nic_config: configure NIC, called before firmware is started. 127 * @nic_config: configure NIC, called before firmware is started.
127 * May sleep 128 * May sleep
129 * @wimax_active: invoked when WiMax becomes active. Must be atomic.
128 */ 130 */
129struct iwl_op_mode_ops { 131struct iwl_op_mode_ops {
130 struct iwl_op_mode *(*start)(struct iwl_trans *trans, 132 struct iwl_op_mode *(*start)(struct iwl_trans *trans,
133 const struct iwl_cfg *cfg,
131 const struct iwl_fw *fw); 134 const struct iwl_fw *fw);
132 void (*stop)(struct iwl_op_mode *op_mode); 135 void (*stop)(struct iwl_op_mode *op_mode);
133 int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, 136 int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
134 struct iwl_device_cmd *cmd); 137 struct iwl_device_cmd *cmd);
135 void (*queue_full)(struct iwl_op_mode *op_mode, u8 ac); 138 void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
136 void (*queue_not_full)(struct iwl_op_mode *op_mode, u8 ac); 139 void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
137 void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); 140 void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
138 void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); 141 void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
139 void (*nic_error)(struct iwl_op_mode *op_mode); 142 void (*nic_error)(struct iwl_op_mode *op_mode);
140 void (*cmd_queue_full)(struct iwl_op_mode *op_mode); 143 void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
141 void (*nic_config)(struct iwl_op_mode *op_mode); 144 void (*nic_config)(struct iwl_op_mode *op_mode);
145 void (*wimax_active)(struct iwl_op_mode *op_mode);
142}; 146};
143 147
144/** 148/**
@@ -169,15 +173,16 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
169 return op_mode->ops->rx(op_mode, rxb, cmd); 173 return op_mode->ops->rx(op_mode, rxb, cmd);
170} 174}
171 175
172static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, u8 ac) 176static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
177 int queue)
173{ 178{
174 op_mode->ops->queue_full(op_mode, ac); 179 op_mode->ops->queue_full(op_mode, queue);
175} 180}
176 181
177static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode, 182static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
178 u8 ac) 183 int queue)
179{ 184{
180 op_mode->ops->queue_not_full(op_mode, ac); 185 op_mode->ops->queue_not_full(op_mode, queue);
181} 186}
182 187
183static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, 188static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
@@ -208,6 +213,11 @@ static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
208 op_mode->ops->nic_config(op_mode); 213 op_mode->ops->nic_config(op_mode);
209} 214}
210 215
216static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
217{
218 op_mode->ops->wimax_active(op_mode);
219}
220
211/***************************************************** 221/*****************************************************
212* Op mode layers implementations 222* Op mode layers implementations
213******************************************************/ 223******************************************************/
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index c5e339ee918b..0c8a1c2d8847 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -60,17 +60,18 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65
63#include <linux/module.h> 66#include <linux/module.h>
64#include <linux/pci.h> 67#include <linux/pci.h>
65#include <linux/pci-aspm.h> 68#include <linux/pci-aspm.h>
66 69
67#include "iwl-io.h"
68#include "iwl-shared.h"
69#include "iwl-trans.h" 70#include "iwl-trans.h"
70#include "iwl-csr.h"
71#include "iwl-cfg.h" 71#include "iwl-cfg.h"
72#include "iwl-drv.h" 72#include "iwl-drv.h"
73#include "iwl-trans.h" 73#include "iwl-trans.h"
74#include "iwl-trans-pcie-int.h"
74 75
75#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 76#define IWL_PCI_DEVICE(dev, subdev, cfg) \
76 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ 77 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
@@ -261,61 +262,46 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
261/* PCI registers */ 262/* PCI registers */
262#define PCI_CFG_RETRY_TIMEOUT 0x041 263#define PCI_CFG_RETRY_TIMEOUT 0x041
263 264
265#ifndef CONFIG_IWLWIFI_IDI
266
264static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 267static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
265{ 268{
266 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 269 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
267 struct iwl_shared *shrd;
268 struct iwl_trans *iwl_trans; 270 struct iwl_trans *iwl_trans;
269 int err; 271 struct iwl_trans_pcie *trans_pcie;
270
271 shrd = kzalloc(sizeof(*iwl_trans->shrd), GFP_KERNEL);
272 if (!shrd) {
273 dev_printk(KERN_ERR, &pdev->dev,
274 "Couldn't allocate iwl_shared");
275 err = -ENOMEM;
276 goto out_free_bus;
277 }
278 272
279#ifdef CONFIG_IWLWIFI_IDI 273 iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
280 iwl_trans = iwl_trans_idi_alloc(shrd, pdev, ent); 274 if (iwl_trans == NULL)
281#else 275 return -ENOMEM;
282 iwl_trans = iwl_trans_pcie_alloc(shrd, pdev, ent);
283#endif
284 if (iwl_trans == NULL) {
285 err = -ENOMEM;
286 goto out_free_bus;
287 }
288 276
289 shrd->trans = iwl_trans;
290 pci_set_drvdata(pdev, iwl_trans); 277 pci_set_drvdata(pdev, iwl_trans);
291 278
292 err = iwl_drv_start(shrd, iwl_trans, cfg); 279 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
293 if (err) 280 trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
281 if (!trans_pcie->drv)
294 goto out_free_trans; 282 goto out_free_trans;
295 283
296 return 0; 284 return 0;
297 285
298out_free_trans: 286out_free_trans:
299 iwl_trans_free(iwl_trans); 287 iwl_trans_pcie_free(iwl_trans);
300 pci_set_drvdata(pdev, NULL); 288 pci_set_drvdata(pdev, NULL);
301out_free_bus: 289 return -EFAULT;
302 kfree(shrd);
303 return err;
304} 290}
305 291
306static void __devexit iwl_pci_remove(struct pci_dev *pdev) 292static void __devexit iwl_pci_remove(struct pci_dev *pdev)
307{ 293{
308 struct iwl_trans *iwl_trans = pci_get_drvdata(pdev); 294 struct iwl_trans *trans = pci_get_drvdata(pdev);
309 struct iwl_shared *shrd = iwl_trans->shrd; 295 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
310 296
311 iwl_drv_stop(shrd); 297 iwl_drv_stop(trans_pcie->drv);
312 iwl_trans_free(shrd->trans); 298 iwl_trans_pcie_free(trans);
313 299
314 pci_set_drvdata(pdev, NULL); 300 pci_set_drvdata(pdev, NULL);
315
316 kfree(shrd);
317} 301}
318 302
303#endif /* CONFIG_IWLWIFI_IDI */
304
319#ifdef CONFIG_PM_SLEEP 305#ifdef CONFIG_PM_SLEEP
320 306
321static int iwl_pci_suspend(struct device *device) 307static int iwl_pci_suspend(struct device *device)
@@ -360,6 +346,15 @@ static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
360 346
361#endif 347#endif
362 348
349#ifdef CONFIG_IWLWIFI_IDI
350/*
351 * Defined externally in iwl-idi.c
352 */
353int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
354void __devexit iwl_pci_remove(struct pci_dev *pdev);
355
356#endif /* CONFIG_IWLWIFI_IDI */
357
363static struct pci_driver iwl_pci_driver = { 358static struct pci_driver iwl_pci_driver = {
364 .name = DRV_NAME, 359 .name = DRV_NAME,
365 .id_table = iwl_hw_card_ids, 360 .id_table = iwl_hw_card_ids,
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
new file mode 100644
index 000000000000..f166955340fe
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -0,0 +1,288 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/slab.h>
65#include <linux/string.h>
66
67#include "iwl-debug.h"
68#include "iwl-dev.h"
69
70#include "iwl-phy-db.h"
71
72#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
73
74struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
75{
76 struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
77 GFP_KERNEL);
78
79 if (!phy_db)
80 return phy_db;
81
82 phy_db->dev = dev;
83
84 /* TODO: add default values of the phy db. */
85 return phy_db;
86}
87
88/*
89 * get phy db section: returns a pointer to a phy db section specified by
90 * type and channel group id.
91 */
92static struct iwl_phy_db_entry *
93iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
94 enum iwl_phy_db_section_type type,
95 u16 chg_id)
96{
97 if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
98 return NULL;
99
100 switch (type) {
101 case IWL_PHY_DB_CFG:
102 return &phy_db->cfg;
103 case IWL_PHY_DB_CALIB_NCH:
104 return &phy_db->calib_nch;
105 case IWL_PHY_DB_CALIB_CH:
106 return &phy_db->calib_ch;
107 case IWL_PHY_DB_CALIB_CHG_PAPD:
108 if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
109 return NULL;
110 return &phy_db->calib_ch_group_papd[chg_id];
111 case IWL_PHY_DB_CALIB_CHG_TXP:
112 if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
113 return NULL;
114 return &phy_db->calib_ch_group_txp[chg_id];
115 default:
116 return NULL;
117 }
118 return NULL;
119}
120
121static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
122 enum iwl_phy_db_section_type type,
123 u16 chg_id)
124{
125 struct iwl_phy_db_entry *entry =
126 iwl_phy_db_get_section(phy_db, type, chg_id);
127 if (!entry)
128 return;
129
130 kfree(entry->data);
131 entry->data = NULL;
132 entry->size = 0;
133}
134
135void iwl_phy_db_free(struct iwl_phy_db *phy_db)
136{
137 int i;
138
139 if (!phy_db)
140 return;
141
142 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
143 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
144 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
145 for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
146 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
147 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
148 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
149
150 kfree(phy_db);
151}
152
153int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
154 enum iwl_phy_db_section_type type, u8 *data,
155 u16 size, gfp_t alloc_ctx)
156{
157 struct iwl_phy_db_entry *entry;
158 u16 chg_id = 0;
159
160 if (!phy_db)
161 return -EINVAL;
162
163 if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
164 type == IWL_PHY_DB_CALIB_CHG_TXP)
165 chg_id = le16_to_cpup((__le16 *)data);
166
167 entry = iwl_phy_db_get_section(phy_db, type, chg_id);
168 if (!entry)
169 return -EINVAL;
170
171 kfree(entry->data);
172 entry->data = kmemdup(data, size, alloc_ctx);
173 if (!entry->data) {
174 entry->size = 0;
175 return -ENOMEM;
176 }
177
178 entry->size = size;
179
180 if (type == IWL_PHY_DB_CALIB_CH) {
181 phy_db->channel_num = le32_to_cpup((__le32 *)data);
182 phy_db->channel_size =
183 (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
184 }
185
186 return 0;
187}
188
189static int is_valid_channel(u16 ch_id)
190{
191 if (ch_id <= 14 ||
192 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
193 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
194 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
195 return 1;
196 return 0;
197}
198
199static u8 ch_id_to_ch_index(u16 ch_id)
200{
201 if (WARN_ON(!is_valid_channel(ch_id)))
202 return 0xff;
203
204 if (ch_id <= 14)
205 return ch_id - 1;
206 if (ch_id <= 64)
207 return (ch_id + 20) / 4;
208 if (ch_id <= 140)
209 return (ch_id - 12) / 4;
210 return (ch_id - 13) / 4;
211}
212
213
214static u16 channel_id_to_papd(u16 ch_id)
215{
216 if (WARN_ON(!is_valid_channel(ch_id)))
217 return 0xff;
218
219 if (1 <= ch_id && ch_id <= 14)
220 return 0;
221 if (36 <= ch_id && ch_id <= 64)
222 return 1;
223 if (100 <= ch_id && ch_id <= 140)
224 return 2;
225 return 3;
226}
227
228static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
229{
230 struct iwl_phy_db_chg_txp *txp_chg;
231 int i;
232 u8 ch_index = ch_id_to_ch_index(ch_id);
233 if (ch_index == 0xff)
234 return 0xff;
235
236 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
237 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
238 if (!txp_chg)
239 return 0xff;
240 /*
241 * Looking for the first channel group that its max channel is
242 * higher then wanted channel.
243 */
244 if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
245 return i;
246 }
247 return 0xff;
248}
249
250int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
251 enum iwl_phy_db_section_type type, u8 **data,
252 u16 *size, u16 ch_id)
253{
254 struct iwl_phy_db_entry *entry;
255 u32 channel_num;
256 u32 channel_size;
257 u16 ch_group_id = 0;
258 u16 index;
259
260 if (!phy_db)
261 return -EINVAL;
262
263 /* find wanted channel group */
264 if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
265 ch_group_id = channel_id_to_papd(ch_id);
266 else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
267 ch_group_id = channel_id_to_txp(phy_db, ch_id);
268
269 entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
270 if (!entry)
271 return -EINVAL;
272
273 if (type == IWL_PHY_DB_CALIB_CH) {
274 index = ch_id_to_ch_index(ch_id);
275 channel_num = phy_db->channel_num;
276 channel_size = phy_db->channel_size;
277 if (index >= channel_num) {
278 IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
279 return -EINVAL;
280 }
281 *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
282 *size = channel_size;
283 } else {
284 *data = entry->data;
285 *size = entry->size;
286 }
287 return 0;
288}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
new file mode 100644
index 000000000000..c34c6a9303ab
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -0,0 +1,129 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __IWL_PHYDB_H__
65#define __IWL_PHYDB_H__
66
67#include <linux/types.h>
68
69#define IWL_NUM_PAPD_CH_GROUPS 4
70#define IWL_NUM_TXP_CH_GROUPS 8
71
72struct iwl_phy_db_entry {
73 u16 size;
74 u8 *data;
75};
76
77struct iwl_shared;
78
79/**
80 * struct iwl_phy_db - stores phy configuration and calibration data.
81 *
82 * @cfg: phy configuration.
83 * @calib_nch: non channel specific calibration data.
84 * @calib_ch: channel specific calibration data.
85 * @calib_ch_group_papd: calibration data related to papd channel group.
86 * @calib_ch_group_txp: calibration data related to tx power chanel group.
87 */
88struct iwl_phy_db {
89 struct iwl_phy_db_entry cfg;
90 struct iwl_phy_db_entry calib_nch;
91 struct iwl_phy_db_entry calib_ch;
92 struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
93 struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
94
95 u32 channel_num;
96 u32 channel_size;
97
98 /* for an access to the logger */
99 struct device *dev;
100};
101
102enum iwl_phy_db_section_type {
103 IWL_PHY_DB_CFG = 1,
104 IWL_PHY_DB_CALIB_NCH,
105 IWL_PHY_DB_CALIB_CH,
106 IWL_PHY_DB_CALIB_CHG_PAPD,
107 IWL_PHY_DB_CALIB_CHG_TXP,
108 IWL_PHY_DB_MAX
109};
110
111/* for parsing of tx power channel group data that comes from the firmware*/
112struct iwl_phy_db_chg_txp {
113 __le32 space;
114 __le16 max_channel_idx;
115} __packed;
116
117struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
118
119void iwl_phy_db_free(struct iwl_phy_db *phy_db);
120
121int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
122 enum iwl_phy_db_section_type type, u8 *data,
123 u16 size, gfp_t alloc_ctx);
124
125int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
126 enum iwl_phy_db_section_type type, u8 **data,
127 u16 *size, u16 ch_id);
128
129#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 958d9d09aee3..8352265dbc4b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -37,13 +37,12 @@
37#include "iwl-eeprom.h" 37#include "iwl-eeprom.h"
38#include "iwl-dev.h" 38#include "iwl-dev.h"
39#include "iwl-agn.h" 39#include "iwl-agn.h"
40#include "iwl-core.h"
41#include "iwl-io.h" 40#include "iwl-io.h"
42#include "iwl-commands.h" 41#include "iwl-commands.h"
43#include "iwl-debug.h" 42#include "iwl-debug.h"
44#include "iwl-power.h" 43#include "iwl-power.h"
45#include "iwl-trans.h" 44#include "iwl-trans.h"
46#include "iwl-shared.h" 45#include "iwl-modparams.h"
47 46
48/* 47/*
49 * Setting power level allows the card to go to sleep when not busy. 48 * Setting power level allows the card to go to sleep when not busy.
@@ -167,7 +166,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
167 u8 skip; 166 u8 skip;
168 u32 slp_itrvl; 167 u32 slp_itrvl;
169 168
170 if (cfg(priv)->adv_pm) { 169 if (priv->cfg->adv_pm) {
171 table = apm_range_2; 170 table = apm_range_2;
172 if (period <= IWL_DTIM_RANGE_1_MAX) 171 if (period <= IWL_DTIM_RANGE_1_MAX)
173 table = apm_range_1; 172 table = apm_range_1;
@@ -215,13 +214,13 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
215 else 214 else
216 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; 215 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
217 216
218 if (cfg(priv)->base_params->shadow_reg_enable) 217 if (priv->cfg->base_params->shadow_reg_enable)
219 cmd->flags |= IWL_POWER_SHADOW_REG_ENA; 218 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
220 else 219 else
221 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; 220 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
222 221
223 if (iwl_advanced_bt_coexist(priv)) { 222 if (iwl_advanced_bt_coexist(priv)) {
224 if (!cfg(priv)->bt_params->bt_sco_disable) 223 if (!priv->cfg->bt_params->bt_sco_disable)
225 cmd->flags |= IWL_POWER_BT_SCO_ENA; 224 cmd->flags |= IWL_POWER_BT_SCO_ENA;
226 else 225 else
227 cmd->flags &= ~IWL_POWER_BT_SCO_ENA; 226 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -268,61 +267,6 @@ static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
268 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); 267 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
269} 268}
270 269
271static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
272 struct iwl_powertable_cmd *cmd,
273 int dynps_ms, int wakeup_period)
274{
275 /*
276 * These are the original power level 3 sleep successions. The
277 * device may behave better with such succession and was also
278 * only tested with that. Just like the original sleep commands,
279 * also adjust the succession here to the wakeup_period below.
280 * The ranges are the same as for the sleep commands, 0-2, 3-9
281 * and >10, which is selected based on the DTIM interval for
282 * the sleep index but here we use the wakeup period since that
283 * is what we need to do for the latency requirements.
284 */
285 static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 };
286 static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 };
287 static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF };
288 const u8 *slp_succ = slp_succ_r0;
289 int i;
290
291 if (wakeup_period > IWL_DTIM_RANGE_0_MAX)
292 slp_succ = slp_succ_r1;
293 if (wakeup_period > IWL_DTIM_RANGE_1_MAX)
294 slp_succ = slp_succ_r2;
295
296 memset(cmd, 0, sizeof(*cmd));
297
298 cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
299 IWL_POWER_FAST_PD; /* no use seeing frames for others */
300
301 if (priv->power_data.bus_pm)
302 cmd->flags |= IWL_POWER_PCI_PM_MSK;
303
304 if (cfg(priv)->base_params->shadow_reg_enable)
305 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
306 else
307 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
308
309 if (iwl_advanced_bt_coexist(priv)) {
310 if (!cfg(priv)->bt_params->bt_sco_disable)
311 cmd->flags |= IWL_POWER_BT_SCO_ENA;
312 else
313 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
314 }
315
316 cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
317 cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);
318
319 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
320 cmd->sleep_interval[i] =
321 cpu_to_le32(min_t(int, slp_succ[i], wakeup_period));
322
323 IWL_DEBUG_POWER(priv, "Automatic sleep command\n");
324}
325
326static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) 270static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
327{ 271{
328 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); 272 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
@@ -350,7 +294,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
350 294
351 if (priv->wowlan) 295 if (priv->wowlan)
352 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); 296 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
353 else if (!cfg(priv)->base_params->no_idle_support && 297 else if (!priv->cfg->base_params->no_idle_support &&
354 priv->hw->conf.flags & IEEE80211_CONF_IDLE) 298 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
355 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); 299 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
356 else if (iwl_tt_is_low_power_state(priv)) { 300 else if (iwl_tt_is_low_power_state(priv)) {
@@ -363,18 +307,15 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
363 iwl_static_sleep_cmd(priv, cmd, 307 iwl_static_sleep_cmd(priv, cmd,
364 priv->power_data.debug_sleep_level_override, 308 priv->power_data.debug_sleep_level_override,
365 dtimper); 309 dtimper);
366 else if (iwlagn_mod_params.no_sleep_autoadjust) { 310 else {
367 if (iwlagn_mod_params.power_level > IWL_POWER_INDEX_1 && 311 if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 &&
368 iwlagn_mod_params.power_level <= IWL_POWER_INDEX_5) 312 iwlwifi_mod_params.power_level <= IWL_POWER_INDEX_5)
369 iwl_static_sleep_cmd(priv, cmd, 313 iwl_static_sleep_cmd(priv, cmd,
370 iwlagn_mod_params.power_level, dtimper); 314 iwlwifi_mod_params.power_level, dtimper);
371 else 315 else
372 iwl_static_sleep_cmd(priv, cmd, 316 iwl_static_sleep_cmd(priv, cmd,
373 IWL_POWER_INDEX_1, dtimper); 317 IWL_POWER_INDEX_1, dtimper);
374 } else 318 }
375 iwl_power_fill_sleep_cmd(priv, cmd,
376 priv->hw->conf.dynamic_ps_timeout,
377 priv->hw->conf.max_sleep_period);
378} 319}
379 320
380int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, 321int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
@@ -403,12 +344,12 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
403 } 344 }
404 345
405 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) 346 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
406 set_bit(STATUS_POWER_PMI, &priv->shrd->status); 347 iwl_dvm_set_pmi(priv, true);
407 348
408 ret = iwl_set_power(priv, cmd); 349 ret = iwl_set_power(priv, cmd);
409 if (!ret) { 350 if (!ret) {
410 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) 351 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
411 clear_bit(STATUS_POWER_PMI, &priv->shrd->status); 352 iwl_dvm_set_pmi(priv, false);
412 353
413 if (update_chains) 354 if (update_chains)
414 iwl_update_chain_flags(priv); 355 iwl_update_chain_flags(priv);
@@ -436,7 +377,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
436/* initialize to default */ 377/* initialize to default */
437void iwl_power_initialize(struct iwl_priv *priv) 378void iwl_power_initialize(struct iwl_priv *priv)
438{ 379{
439 priv->power_data.bus_pm = trans(priv)->pm_support; 380 priv->power_data.bus_pm = priv->trans->pm_support;
440 381
441 priv->power_data.debug_sleep_level_override = -1; 382 priv->power_data.debug_sleep_level_override = -1;
442 383
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 07a19fce5fdc..21afc92efacb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -30,15 +30,6 @@
30 30
31#include "iwl-commands.h" 31#include "iwl-commands.h"
32 32
33enum iwl_power_level {
34 IWL_POWER_INDEX_1,
35 IWL_POWER_INDEX_2,
36 IWL_POWER_INDEX_3,
37 IWL_POWER_INDEX_4,
38 IWL_POWER_INDEX_5,
39 IWL_POWER_NUM
40};
41
42struct iwl_power_mgr { 33struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd; 34 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next; 35 struct iwl_powertable_cmd sleep_cmd_next;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 75dc20bd965b..3b1069290fa9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -223,12 +223,33 @@
223#define SCD_AIT (SCD_BASE + 0x0c) 223#define SCD_AIT (SCD_BASE + 0x0c)
224#define SCD_TXFACT (SCD_BASE + 0x10) 224#define SCD_TXFACT (SCD_BASE + 0x10)
225#define SCD_ACTIVE (SCD_BASE + 0x14) 225#define SCD_ACTIVE (SCD_BASE + 0x14)
226#define SCD_QUEUE_WRPTR(x) (SCD_BASE + 0x18 + (x) * 4)
227#define SCD_QUEUE_RDPTR(x) (SCD_BASE + 0x68 + (x) * 4)
228#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) 226#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
229#define SCD_AGGR_SEL (SCD_BASE + 0x248) 227#define SCD_AGGR_SEL (SCD_BASE + 0x248)
230#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) 228#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
231#define SCD_QUEUE_STATUS_BITS(x) (SCD_BASE + 0x10c + (x) * 4) 229
230static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
231{
232 if (chnl < 20)
233 return SCD_BASE + 0x18 + chnl * 4;
234 WARN_ON_ONCE(chnl >= 32);
235 return SCD_BASE + 0x284 + (chnl - 20) * 4;
236}
237
238static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
239{
240 if (chnl < 20)
241 return SCD_BASE + 0x68 + chnl * 4;
242 WARN_ON_ONCE(chnl >= 32);
243 return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
244}
245
246static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
247{
248 if (chnl < 20)
249 return SCD_BASE + 0x10c + chnl * 4;
250 WARN_ON_ONCE(chnl >= 32);
251 return SCD_BASE + 0x384 + (chnl - 20) * 4;
252}
232 253
233/*********************** END TX SCHEDULER *************************************/ 254/*********************** END TX SCHEDULER *************************************/
234 255
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 902efe4bc898..a8437a6bc18e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -32,7 +32,6 @@
32 32
33#include "iwl-eeprom.h" 33#include "iwl-eeprom.h"
34#include "iwl-dev.h" 34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-io.h" 35#include "iwl-io.h"
37#include "iwl-agn.h" 36#include "iwl-agn.h"
38#include "iwl-trans.h" 37#include "iwl-trans.h"
@@ -69,7 +68,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
69 if (!test_bit(STATUS_READY, &priv->status) || 68 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) || 69 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) || 70 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->shrd->status)) 71 test_bit(STATUS_FW_ERROR, &priv->status))
73 return -EIO; 72 return -EIO;
74 73
75 ret = iwl_dvm_send_cmd(priv, &cmd); 74 ret = iwl_dvm_send_cmd(priv, &cmd);
@@ -451,6 +450,46 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
451 return iwl_limit_dwell(priv, passive); 450 return iwl_limit_dwell(priv, passive);
452} 451}
453 452
453/* Return valid, unused, channel for a passive scan to reset the RF */
454static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
455 enum ieee80211_band band)
456{
457 const struct iwl_channel_info *ch_info;
458 int i;
459 u8 channel = 0;
460 u8 min, max;
461 struct iwl_rxon_context *ctx;
462
463 if (band == IEEE80211_BAND_5GHZ) {
464 min = 14;
465 max = priv->channel_count;
466 } else {
467 min = 0;
468 max = 14;
469 }
470
471 for (i = min; i < max; i++) {
472 bool busy = false;
473
474 for_each_context(priv, ctx) {
475 busy = priv->channel_info[i].channel ==
476 le16_to_cpu(ctx->staging.channel);
477 if (busy)
478 break;
479 }
480
481 if (busy)
482 continue;
483
484 channel = priv->channel_info[i].channel;
485 ch_info = iwl_get_channel_info(priv, band, channel);
486 if (is_channel_valid(ch_info))
487 break;
488 }
489
490 return channel;
491}
492
454static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, 493static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
455 struct ieee80211_vif *vif, 494 struct ieee80211_vif *vif,
456 enum ieee80211_band band, 495 enum ieee80211_band band,
@@ -633,12 +672,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
633 u16 rx_chain = 0; 672 u16 rx_chain = 0;
634 enum ieee80211_band band; 673 enum ieee80211_band band;
635 u8 n_probes = 0; 674 u8 n_probes = 0;
636 u8 rx_ant = hw_params(priv).valid_rx_ant; 675 u8 rx_ant = priv->hw_params.valid_rx_ant;
637 u8 rate; 676 u8 rate;
638 bool is_active = false; 677 bool is_active = false;
639 int chan_mod; 678 int chan_mod;
640 u8 active_chains; 679 u8 active_chains;
641 u8 scan_tx_antennas = hw_params(priv).valid_tx_ant; 680 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
642 int ret; 681 int ret;
643 682
644 lockdep_assert_held(&priv->mutex); 683 lockdep_assert_held(&priv->mutex);
@@ -751,8 +790,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
751 * Internal scans are passive, so we can indiscriminately set 790 * Internal scans are passive, so we can indiscriminately set
752 * the BT ignore flag on 2.4 GHz since it applies to TX only. 791 * the BT ignore flag on 2.4 GHz since it applies to TX only.
753 */ 792 */
754 if (cfg(priv)->bt_params && 793 if (priv->cfg->bt_params &&
755 cfg(priv)->bt_params->advanced_bt_coexist) 794 priv->cfg->bt_params->advanced_bt_coexist)
756 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; 795 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
757 break; 796 break;
758 case IEEE80211_BAND_5GHZ: 797 case IEEE80211_BAND_5GHZ:
@@ -793,12 +832,9 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
793 832
794 band = priv->scan_band; 833 band = priv->scan_band;
795 834
796 if (cfg(priv)->scan_rx_antennas[band])
797 rx_ant = cfg(priv)->scan_rx_antennas[band];
798
799 if (band == IEEE80211_BAND_2GHZ && 835 if (band == IEEE80211_BAND_2GHZ &&
800 cfg(priv)->bt_params && 836 priv->cfg->bt_params &&
801 cfg(priv)->bt_params->advanced_bt_coexist) { 837 priv->cfg->bt_params->advanced_bt_coexist) {
802 /* transmit 2.4 GHz probes only on first antenna */ 838 /* transmit 2.4 GHz probes only on first antenna */
803 scan_tx_antennas = first_antenna(scan_tx_antennas); 839 scan_tx_antennas = first_antenna(scan_tx_antennas);
804 } 840 }
@@ -809,8 +845,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
809 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); 845 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
810 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); 846 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
811 847
812 /* In power save mode use one chain, otherwise use all chains */ 848 /*
813 if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) { 849 * In power save mode while associated use one chain,
850 * otherwise use all chains
851 */
852 if (test_bit(STATUS_POWER_PMI, &priv->status) &&
853 !(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) {
814 /* rx_ant has been set to all valid chains previously */ 854 /* rx_ant has been set to all valid chains previously */
815 active_chains = rx_ant & 855 active_chains = rx_ant &
816 ((u8)(priv->chain_noise_data.active_chains)); 856 ((u8)(priv->chain_noise_data.active_chains));
@@ -822,8 +862,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
822 862
823 rx_ant = first_antenna(active_chains); 863 rx_ant = first_antenna(active_chains);
824 } 864 }
825 if (cfg(priv)->bt_params && 865 if (priv->cfg->bt_params &&
826 cfg(priv)->bt_params->advanced_bt_coexist && 866 priv->cfg->bt_params->advanced_bt_coexist &&
827 priv->bt_full_concurrent) { 867 priv->bt_full_concurrent) {
828 /* operated as 1x1 in full concurrency mode */ 868 /* operated as 1x1 in full concurrency mode */
829 rx_ant = first_antenna(rx_ant); 869 rx_ant = first_antenna(rx_ant);
@@ -831,7 +871,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
831 871
832 /* MIMO is not used here, but value is required */ 872 /* MIMO is not used here, but value is required */
833 rx_chain |= 873 rx_chain |=
834 hw_params(priv).valid_rx_ant << RXON_RX_CHAIN_VALID_POS; 874 priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
835 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; 875 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
836 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 876 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
837 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 877 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
@@ -944,7 +984,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
944 984
945void iwl_init_scan_params(struct iwl_priv *priv) 985void iwl_init_scan_params(struct iwl_priv *priv)
946{ 986{
947 u8 ant_idx = fls(hw_params(priv).valid_tx_ant) - 1; 987 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
948 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) 988 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
949 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; 989 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
950 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 990 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index 76f7f9251436..060aac3e22f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -71,7 +71,6 @@
71#include <net/netlink.h> 71#include <net/netlink.h>
72 72
73#include "iwl-dev.h" 73#include "iwl-dev.h"
74#include "iwl-core.h"
75#include "iwl-debug.h" 74#include "iwl-debug.h"
76#include "iwl-io.h" 75#include "iwl-io.h"
77#include "iwl-agn.h" 76#include "iwl-agn.h"
@@ -184,9 +183,10 @@ static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
184 "Run out of memory for messages to user space ?\n"); 183 "Run out of memory for messages to user space ?\n");
185 return; 184 return;
186 } 185 }
187 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT); 186 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
188 /* the length doesn't include len_n_flags field, so add it manually */ 187 /* the length doesn't include len_n_flags field, so add it manually */
189 NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data); 188 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
189 goto nla_put_failure;
190 cfg80211_testmode_event(skb, GFP_ATOMIC); 190 cfg80211_testmode_event(skb, GFP_ATOMIC);
191 return; 191 return;
192 192
@@ -218,7 +218,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
218 if (priv->testmode_trace.trace_enabled) { 218 if (priv->testmode_trace.trace_enabled) {
219 if (priv->testmode_trace.cpu_addr && 219 if (priv->testmode_trace.cpu_addr &&
220 priv->testmode_trace.dma_addr) 220 priv->testmode_trace.dma_addr)
221 dma_free_coherent(trans(priv)->dev, 221 dma_free_coherent(priv->trans->dev,
222 priv->testmode_trace.total_size, 222 priv->testmode_trace.total_size,
223 priv->testmode_trace.cpu_addr, 223 priv->testmode_trace.cpu_addr,
224 priv->testmode_trace.dma_addr); 224 priv->testmode_trace.dma_addr);
@@ -314,8 +314,9 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
314 memcpy(reply_buf, &(pkt->hdr), reply_len); 314 memcpy(reply_buf, &(pkt->hdr), reply_len);
315 iwl_free_resp(&cmd); 315 iwl_free_resp(&cmd);
316 316
317 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT); 317 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
318 NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf); 318 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
319 goto nla_put_failure;
319 return cfg80211_testmode_reply(skb); 320 return cfg80211_testmode_reply(skb);
320 321
321nla_put_failure: 322nla_put_failure:
@@ -371,7 +372,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
371 372
372 switch (cmd) { 373 switch (cmd) {
373 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: 374 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
374 val32 = iwl_read_direct32(trans(priv), ofs); 375 val32 = iwl_read_direct32(priv->trans, ofs);
375 IWL_INFO(priv, "32bit value to read 0x%x\n", val32); 376 IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
376 377
377 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); 378 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@@ -379,7 +380,8 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
379 IWL_ERR(priv, "Memory allocation fail\n"); 380 IWL_ERR(priv, "Memory allocation fail\n");
380 return -ENOMEM; 381 return -ENOMEM;
381 } 382 }
382 NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32); 383 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
384 goto nla_put_failure;
383 status = cfg80211_testmode_reply(skb); 385 status = cfg80211_testmode_reply(skb);
384 if (status < 0) 386 if (status < 0)
385 IWL_ERR(priv, "Error sending msg : %d\n", status); 387 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -391,7 +393,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
391 } else { 393 } else {
392 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); 394 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
393 IWL_INFO(priv, "32bit value to write 0x%x\n", val32); 395 IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
394 iwl_write_direct32(trans(priv), ofs, val32); 396 iwl_write_direct32(priv->trans, ofs, val32);
395 } 397 }
396 break; 398 break;
397 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: 399 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
@@ -401,7 +403,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
401 } else { 403 } else {
402 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]); 404 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
403 IWL_INFO(priv, "8bit value to write 0x%x\n", val8); 405 IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
404 iwl_write8(trans(priv), ofs, val8); 406 iwl_write8(priv->trans, ofs, val8);
405 } 407 }
406 break; 408 break;
407 default: 409 default:
@@ -420,10 +422,13 @@ nla_put_failure:
420static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv) 422static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
421{ 423{
422 struct iwl_notification_wait calib_wait; 424 struct iwl_notification_wait calib_wait;
425 static const u8 calib_complete[] = {
426 CALIBRATION_COMPLETE_NOTIFICATION
427 };
423 int ret; 428 int ret;
424 429
425 iwl_init_notification_wait(&priv->notif_wait, &calib_wait, 430 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
426 CALIBRATION_COMPLETE_NOTIFICATION, 431 calib_complete, ARRAY_SIZE(calib_complete),
427 NULL, NULL); 432 NULL, NULL);
428 ret = iwl_init_alive_start(priv); 433 ret = iwl_init_alive_start(priv);
429 if (ret) { 434 if (ret) {
@@ -461,7 +466,7 @@ cfg_init_calib_error:
461static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) 466static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
462{ 467{
463 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 468 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
464 struct iwl_trans *trans = trans(priv); 469 struct iwl_trans *trans = priv->trans;
465 struct sk_buff *skb; 470 struct sk_buff *skb;
466 unsigned char *rsp_data_ptr = NULL; 471 unsigned char *rsp_data_ptr = NULL;
467 int status = 0, rsp_data_len = 0; 472 int status = 0, rsp_data_len = 0;
@@ -470,18 +475,19 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
470 475
471 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { 476 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
472 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: 477 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
473 rsp_data_ptr = (unsigned char *)cfg(priv)->name; 478 rsp_data_ptr = (unsigned char *)priv->cfg->name;
474 rsp_data_len = strlen(cfg(priv)->name); 479 rsp_data_len = strlen(priv->cfg->name);
475 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 480 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
476 rsp_data_len + 20); 481 rsp_data_len + 20);
477 if (!skb) { 482 if (!skb) {
478 IWL_ERR(priv, "Memory allocation fail\n"); 483 IWL_ERR(priv, "Memory allocation fail\n");
479 return -ENOMEM; 484 return -ENOMEM;
480 } 485 }
481 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, 486 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
482 IWL_TM_CMD_DEV2APP_SYNC_RSP); 487 IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
483 NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP, 488 nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
484 rsp_data_len, rsp_data_ptr); 489 rsp_data_len, rsp_data_ptr))
490 goto nla_put_failure;
485 status = cfg80211_testmode_reply(skb); 491 status = cfg80211_testmode_reply(skb);
486 if (status < 0) 492 if (status < 0)
487 IWL_ERR(priv, "Error sending msg : %d\n", status); 493 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -529,18 +535,19 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
529 break; 535 break;
530 536
531 case IWL_TM_CMD_APP2DEV_GET_EEPROM: 537 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
532 if (priv->shrd->eeprom) { 538 if (priv->eeprom) {
533 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 539 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
534 cfg(priv)->base_params->eeprom_size + 20); 540 priv->cfg->base_params->eeprom_size + 20);
535 if (!skb) { 541 if (!skb) {
536 IWL_ERR(priv, "Memory allocation fail\n"); 542 IWL_ERR(priv, "Memory allocation fail\n");
537 return -ENOMEM; 543 return -ENOMEM;
538 } 544 }
539 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, 545 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
540 IWL_TM_CMD_DEV2APP_EEPROM_RSP); 546 IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
541 NLA_PUT(skb, IWL_TM_ATTR_EEPROM, 547 nla_put(skb, IWL_TM_ATTR_EEPROM,
542 cfg(priv)->base_params->eeprom_size, 548 priv->cfg->base_params->eeprom_size,
543 priv->shrd->eeprom); 549 priv->eeprom))
550 goto nla_put_failure;
544 status = cfg80211_testmode_reply(skb); 551 status = cfg80211_testmode_reply(skb);
545 if (status < 0) 552 if (status < 0)
546 IWL_ERR(priv, "Error sending msg : %d\n", 553 IWL_ERR(priv, "Error sending msg : %d\n",
@@ -566,15 +573,16 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
566 IWL_ERR(priv, "Memory allocation fail\n"); 573 IWL_ERR(priv, "Memory allocation fail\n");
567 return -ENOMEM; 574 return -ENOMEM;
568 } 575 }
569 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, 576 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
570 priv->fw->ucode_ver); 577 priv->fw->ucode_ver))
578 goto nla_put_failure;
571 status = cfg80211_testmode_reply(skb); 579 status = cfg80211_testmode_reply(skb);
572 if (status < 0) 580 if (status < 0)
573 IWL_ERR(priv, "Error sending msg : %d\n", status); 581 IWL_ERR(priv, "Error sending msg : %d\n", status);
574 break; 582 break;
575 583
576 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: 584 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
577 devid = trans(priv)->hw_id; 585 devid = priv->trans->hw_id;
578 IWL_INFO(priv, "hw version: 0x%x\n", devid); 586 IWL_INFO(priv, "hw version: 0x%x\n", devid);
579 587
580 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); 588 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@@ -582,7 +590,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
582 IWL_ERR(priv, "Memory allocation fail\n"); 590 IWL_ERR(priv, "Memory allocation fail\n");
583 return -ENOMEM; 591 return -ENOMEM;
584 } 592 }
585 NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid); 593 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
594 goto nla_put_failure;
586 status = cfg80211_testmode_reply(skb); 595 status = cfg80211_testmode_reply(skb);
587 if (status < 0) 596 if (status < 0)
588 IWL_ERR(priv, "Error sending msg : %d\n", status); 597 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -598,13 +607,14 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
598 IWL_ERR(priv, "No uCode has not been loaded\n"); 607 IWL_ERR(priv, "No uCode has not been loaded\n");
599 return -EINVAL; 608 return -EINVAL;
600 } else { 609 } else {
601 img = &priv->fw->img[priv->shrd->ucode_type]; 610 img = &priv->fw->img[priv->cur_ucode];
602 inst_size = img->sec[IWL_UCODE_SECTION_INST].len; 611 inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
603 data_size = img->sec[IWL_UCODE_SECTION_DATA].len; 612 data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
604 } 613 }
605 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_TYPE, priv->shrd->ucode_type); 614 if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
606 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size); 615 nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
607 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size); 616 nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
617 goto nla_put_failure;
608 status = cfg80211_testmode_reply(skb); 618 status = cfg80211_testmode_reply(skb);
609 if (status < 0) 619 if (status < 0)
610 IWL_ERR(priv, "Error sending msg : %d\n", status); 620 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -639,7 +649,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
639 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 649 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
640 struct sk_buff *skb; 650 struct sk_buff *skb;
641 int status = 0; 651 int status = 0;
642 struct device *dev = trans(priv)->dev; 652 struct device *dev = priv->trans->dev;
643 653
644 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { 654 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
645 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: 655 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
@@ -678,9 +688,10 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
678 iwl_trace_cleanup(priv); 688 iwl_trace_cleanup(priv);
679 return -ENOMEM; 689 return -ENOMEM;
680 } 690 }
681 NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR, 691 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
682 sizeof(priv->testmode_trace.dma_addr), 692 sizeof(priv->testmode_trace.dma_addr),
683 (u64 *)&priv->testmode_trace.dma_addr); 693 (u64 *)&priv->testmode_trace.dma_addr))
694 goto nla_put_failure;
684 status = cfg80211_testmode_reply(skb); 695 status = cfg80211_testmode_reply(skb);
685 if (status < 0) { 696 if (status < 0) {
686 IWL_ERR(priv, "Error sending msg : %d\n", status); 697 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -725,9 +736,10 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
725 length = priv->testmode_trace.buff_size % 736 length = priv->testmode_trace.buff_size %
726 DUMP_CHUNK_SIZE; 737 DUMP_CHUNK_SIZE;
727 738
728 NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length, 739 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
729 priv->testmode_trace.trace_addr + 740 priv->testmode_trace.trace_addr +
730 (DUMP_CHUNK_SIZE * idx)); 741 (DUMP_CHUNK_SIZE * idx)))
742 goto nla_put_failure;
731 idx++; 743 idx++;
732 cb->args[4] = idx; 744 cb->args[4] = idx;
733 return 0; 745 return 0;
@@ -779,7 +791,7 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
779 791
780static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size) 792static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
781{ 793{
782 struct iwl_trans *trans = trans(priv); 794 struct iwl_trans *trans = priv->trans;
783 unsigned long flags; 795 unsigned long flags;
784 int i; 796 int i;
785 797
@@ -819,7 +831,7 @@ static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
819static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr, 831static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
820 u32 size, unsigned char *buf) 832 u32 size, unsigned char *buf)
821{ 833{
822 struct iwl_trans *trans = trans(priv); 834 struct iwl_trans *trans = priv->trans;
823 u32 val, i; 835 u32 val, i;
824 unsigned long flags; 836 unsigned long flags;
825 837
@@ -922,9 +934,10 @@ static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
922 length = priv->testmode_mem.buff_size % 934 length = priv->testmode_mem.buff_size %
923 DUMP_CHUNK_SIZE; 935 DUMP_CHUNK_SIZE;
924 936
925 NLA_PUT(skb, IWL_TM_ATTR_BUFFER_DUMP, length, 937 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
926 priv->testmode_mem.buff_addr + 938 priv->testmode_mem.buff_addr +
927 (DUMP_CHUNK_SIZE * idx)); 939 (DUMP_CHUNK_SIZE * idx)))
940 goto nla_put_failure;
928 idx++; 941 idx++;
929 cb->args[4] = idx; 942 cb->args[4] = idx;
930 return 0; 943 return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 1c2fe87bd7e2..6213c05a4b52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -34,17 +34,15 @@
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/wait.h> 35#include <linux/wait.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/timer.h>
37 38
38#include "iwl-fh.h" 39#include "iwl-fh.h"
39#include "iwl-csr.h" 40#include "iwl-csr.h"
40#include "iwl-shared.h"
41#include "iwl-trans.h" 41#include "iwl-trans.h"
42#include "iwl-debug.h" 42#include "iwl-debug.h"
43#include "iwl-io.h" 43#include "iwl-io.h"
44#include "iwl-op-mode.h" 44#include "iwl-op-mode.h"
45 45
46struct iwl_tx_queue;
47struct iwl_queue;
48struct iwl_host_cmd; 46struct iwl_host_cmd;
49 47
50/*This file includes the declaration that are internal to the 48/*This file includes the declaration that are internal to the
@@ -136,21 +134,14 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
136 return --index & (n_bd - 1); 134 return --index & (n_bd - 1);
137} 135}
138 136
139/*
140 * This queue number is required for proper operation
141 * because the ucode will stop/start the scheduler as
142 * required.
143 */
144#define IWL_IPAN_MCAST_QUEUE 8
145
146struct iwl_cmd_meta { 137struct iwl_cmd_meta {
147 /* only for SYNC commands, iff the reply skb is wanted */ 138 /* only for SYNC commands, iff the reply skb is wanted */
148 struct iwl_host_cmd *source; 139 struct iwl_host_cmd *source;
149 140
150 u32 flags;
151
152 DEFINE_DMA_UNMAP_ADDR(mapping); 141 DEFINE_DMA_UNMAP_ADDR(mapping);
153 DEFINE_DMA_UNMAP_LEN(len); 142 DEFINE_DMA_UNMAP_LEN(len);
143
144 u32 flags;
154}; 145};
155 146
156/* 147/*
@@ -188,72 +179,66 @@ struct iwl_queue {
188 * space less than this */ 179 * space less than this */
189}; 180};
190 181
182#define TFD_TX_CMD_SLOTS 256
183#define TFD_CMD_SLOTS 32
184
185struct iwl_pcie_tx_queue_entry {
186 struct iwl_device_cmd *cmd;
187 struct sk_buff *skb;
188 struct iwl_cmd_meta meta;
189};
190
191/** 191/**
192 * struct iwl_tx_queue - Tx Queue for DMA 192 * struct iwl_tx_queue - Tx Queue for DMA
193 * @q: generic Rx/Tx queue descriptor 193 * @q: generic Rx/Tx queue descriptor
194 * @bd: base of circular buffer of TFDs 194 * @tfds: transmit frame descriptors (DMA memory)
195 * @cmd: array of command/TX buffer pointers 195 * @entries: transmit entries (driver state)
196 * @meta: array of meta data for each command/tx buffer 196 * @lock: queue lock
197 * @dma_addr_cmd: physical address of cmd/tx buffer array 197 * @stuck_timer: timer that fires if queue gets stuck
198 * @txb: array of per-TFD driver data 198 * @trans_pcie: pointer back to transport (for timer)
199 * lock: queue lock
200 * @time_stamp: time (in jiffies) of last read_ptr change
201 * @need_update: indicates need to update read/write index 199 * @need_update: indicates need to update read/write index
202 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled 200 * @active: stores if queue is active
203 * @sta_id: valid if sched_retry is set
204 * @tid: valid if sched_retry is set
205 * 201 *
206 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 202 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
207 * descriptors) and required locking structures. 203 * descriptors) and required locking structures.
208 */ 204 */
209#define TFD_TX_CMD_SLOTS 256
210#define TFD_CMD_SLOTS 32
211
212struct iwl_tx_queue { 205struct iwl_tx_queue {
213 struct iwl_queue q; 206 struct iwl_queue q;
214 struct iwl_tfd *tfds; 207 struct iwl_tfd *tfds;
215 struct iwl_device_cmd **cmd; 208 struct iwl_pcie_tx_queue_entry *entries;
216 struct iwl_cmd_meta *meta;
217 struct sk_buff **skbs;
218 spinlock_t lock; 209 spinlock_t lock;
219 unsigned long time_stamp; 210 struct timer_list stuck_timer;
211 struct iwl_trans_pcie *trans_pcie;
220 u8 need_update; 212 u8 need_update;
221 u8 sched_retry;
222 u8 active; 213 u8 active;
223 u8 swq_id;
224
225 u16 sta_id;
226 u16 tid;
227}; 214};
228 215
229/** 216/**
230 * struct iwl_trans_pcie - PCIe transport specific data 217 * struct iwl_trans_pcie - PCIe transport specific data
231 * @rxq: all the RX queue data 218 * @rxq: all the RX queue data
232 * @rx_replenish: work that will be called when buffers need to be allocated 219 * @rx_replenish: work that will be called when buffers need to be allocated
220 * @drv - pointer to iwl_drv
233 * @trans: pointer to the generic transport area 221 * @trans: pointer to the generic transport area
234 * @irq - the irq number for the device 222 * @irq - the irq number for the device
235 * @irq_requested: true when the irq has been requested 223 * @irq_requested: true when the irq has been requested
236 * @scd_base_addr: scheduler sram base address in SRAM 224 * @scd_base_addr: scheduler sram base address in SRAM
237 * @scd_bc_tbls: pointer to the byte count table of the scheduler 225 * @scd_bc_tbls: pointer to the byte count table of the scheduler
238 * @kw: keep warm address 226 * @kw: keep warm address
239 * @ac_to_fifo: to what fifo is a specifc AC mapped ?
240 * @ac_to_queue: to what tx queue is a specifc AC mapped ?
241 * @mcast_queue:
242 * @txq: Tx DMA processing queues
243 * @txq_ctx_active_msk: what queue is active
244 * queue_stopped: tracks what queue is stopped
245 * queue_stop_count: tracks what SW queue is stopped
246 * @pci_dev: basic pci-network driver stuff 227 * @pci_dev: basic pci-network driver stuff
247 * @hw_base: pci hardware address support 228 * @hw_base: pci hardware address support
248 * @ucode_write_complete: indicates that the ucode has been copied. 229 * @ucode_write_complete: indicates that the ucode has been copied.
249 * @ucode_write_waitq: wait queue for uCode load 230 * @ucode_write_waitq: wait queue for uCode load
250 * @status - transport specific status flags 231 * @status - transport specific status flags
251 * @cmd_queue - command queue number 232 * @cmd_queue - command queue number
233 * @rx_buf_size_8k: 8 kB RX buffer size
234 * @rx_page_order: page order for receive buffer size
235 * @wd_timeout: queue watchdog timeout (jiffies)
252 */ 236 */
253struct iwl_trans_pcie { 237struct iwl_trans_pcie {
254 struct iwl_rx_queue rxq; 238 struct iwl_rx_queue rxq;
255 struct work_struct rx_replenish; 239 struct work_struct rx_replenish;
256 struct iwl_trans *trans; 240 struct iwl_trans *trans;
241 struct iwl_drv *drv;
257 242
258 /* INT ICT Table */ 243 /* INT ICT Table */
259 __le32 *ict_tbl; 244 __le32 *ict_tbl;
@@ -272,16 +257,9 @@ struct iwl_trans_pcie {
272 struct iwl_dma_ptr scd_bc_tbls; 257 struct iwl_dma_ptr scd_bc_tbls;
273 struct iwl_dma_ptr kw; 258 struct iwl_dma_ptr kw;
274 259
275 const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
276 const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
277 u8 mcast_queue[NUM_IWL_RXON_CTX];
278 u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
279
280 struct iwl_tx_queue *txq; 260 struct iwl_tx_queue *txq;
281 unsigned long txq_ctx_active_msk; 261 unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
282#define IWL_MAX_HW_QUEUES 32
283 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; 262 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
284 atomic_t queue_stop_count[4];
285 263
286 /* PCI bus related data */ 264 /* PCI bus related data */
287 struct pci_dev *pci_dev; 265 struct pci_dev *pci_dev;
@@ -293,11 +271,41 @@ struct iwl_trans_pcie {
293 u8 cmd_queue; 271 u8 cmd_queue;
294 u8 n_no_reclaim_cmds; 272 u8 n_no_reclaim_cmds;
295 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; 273 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
274 u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
275 u8 n_q_to_fifo;
276
277 bool rx_buf_size_8k;
278 u32 rx_page_order;
279
280 const char **command_names;
281
282 /* queue watchdog */
283 unsigned long wd_timeout;
296}; 284};
297 285
286/*****************************************************
287* DRIVER STATUS FUNCTIONS
288******************************************************/
289#define STATUS_HCMD_ACTIVE 0
290#define STATUS_DEVICE_ENABLED 1
291#define STATUS_TPOWER_PMI 2
292#define STATUS_INT_ENABLED 3
293
298#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \ 294#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
299 ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific)) 295 ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
300 296
297static inline struct iwl_trans *
298iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
299{
300 return container_of((void *)trans_pcie, struct iwl_trans,
301 trans_specific);
302}
303
304struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
305 const struct pci_device_id *ent,
306 const struct iwl_cfg *cfg);
307void iwl_trans_pcie_free(struct iwl_trans *trans);
308
301/***************************************************** 309/*****************************************************
302* RX 310* RX
303******************************************************/ 311******************************************************/
@@ -331,15 +339,12 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans,
331void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 339void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
332 struct iwl_tx_queue *txq, 340 struct iwl_tx_queue *txq,
333 u16 byte_cnt); 341 u16 byte_cnt);
334int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, 342void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue);
335 int sta_id, int tid);
336void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index); 343void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
337void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, 344void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
338 struct iwl_tx_queue *txq, 345 struct iwl_tx_queue *txq,
339 int tx_fifo_id, int scd_retry); 346 int tx_fifo_id, bool active);
340int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid); 347void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
341void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
342 enum iwl_rxon_context_id ctx,
343 int sta_id, int tid, int frame_limit, u16 ssn); 348 int sta_id, int tid, int frame_limit, u16 ssn);
344void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 349void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
345 int index, enum dma_data_direction dma_dir); 350 int index, enum dma_data_direction dma_dir);
@@ -350,8 +355,6 @@ int iwl_queue_space(const struct iwl_queue *q);
350/***************************************************** 355/*****************************************************
351* Error handling 356* Error handling
352******************************************************/ 357******************************************************/
353int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
354 char **buf, bool display);
355int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); 358int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
356void iwl_dump_csr(struct iwl_trans *trans); 359void iwl_dump_csr(struct iwl_trans *trans);
357 360
@@ -388,91 +391,28 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
388 iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); 391 iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
389} 392}
390 393
391/*
392 * we have 8 bits used like this:
393 *
394 * 7 6 5 4 3 2 1 0
395 * | | | | | | | |
396 * | | | | | | +-+-------- AC queue (0-3)
397 * | | | | | |
398 * | +-+-+-+-+------------ HW queue ID
399 * |
400 * +---------------------- unused
401 */
402static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
403{
404 BUG_ON(ac > 3); /* only have 2 bits */
405 BUG_ON(hwq > 31); /* only use 5 bits */
406
407 txq->swq_id = (hwq << 2) | ac;
408}
409
410static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
411{
412 return txq->swq_id & 0x3;
413}
414
415static inline void iwl_wake_queue(struct iwl_trans *trans, 394static inline void iwl_wake_queue(struct iwl_trans *trans,
416 struct iwl_tx_queue *txq) 395 struct iwl_tx_queue *txq)
417{ 396{
418 u8 queue = txq->swq_id; 397 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
419 u8 ac = queue & 3; 398
420 u8 hwq = (queue >> 2) & 0x1f; 399 if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
421 struct iwl_trans_pcie *trans_pcie = 400 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
422 IWL_TRANS_GET_PCIE_TRANS(trans); 401 iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
423
424 if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
425 if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
426 iwl_op_mode_queue_not_full(trans->op_mode, ac);
427 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d",
428 hwq, ac);
429 } else {
430 IWL_DEBUG_TX_QUEUES(trans,
431 "Don't wake hwq %d ac %d stop count %d",
432 hwq, ac,
433 atomic_read(&trans_pcie->queue_stop_count[ac]));
434 }
435 } 402 }
436} 403}
437 404
438static inline void iwl_stop_queue(struct iwl_trans *trans, 405static inline void iwl_stop_queue(struct iwl_trans *trans,
439 struct iwl_tx_queue *txq) 406 struct iwl_tx_queue *txq)
440{ 407{
441 u8 queue = txq->swq_id; 408 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
442 u8 ac = queue & 3;
443 u8 hwq = (queue >> 2) & 0x1f;
444 struct iwl_trans_pcie *trans_pcie =
445 IWL_TRANS_GET_PCIE_TRANS(trans);
446
447 if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
448 if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
449 iwl_op_mode_queue_full(trans->op_mode, ac);
450 IWL_DEBUG_TX_QUEUES(trans,
451 "Stop hwq %d ac %d stop count %d",
452 hwq, ac,
453 atomic_read(&trans_pcie->queue_stop_count[ac]));
454 } else {
455 IWL_DEBUG_TX_QUEUES(trans,
456 "Don't stop hwq %d ac %d stop count %d",
457 hwq, ac,
458 atomic_read(&trans_pcie->queue_stop_count[ac]));
459 }
460 } else {
461 IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped",
462 hwq);
463 }
464}
465
466static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
467 int txq_id)
468{
469 set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
470}
471 409
472static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie, 410 if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
473 int txq_id) 411 iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
474{ 412 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
475 clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk); 413 } else
414 IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
415 txq->q.id);
476} 416}
477 417
478static inline int iwl_queue_used(const struct iwl_queue *q, int i) 418static inline int iwl_queue_used(const struct iwl_queue *q, int i)
@@ -487,19 +427,18 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
487 return index & (q->n_window - 1); 427 return index & (q->n_window - 1);
488} 428}
489 429
490#define IWL_TX_FIFO_BK 0 /* shared */ 430static inline const char *
491#define IWL_TX_FIFO_BE 1 431trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd)
492#define IWL_TX_FIFO_VI 2 /* shared */ 432{
493#define IWL_TX_FIFO_VO 3 433 if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
494#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK 434 return "UNKNOWN";
495#define IWL_TX_FIFO_BE_IPAN 4 435 return trans_pcie->command_names[cmd];
496#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI 436}
497#define IWL_TX_FIFO_VO_IPAN 5 437
498/* re-uses the VO FIFO, uCode will properly flush/schedule */ 438static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
499#define IWL_TX_FIFO_AUX 5 439{
500#define IWL_TX_FIFO_UNUSED -1 440 return !(iwl_read32(trans, CSR_GP_CNTRL) &
501 441 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
502/* AUX (TX during scan dwell) queue */ 442}
503#define IWL_AUX_QUEUE 10
504 443
505#endif /* __iwl_trans_int_pcie_h__ */ 444#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index 8b1a7988e176..08517d3c80bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -140,14 +140,17 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
140 if (q->need_update == 0) 140 if (q->need_update == 0)
141 goto exit_unlock; 141 goto exit_unlock;
142 142
143 if (cfg(trans)->base_params->shadow_reg_enable) { 143 if (trans->cfg->base_params->shadow_reg_enable) {
144 /* shadow register enabled */ 144 /* shadow register enabled */
145 /* Device expects a multiple of 8 */ 145 /* Device expects a multiple of 8 */
146 q->write_actual = (q->write & ~0x7); 146 q->write_actual = (q->write & ~0x7);
147 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual); 147 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
148 } else { 148 } else {
149 struct iwl_trans_pcie *trans_pcie =
150 IWL_TRANS_GET_PCIE_TRANS(trans);
151
149 /* If power-saving is in use, make sure device is awake */ 152 /* If power-saving is in use, make sure device is awake */
150 if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) { 153 if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
151 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 154 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
152 155
153 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 156 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
@@ -271,17 +274,17 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
271 if (rxq->free_count > RX_LOW_WATERMARK) 274 if (rxq->free_count > RX_LOW_WATERMARK)
272 gfp_mask |= __GFP_NOWARN; 275 gfp_mask |= __GFP_NOWARN;
273 276
274 if (hw_params(trans).rx_page_order > 0) 277 if (trans_pcie->rx_page_order > 0)
275 gfp_mask |= __GFP_COMP; 278 gfp_mask |= __GFP_COMP;
276 279
277 /* Alloc a new receive buffer */ 280 /* Alloc a new receive buffer */
278 page = alloc_pages(gfp_mask, 281 page = alloc_pages(gfp_mask,
279 hw_params(trans).rx_page_order); 282 trans_pcie->rx_page_order);
280 if (!page) { 283 if (!page) {
281 if (net_ratelimit()) 284 if (net_ratelimit())
282 IWL_DEBUG_INFO(trans, "alloc_pages failed, " 285 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
283 "order: %d\n", 286 "order: %d\n",
284 hw_params(trans).rx_page_order); 287 trans_pcie->rx_page_order);
285 288
286 if ((rxq->free_count <= RX_LOW_WATERMARK) && 289 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
287 net_ratelimit()) 290 net_ratelimit())
@@ -300,7 +303,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
300 303
301 if (list_empty(&rxq->rx_used)) { 304 if (list_empty(&rxq->rx_used)) {
302 spin_unlock_irqrestore(&rxq->lock, flags); 305 spin_unlock_irqrestore(&rxq->lock, flags);
303 __free_pages(page, hw_params(trans).rx_page_order); 306 __free_pages(page, trans_pcie->rx_page_order);
304 return; 307 return;
305 } 308 }
306 element = rxq->rx_used.next; 309 element = rxq->rx_used.next;
@@ -313,7 +316,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
313 rxb->page = page; 316 rxb->page = page;
314 /* Get physical address of the RB */ 317 /* Get physical address of the RB */
315 rxb->page_dma = dma_map_page(trans->dev, page, 0, 318 rxb->page_dma = dma_map_page(trans->dev, page, 0,
316 PAGE_SIZE << hw_params(trans).rx_page_order, 319 PAGE_SIZE << trans_pcie->rx_page_order,
317 DMA_FROM_DEVICE); 320 DMA_FROM_DEVICE);
318 /* dma address must be no more than 36 bits */ 321 /* dma address must be no more than 36 bits */
319 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 322 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
@@ -362,83 +365,98 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
362 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 365 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
363 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 366 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
364 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 367 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
365 struct iwl_device_cmd *cmd;
366 unsigned long flags; 368 unsigned long flags;
367 int len, err; 369 bool page_stolen = false;
368 u16 sequence; 370 int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
369 struct iwl_rx_cmd_buffer rxcb; 371 u32 offset = 0;
370 struct iwl_rx_packet *pkt;
371 bool reclaim;
372 int index, cmd_index;
373 372
374 if (WARN_ON(!rxb)) 373 if (WARN_ON(!rxb))
375 return; 374 return;
376 375
377 dma_unmap_page(trans->dev, rxb->page_dma, 376 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
378 PAGE_SIZE << hw_params(trans).rx_page_order,
379 DMA_FROM_DEVICE);
380
381 rxcb._page = rxb->page;
382 pkt = rxb_addr(&rxcb);
383 377
384 IWL_DEBUG_RX(trans, "%s, 0x%02x\n", 378 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
385 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 379 struct iwl_rx_packet *pkt;
380 struct iwl_device_cmd *cmd;
381 u16 sequence;
382 bool reclaim;
383 int index, cmd_index, err, len;
384 struct iwl_rx_cmd_buffer rxcb = {
385 ._offset = offset,
386 ._page = rxb->page,
387 ._page_stolen = false,
388 .truesize = max_len,
389 };
386 390
391 pkt = rxb_addr(&rxcb);
387 392
388 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 393 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
389 len += sizeof(u32); /* account for status word */ 394 break;
390 trace_iwlwifi_dev_rx(trans->dev, pkt, len);
391
392 /* Reclaim a command buffer only if this packet is a response
393 * to a (driver-originated) command.
394 * If the packet (e.g. Rx frame) originated from uCode,
395 * there is no command buffer to reclaim.
396 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
397 * but apparently a few don't get set; catch them here. */
398 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
399 if (reclaim) {
400 int i;
401 395
402 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { 396 IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
403 if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) { 397 rxcb._offset,
404 reclaim = false; 398 trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
405 break; 399 pkt->hdr.cmd);
400
401 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
402 len += sizeof(u32); /* account for status word */
403 trace_iwlwifi_dev_rx(trans->dev, pkt, len);
404
405 /* Reclaim a command buffer only if this packet is a response
406 * to a (driver-originated) command.
407 * If the packet (e.g. Rx frame) originated from uCode,
408 * there is no command buffer to reclaim.
409 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
410 * but apparently a few don't get set; catch them here. */
411 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
412 if (reclaim) {
413 int i;
414
415 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
416 if (trans_pcie->no_reclaim_cmds[i] ==
417 pkt->hdr.cmd) {
418 reclaim = false;
419 break;
420 }
406 } 421 }
407 } 422 }
408 }
409 423
410 sequence = le16_to_cpu(pkt->hdr.sequence); 424 sequence = le16_to_cpu(pkt->hdr.sequence);
411 index = SEQ_TO_INDEX(sequence); 425 index = SEQ_TO_INDEX(sequence);
412 cmd_index = get_cmd_index(&txq->q, index); 426 cmd_index = get_cmd_index(&txq->q, index);
413 427
414 if (reclaim) 428 if (reclaim)
415 cmd = txq->cmd[cmd_index]; 429 cmd = txq->entries[cmd_index].cmd;
416 else 430 else
417 cmd = NULL; 431 cmd = NULL;
418 432
419 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); 433 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
420 434
421 /* 435 /*
422 * XXX: After here, we should always check rxcb._page 436 * After here, we should always check rxcb._page_stolen,
423 * against NULL before touching it or its virtual 437 * if it is true then one of the handlers took the page.
424 * memory (pkt). Because some rx_handler might have 438 */
425 * already taken or freed the pages.
426 */
427 439
428 if (reclaim) { 440 if (reclaim) {
429 /* Invoke any callbacks, transfer the buffer to caller, 441 /* Invoke any callbacks, transfer the buffer to caller,
430 * and fire off the (possibly) blocking 442 * and fire off the (possibly) blocking
431 * iwl_trans_send_cmd() 443 * iwl_trans_send_cmd()
432 * as we reclaim the driver command queue */ 444 * as we reclaim the driver command queue */
433 if (rxcb._page) 445 if (!rxcb._page_stolen)
434 iwl_tx_cmd_complete(trans, &rxcb, err); 446 iwl_tx_cmd_complete(trans, &rxcb, err);
435 else 447 else
436 IWL_WARN(trans, "Claim null rxb?\n"); 448 IWL_WARN(trans, "Claim null rxb?\n");
449 }
450
451 page_stolen |= rxcb._page_stolen;
452 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
437 } 453 }
438 454
439 /* page was stolen from us */ 455 /* page was stolen from us -- free our reference */
440 if (rxcb._page == NULL) 456 if (page_stolen) {
457 __free_pages(rxb->page, trans_pcie->rx_page_order);
441 rxb->page = NULL; 458 rxb->page = NULL;
459 }
442 460
443 /* Reuse the page if possible. For notification packets and 461 /* Reuse the page if possible. For notification packets and
444 * SKBs that fail to Rx correctly, add them back into the 462 * SKBs that fail to Rx correctly, add them back into the
@@ -447,7 +465,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
447 if (rxb->page != NULL) { 465 if (rxb->page != NULL) {
448 rxb->page_dma = 466 rxb->page_dma =
449 dma_map_page(trans->dev, rxb->page, 0, 467 dma_map_page(trans->dev, rxb->page, 0,
450 PAGE_SIZE << hw_params(trans).rx_page_order, 468 PAGE_SIZE << trans_pcie->rx_page_order,
451 DMA_FROM_DEVICE); 469 DMA_FROM_DEVICE);
452 list_add_tail(&rxb->list, &rxq->rx_free); 470 list_add_tail(&rxb->list, &rxq->rx_free);
453 rxq->free_count++; 471 rxq->free_count++;
@@ -520,412 +538,32 @@ static void iwl_rx_handle(struct iwl_trans *trans)
520 iwlagn_rx_queue_restock(trans); 538 iwlagn_rx_queue_restock(trans);
521} 539}
522 540
523static const char * const desc_lookup_text[] = {
524 "OK",
525 "FAIL",
526 "BAD_PARAM",
527 "BAD_CHECKSUM",
528 "NMI_INTERRUPT_WDG",
529 "SYSASSERT",
530 "FATAL_ERROR",
531 "BAD_COMMAND",
532 "HW_ERROR_TUNE_LOCK",
533 "HW_ERROR_TEMPERATURE",
534 "ILLEGAL_CHAN_FREQ",
535 "VCC_NOT_STABLE",
536 "FH_ERROR",
537 "NMI_INTERRUPT_HOST",
538 "NMI_INTERRUPT_ACTION_PT",
539 "NMI_INTERRUPT_UNKNOWN",
540 "UCODE_VERSION_MISMATCH",
541 "HW_ERROR_ABS_LOCK",
542 "HW_ERROR_CAL_LOCK_FAIL",
543 "NMI_INTERRUPT_INST_ACTION_PT",
544 "NMI_INTERRUPT_DATA_ACTION_PT",
545 "NMI_TRM_HW_ER",
546 "NMI_INTERRUPT_TRM",
547 "NMI_INTERRUPT_BREAK_POINT",
548 "DEBUG_0",
549 "DEBUG_1",
550 "DEBUG_2",
551 "DEBUG_3",
552};
553
554static struct { char *name; u8 num; } advanced_lookup[] = {
555 { "NMI_INTERRUPT_WDG", 0x34 },
556 { "SYSASSERT", 0x35 },
557 { "UCODE_VERSION_MISMATCH", 0x37 },
558 { "BAD_COMMAND", 0x38 },
559 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
560 { "FATAL_ERROR", 0x3D },
561 { "NMI_TRM_HW_ERR", 0x46 },
562 { "NMI_INTERRUPT_TRM", 0x4C },
563 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
564 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
565 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
566 { "NMI_INTERRUPT_HOST", 0x66 },
567 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
568 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
569 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
570 { "ADVANCED_SYSASSERT", 0 },
571};
572
573static const char *desc_lookup(u32 num)
574{
575 int i;
576 int max = ARRAY_SIZE(desc_lookup_text);
577
578 if (num < max)
579 return desc_lookup_text[num];
580
581 max = ARRAY_SIZE(advanced_lookup) - 1;
582 for (i = 0; i < max; i++) {
583 if (advanced_lookup[i].num == num)
584 break;
585 }
586 return advanced_lookup[i].name;
587}
588
589#define ERROR_START_OFFSET (1 * sizeof(u32))
590#define ERROR_ELEM_SIZE (7 * sizeof(u32))
591
592static void iwl_dump_nic_error_log(struct iwl_trans *trans)
593{
594 u32 base;
595 struct iwl_error_event_table table;
596 struct iwl_trans_pcie *trans_pcie =
597 IWL_TRANS_GET_PCIE_TRANS(trans);
598
599 base = trans->shrd->device_pointers.error_event_table;
600 if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
601 if (!base)
602 base = trans->shrd->fw->init_errlog_ptr;
603 } else {
604 if (!base)
605 base = trans->shrd->fw->inst_errlog_ptr;
606 }
607
608 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
609 IWL_ERR(trans,
610 "Not valid error log pointer 0x%08X for %s uCode\n",
611 base,
612 (trans->shrd->ucode_type == IWL_UCODE_INIT)
613 ? "Init" : "RT");
614 return;
615 }
616
617 iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
618
619 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
620 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
621 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
622 trans->shrd->status, table.valid);
623 }
624
625 trans_pcie->isr_stats.err_code = table.error_id;
626
627 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
628 table.data1, table.data2, table.line,
629 table.blink1, table.blink2, table.ilink1,
630 table.ilink2, table.bcon_time, table.gp1,
631 table.gp2, table.gp3, table.ucode_ver,
632 table.hw_ver, table.brd_ver);
633 IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id,
634 desc_lookup(table.error_id));
635 IWL_ERR(trans, "0x%08X | uPc\n", table.pc);
636 IWL_ERR(trans, "0x%08X | branchlink1\n", table.blink1);
637 IWL_ERR(trans, "0x%08X | branchlink2\n", table.blink2);
638 IWL_ERR(trans, "0x%08X | interruptlink1\n", table.ilink1);
639 IWL_ERR(trans, "0x%08X | interruptlink2\n", table.ilink2);
640 IWL_ERR(trans, "0x%08X | data1\n", table.data1);
641 IWL_ERR(trans, "0x%08X | data2\n", table.data2);
642 IWL_ERR(trans, "0x%08X | line\n", table.line);
643 IWL_ERR(trans, "0x%08X | beacon time\n", table.bcon_time);
644 IWL_ERR(trans, "0x%08X | tsf low\n", table.tsf_low);
645 IWL_ERR(trans, "0x%08X | tsf hi\n", table.tsf_hi);
646 IWL_ERR(trans, "0x%08X | time gp1\n", table.gp1);
647 IWL_ERR(trans, "0x%08X | time gp2\n", table.gp2);
648 IWL_ERR(trans, "0x%08X | time gp3\n", table.gp3);
649 IWL_ERR(trans, "0x%08X | uCode version\n", table.ucode_ver);
650 IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
651 IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
652 IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
653
654 IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
655 IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
656 IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
657 IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
658 IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
659 IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
660 IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
661 IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
662 IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
663 IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
664 IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
665 IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
666 IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
667 IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
668}
669
670/** 541/**
671 * iwl_irq_handle_error - called for HW or SW error interrupt from card 542 * iwl_irq_handle_error - called for HW or SW error interrupt from card
672 */ 543 */
673static void iwl_irq_handle_error(struct iwl_trans *trans) 544static void iwl_irq_handle_error(struct iwl_trans *trans)
674{ 545{
675 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 546 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
676 if (cfg(trans)->internal_wimax_coex && 547 if (trans->cfg->internal_wimax_coex &&
677 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 548 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
678 APMS_CLK_VAL_MRB_FUNC_MODE) || 549 APMS_CLK_VAL_MRB_FUNC_MODE) ||
679 (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 550 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
680 APMG_PS_CTRL_VAL_RESET_REQ))) { 551 APMG_PS_CTRL_VAL_RESET_REQ))) {
681 /* 552 struct iwl_trans_pcie *trans_pcie;
682 * Keep the restart process from trying to send host 553
683 * commands by clearing the ready bit. 554 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
684 */ 555 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
685 clear_bit(STATUS_READY, &trans->shrd->status); 556 iwl_op_mode_wimax_active(trans->op_mode);
686 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
687 wake_up(&trans->wait_command_queue); 557 wake_up(&trans->wait_command_queue);
688 IWL_ERR(trans, "RF is used by WiMAX\n");
689 return; 558 return;
690 } 559 }
691 560
692 IWL_ERR(trans, "Loaded firmware version: %s\n",
693 trans->shrd->fw->fw_version);
694
695 iwl_dump_nic_error_log(trans);
696 iwl_dump_csr(trans); 561 iwl_dump_csr(trans);
697 iwl_dump_fh(trans, NULL, false); 562 iwl_dump_fh(trans, NULL, false);
698 iwl_dump_nic_event_log(trans, false, NULL, false);
699 563
700 iwl_op_mode_nic_error(trans->op_mode); 564 iwl_op_mode_nic_error(trans->op_mode);
701} 565}
702 566
703#define EVENT_START_OFFSET (4 * sizeof(u32))
704
705/**
706 * iwl_print_event_log - Dump error event log to syslog
707 *
708 */
709static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
710 u32 num_events, u32 mode,
711 int pos, char **buf, size_t bufsz)
712{
713 u32 i;
714 u32 base; /* SRAM byte address of event log header */
715 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
716 u32 ptr; /* SRAM byte address of log data */
717 u32 ev, time, data; /* event log data */
718 unsigned long reg_flags;
719
720 if (num_events == 0)
721 return pos;
722
723 base = trans->shrd->device_pointers.log_event_table;
724 if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
725 if (!base)
726 base = trans->shrd->fw->init_evtlog_ptr;
727 } else {
728 if (!base)
729 base = trans->shrd->fw->inst_evtlog_ptr;
730 }
731
732 if (mode == 0)
733 event_size = 2 * sizeof(u32);
734 else
735 event_size = 3 * sizeof(u32);
736
737 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
738
739 /* Make sure device is powered up for SRAM reads */
740 spin_lock_irqsave(&trans->reg_lock, reg_flags);
741 if (unlikely(!iwl_grab_nic_access(trans)))
742 goto out_unlock;
743
744 /* Set starting address; reads will auto-increment */
745 iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
746
747 /* "time" is actually "data" for mode 0 (no timestamp).
748 * place event id # at far right for easier visual parsing. */
749 for (i = 0; i < num_events; i++) {
750 ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
751 time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
752 if (mode == 0) {
753 /* data, ev */
754 if (bufsz) {
755 pos += scnprintf(*buf + pos, bufsz - pos,
756 "EVT_LOG:0x%08x:%04u\n",
757 time, ev);
758 } else {
759 trace_iwlwifi_dev_ucode_event(trans->dev, 0,
760 time, ev);
761 IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
762 time, ev);
763 }
764 } else {
765 data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
766 if (bufsz) {
767 pos += scnprintf(*buf + pos, bufsz - pos,
768 "EVT_LOGT:%010u:0x%08x:%04u\n",
769 time, data, ev);
770 } else {
771 IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
772 time, data, ev);
773 trace_iwlwifi_dev_ucode_event(trans->dev, time,
774 data, ev);
775 }
776 }
777 }
778
779 /* Allow device to power down */
780 iwl_release_nic_access(trans);
781out_unlock:
782 spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
783 return pos;
784}
785
786/**
787 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
788 */
789static int iwl_print_last_event_logs(struct iwl_trans *trans, u32 capacity,
790 u32 num_wraps, u32 next_entry,
791 u32 size, u32 mode,
792 int pos, char **buf, size_t bufsz)
793{
794 /*
795 * display the newest DEFAULT_LOG_ENTRIES entries
796 * i.e the entries just before the next ont that uCode would fill.
797 */
798 if (num_wraps) {
799 if (next_entry < size) {
800 pos = iwl_print_event_log(trans,
801 capacity - (size - next_entry),
802 size - next_entry, mode,
803 pos, buf, bufsz);
804 pos = iwl_print_event_log(trans, 0,
805 next_entry, mode,
806 pos, buf, bufsz);
807 } else
808 pos = iwl_print_event_log(trans, next_entry - size,
809 size, mode, pos, buf, bufsz);
810 } else {
811 if (next_entry < size) {
812 pos = iwl_print_event_log(trans, 0, next_entry,
813 mode, pos, buf, bufsz);
814 } else {
815 pos = iwl_print_event_log(trans, next_entry - size,
816 size, mode, pos, buf, bufsz);
817 }
818 }
819 return pos;
820}
821
822#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
823
824int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
825 char **buf, bool display)
826{
827 u32 base; /* SRAM byte address of event log header */
828 u32 capacity; /* event log capacity in # entries */
829 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
830 u32 num_wraps; /* # times uCode wrapped to top of log */
831 u32 next_entry; /* index of next entry to be written by uCode */
832 u32 size; /* # entries that we'll print */
833 u32 logsize;
834 int pos = 0;
835 size_t bufsz = 0;
836
837 base = trans->shrd->device_pointers.log_event_table;
838 if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
839 logsize = trans->shrd->fw->init_evtlog_size;
840 if (!base)
841 base = trans->shrd->fw->init_evtlog_ptr;
842 } else {
843 logsize = trans->shrd->fw->inst_evtlog_size;
844 if (!base)
845 base = trans->shrd->fw->inst_evtlog_ptr;
846 }
847
848 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
849 IWL_ERR(trans,
850 "Invalid event log pointer 0x%08X for %s uCode\n",
851 base,
852 (trans->shrd->ucode_type == IWL_UCODE_INIT)
853 ? "Init" : "RT");
854 return -EINVAL;
855 }
856
857 /* event log header */
858 capacity = iwl_read_targ_mem(trans, base);
859 mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
860 num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
861 next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
862
863 if (capacity > logsize) {
864 IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
865 "entries\n", capacity, logsize);
866 capacity = logsize;
867 }
868
869 if (next_entry > logsize) {
870 IWL_ERR(trans, "Log write index %d is bogus, limit to %d\n",
871 next_entry, logsize);
872 next_entry = logsize;
873 }
874
875 size = num_wraps ? capacity : next_entry;
876
877 /* bail out if nothing in log */
878 if (size == 0) {
879 IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
880 return pos;
881 }
882
883#ifdef CONFIG_IWLWIFI_DEBUG
884 if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
885 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
886 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
887#else
888 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
889 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
890#endif
891 IWL_ERR(trans, "Start IWL Event Log Dump: display last %u entries\n",
892 size);
893
894#ifdef CONFIG_IWLWIFI_DEBUG
895 if (display) {
896 if (full_log)
897 bufsz = capacity * 48;
898 else
899 bufsz = size * 48;
900 *buf = kmalloc(bufsz, GFP_KERNEL);
901 if (!*buf)
902 return -ENOMEM;
903 }
904 if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
905 /*
906 * if uCode has wrapped back to top of log,
907 * start at the oldest entry,
908 * i.e the next one that uCode would fill.
909 */
910 if (num_wraps)
911 pos = iwl_print_event_log(trans, next_entry,
912 capacity - next_entry, mode,
913 pos, buf, bufsz);
914 /* (then/else) start at top of log */
915 pos = iwl_print_event_log(trans, 0,
916 next_entry, mode, pos, buf, bufsz);
917 } else
918 pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
919 next_entry, size, mode,
920 pos, buf, bufsz);
921#else
922 pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
923 next_entry, size, mode,
924 pos, buf, bufsz);
925#endif
926 return pos;
927}
928
929/* tasklet for iwlagn interrupt */ 567/* tasklet for iwlagn interrupt */
930void iwl_irq_tasklet(struct iwl_trans *trans) 568void iwl_irq_tasklet(struct iwl_trans *trans)
931{ 569{
@@ -963,7 +601,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
963 if (iwl_have_debug_level(IWL_DL_ISR)) { 601 if (iwl_have_debug_level(IWL_DL_ISR)) {
964 /* just for debug */ 602 /* just for debug */
965 inta_mask = iwl_read32(trans, CSR_INT_MASK); 603 inta_mask = iwl_read32(trans, CSR_INT_MASK);
966 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ", 604 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
967 inta, inta_mask); 605 inta, inta_mask);
968 } 606 }
969#endif 607#endif
@@ -1011,8 +649,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
1011 if (inta & CSR_INT_BIT_RF_KILL) { 649 if (inta & CSR_INT_BIT_RF_KILL) {
1012 bool hw_rfkill; 650 bool hw_rfkill;
1013 651
1014 hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & 652 hw_rfkill = iwl_is_rfkill_set(trans);
1015 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
1016 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", 653 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1017 hw_rfkill ? "disable radio" : "enable radio"); 654 hw_rfkill ? "disable radio" : "enable radio");
1018 655
@@ -1043,7 +680,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
1043 if (inta & CSR_INT_BIT_WAKEUP) { 680 if (inta & CSR_INT_BIT_WAKEUP) {
1044 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 681 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1045 iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq); 682 iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
1046 for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) 683 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
1047 iwl_txq_update_write_ptr(trans, 684 iwl_txq_update_write_ptr(trans,
1048 &trans_pcie->txq[i]); 685 &trans_pcie->txq[i]);
1049 686
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index e92972fd6ecf..21a8a672fbb2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -37,47 +37,12 @@
37#include "iwl-agn-hw.h" 37#include "iwl-agn-hw.h"
38#include "iwl-op-mode.h" 38#include "iwl-op-mode.h"
39#include "iwl-trans-pcie-int.h" 39#include "iwl-trans-pcie-int.h"
40/* FIXME: need to abstract out TX command (once we know what it looks like) */
41#include "iwl-commands.h"
40 42
41#define IWL_TX_CRC_SIZE 4 43#define IWL_TX_CRC_SIZE 4
42#define IWL_TX_DELIMITER_SIZE 4 44#define IWL_TX_DELIMITER_SIZE 4
43 45
44/*
45 * mac80211 queues, ACs, hardware queues, FIFOs.
46 *
47 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
48 *
49 * Mac80211 uses the following numbers, which we get as from it
50 * by way of skb_get_queue_mapping(skb):
51 *
52 * VO 0
53 * VI 1
54 * BE 2
55 * BK 3
56 *
57 *
58 * Regular (not A-MPDU) frames are put into hardware queues corresponding
59 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
60 * own queue per aggregation session (RA/TID combination), such queues are
61 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
62 * order to map frames to the right queue, we also need an AC->hw queue
63 * mapping. This is implemented here.
64 *
65 * Due to the way hw queues are set up (by the hw specific code), the AC->hw
66 * queue mapping is the identity mapping.
67 */
68
69static const u8 tid_to_ac[] = {
70 IEEE80211_AC_BE,
71 IEEE80211_AC_BK,
72 IEEE80211_AC_BK,
73 IEEE80211_AC_BE,
74 IEEE80211_AC_VI,
75 IEEE80211_AC_VI,
76 IEEE80211_AC_VO,
77 IEEE80211_AC_VO
78};
79
80
81/** 46/**
82 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 47 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
83 */ 48 */
@@ -95,7 +60,7 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
95 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 60 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
96 __le16 bc_ent; 61 __le16 bc_ent;
97 struct iwl_tx_cmd *tx_cmd = 62 struct iwl_tx_cmd *tx_cmd =
98 (struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload; 63 (void *) txq->entries[txq->q.write_ptr].cmd->payload;
99 64
100 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 65 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
101 66
@@ -136,13 +101,15 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
136 if (txq->need_update == 0) 101 if (txq->need_update == 0)
137 return; 102 return;
138 103
139 if (cfg(trans)->base_params->shadow_reg_enable) { 104 if (trans->cfg->base_params->shadow_reg_enable) {
140 /* shadow register enabled */ 105 /* shadow register enabled */
141 iwl_write32(trans, HBUS_TARG_WRPTR, 106 iwl_write32(trans, HBUS_TARG_WRPTR,
142 txq->q.write_ptr | (txq_id << 8)); 107 txq->q.write_ptr | (txq_id << 8));
143 } else { 108 } else {
109 struct iwl_trans_pcie *trans_pcie =
110 IWL_TRANS_GET_PCIE_TRANS(trans);
144 /* if we're trying to save power */ 111 /* if we're trying to save power */
145 if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) { 112 if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
146 /* wake up nic if it's powered down ... 113 /* wake up nic if it's powered down ...
147 * uCode will wake up, and interrupt us again, so next 114 * uCode will wake up, and interrupt us again, so next
148 * time we'll skip this part. */ 115 * time we'll skip this part. */
@@ -256,13 +223,14 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
256 223
257 lockdep_assert_held(&txq->lock); 224 lockdep_assert_held(&txq->lock);
258 225
259 iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir); 226 iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
227 &tfd_tmp[index], dma_dir);
260 228
261 /* free SKB */ 229 /* free SKB */
262 if (txq->skbs) { 230 if (txq->entries) {
263 struct sk_buff *skb; 231 struct sk_buff *skb;
264 232
265 skb = txq->skbs[index]; 233 skb = txq->entries[index].skb;
266 234
267 /* Can be called from irqs-disabled context 235 /* Can be called from irqs-disabled context
268 * If skb is not NULL, it means that the whole queue is being 236 * If skb is not NULL, it means that the whole queue is being
@@ -270,7 +238,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
270 */ 238 */
271 if (skb) { 239 if (skb) {
272 iwl_op_mode_free_skb(trans->op_mode, skb); 240 iwl_op_mode_free_skb(trans->op_mode, skb);
273 txq->skbs[index] = NULL; 241 txq->entries[index].skb = NULL;
274 } 242 }
275 } 243 }
276} 244}
@@ -393,7 +361,7 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
393 u8 sta_id = 0; 361 u8 sta_id = 0;
394 __le16 bc_ent; 362 __le16 bc_ent;
395 struct iwl_tx_cmd *tx_cmd = 363 struct iwl_tx_cmd *tx_cmd =
396 (struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload; 364 (void *)txq->entries[txq->q.read_ptr].cmd->payload;
397 365
398 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 366 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
399 367
@@ -448,20 +416,17 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
448void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, 416void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
449 int txq_id, u32 index) 417 int txq_id, u32 index)
450{ 418{
451 IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff); 419 IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
452 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 420 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
453 (index & 0xff) | (txq_id << 8)); 421 (index & 0xff) | (txq_id << 8));
454 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index); 422 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
455} 423}
456 424
457void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, 425void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
458 struct iwl_tx_queue *txq, 426 struct iwl_tx_queue *txq,
459 int tx_fifo_id, int scd_retry) 427 int tx_fifo_id, bool active)
460{ 428{
461 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
462 int txq_id = txq->q.id; 429 int txq_id = txq->q.id;
463 int active =
464 test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
465 430
466 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 431 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
467 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 432 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
@@ -469,77 +434,22 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
469 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 434 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
470 SCD_QUEUE_STTS_REG_MSK); 435 SCD_QUEUE_STTS_REG_MSK);
471 436
472 txq->sched_retry = scd_retry;
473
474 if (active) 437 if (active)
475 IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n", 438 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
476 scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id); 439 txq_id, tx_fifo_id);
477 else 440 else
478 IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n", 441 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
479 scd_retry ? "BA" : "AC/CMD", txq_id);
480}
481
482static inline int get_ac_from_tid(u16 tid)
483{
484 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
485 return tid_to_ac[tid];
486
487 /* no support for TIDs 8-15 yet */
488 return -EINVAL;
489}
490
491static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
492 u8 ctx, u16 tid)
493{
494 const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
495 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
496 return ac_to_fifo[tid_to_ac[tid]];
497
498 /* no support for TIDs 8-15 yet */
499 return -EINVAL;
500} 442}
501 443
502static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id) 444void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
445 int sta_id, int tid, int frame_limit, u16 ssn)
503{ 446{
504 if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE) 447 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
505 return false;
506 return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
507 hw_params(trans).num_ampdu_queues);
508}
509
510void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
511 enum iwl_rxon_context_id ctx, int sta_id,
512 int tid, int frame_limit, u16 ssn)
513{
514 int tx_fifo, txq_id;
515 u16 ra_tid;
516 unsigned long flags; 448 unsigned long flags;
449 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
517 450
518 struct iwl_trans_pcie *trans_pcie = 451 if (test_and_set_bit(txq_id, trans_pcie->queue_used))
519 IWL_TRANS_GET_PCIE_TRANS(trans); 452 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
520
521 if (WARN_ON(sta_id == IWL_INVALID_STATION))
522 return;
523 if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
524 return;
525
526 tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
527 if (WARN_ON(tx_fifo < 0)) {
528 IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
529 return;
530 }
531
532 txq_id = trans_pcie->agg_txq[sta_id][tid];
533 if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
534 IWL_ERR(trans,
535 "queue number out of range: %d, must be %d to %d\n",
536 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
537 IWLAGN_FIRST_AMPDU_QUEUE +
538 hw_params(trans).num_ampdu_queues - 1);
539 return;
540 }
541
542 ra_tid = BUILD_RAxTID(sta_id, tid);
543 453
544 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 454 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
545 455
@@ -550,10 +460,10 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
550 iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id); 460 iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
551 461
552 /* Set this queue as a chain-building queue */ 462 /* Set this queue as a chain-building queue */
553 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id)); 463 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
554 464
555 /* enable aggregations for the queue */ 465 /* enable aggregations for the queue */
556 iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id)); 466 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
557 467
558 /* Place first TFD at index corresponding to start sequence number. 468 /* Place first TFD at index corresponding to start sequence number.
559 * Assumes that ssn_idx is valid (!= 0xFFF) */ 469 * Assumes that ssn_idx is valid (!= 0xFFF) */
@@ -563,92 +473,42 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
563 473
564 /* Set up Tx window size and frame limit for this queue */ 474 /* Set up Tx window size and frame limit for this queue */
565 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + 475 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
566 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + 476 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
567 sizeof(u32), 477 ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
568 ((frame_limit << 478 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
569 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 479 ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
570 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 480 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
571 ((frame_limit <<
572 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
573 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
574 481
575 iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id)); 482 iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
576 483
577 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 484 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
578 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 485 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
579 tx_fifo, 1); 486 fifo, true);
580
581 trans_pcie->txq[txq_id].sta_id = sta_id;
582 trans_pcie->txq[txq_id].tid = tid;
583 487
584 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 488 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
585} 489}
586 490
587/* 491void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
588 * Find first available (lowest unused) Tx Queue, mark it "active".
589 * Called only when finding queue for aggregation.
590 * Should never return anything < 7, because they should already
591 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
592 */
593static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
594{
595 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
596 int txq_id;
597
598 for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
599 txq_id++)
600 if (!test_and_set_bit(txq_id,
601 &trans_pcie->txq_ctx_active_msk))
602 return txq_id;
603 return -1;
604}
605
606int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
607 int sta_id, int tid)
608{
609 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
610 int txq_id;
611
612 txq_id = iwlagn_txq_ctx_activate_free(trans);
613 if (txq_id == -1) {
614 IWL_ERR(trans, "No free aggregation queue available\n");
615 return -ENXIO;
616 }
617
618 trans_pcie->agg_txq[sta_id][tid] = txq_id;
619 iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
620
621 return 0;
622}
623
624int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
625{ 492{
626 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 493 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
627 u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
628 494
629 if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) { 495 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
630 IWL_ERR(trans, 496 WARN_ONCE(1, "queue %d not used", txq_id);
631 "queue number out of range: %d, must be %d to %d\n", 497 return;
632 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
633 IWLAGN_FIRST_AMPDU_QUEUE +
634 hw_params(trans).num_ampdu_queues - 1);
635 return -EINVAL;
636 } 498 }
637 499
638 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 500 iwlagn_tx_queue_stop_scheduler(trans, txq_id);
639 501
640 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id)); 502 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
641 503
642 trans_pcie->agg_txq[sta_id][tid] = 0;
643 trans_pcie->txq[txq_id].q.read_ptr = 0; 504 trans_pcie->txq[txq_id].q.read_ptr = 0;
644 trans_pcie->txq[txq_id].q.write_ptr = 0; 505 trans_pcie->txq[txq_id].q.write_ptr = 0;
645 /* supposes that ssn_idx is valid (!= 0xFFF) */
646 iwl_trans_set_wr_ptrs(trans, txq_id, 0); 506 iwl_trans_set_wr_ptrs(trans, txq_id, 0);
647 507
648 iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id)); 508 iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));
649 iwl_txq_ctx_deactivate(trans_pcie, txq_id); 509
650 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0); 510 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
651 return 0; 511 0, false);
652} 512}
653 513
654/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 514/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -681,11 +541,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
681 int trace_idx; 541 int trace_idx;
682#endif 542#endif
683 543
684 if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
685 IWL_WARN(trans, "fw recovery, no hcmd send\n");
686 return -EIO;
687 }
688
689 copy_size = sizeof(out_cmd->hdr); 544 copy_size = sizeof(out_cmd->hdr);
690 cmd_size = sizeof(out_cmd->hdr); 545 cmd_size = sizeof(out_cmd->hdr);
691 546
@@ -726,8 +581,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
726 } 581 }
727 582
728 idx = get_cmd_index(q, q->write_ptr); 583 idx = get_cmd_index(q, q->write_ptr);
729 out_cmd = txq->cmd[idx]; 584 out_cmd = txq->entries[idx].cmd;
730 out_meta = &txq->meta[idx]; 585 out_meta = &txq->entries[idx].meta;
731 586
732 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 587 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
733 if (cmd->flags & CMD_WANT_SKB) 588 if (cmd->flags & CMD_WANT_SKB)
@@ -753,12 +608,11 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
753 cmd_dest += cmd->len[i]; 608 cmd_dest += cmd->len[i];
754 } 609 }
755 610
756 IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, " 611 IWL_DEBUG_HC(trans,
757 "%d bytes at %d[%d]:%d\n", 612 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
758 get_cmd_string(out_cmd->hdr.cmd), 613 trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
759 out_cmd->hdr.cmd, 614 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
760 le16_to_cpu(out_cmd->hdr.sequence), cmd_size, 615 q->write_ptr, idx, trans_pcie->cmd_queue);
761 q->write_ptr, idx, trans_pcie->cmd_queue);
762 616
763 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, 617 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
764 DMA_BIDIRECTIONAL); 618 DMA_BIDIRECTIONAL);
@@ -816,6 +670,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
816 trace_bufs[2], trace_lens[2]); 670 trace_bufs[2], trace_lens[2]);
817#endif 671#endif
818 672
673 /* start timer if queue currently empty */
674 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
675 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
676
819 /* Increment and update queue's write index */ 677 /* Increment and update queue's write index */
820 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 678 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
821 iwl_txq_update_write_ptr(trans, txq); 679 iwl_txq_update_write_ptr(trans, txq);
@@ -825,6 +683,22 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
825 return idx; 683 return idx;
826} 684}
827 685
686static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
687 struct iwl_tx_queue *txq)
688{
689 if (!trans_pcie->wd_timeout)
690 return;
691
692 /*
693 * if empty delete timer, otherwise move timer forward
694 * since we're making progress on this queue
695 */
696 if (txq->q.read_ptr == txq->q.write_ptr)
697 del_timer(&txq->stuck_timer);
698 else
699 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
700}
701
828/** 702/**
829 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd 703 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
830 * 704 *
@@ -859,6 +733,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
859 } 733 }
860 734
861 } 735 }
736
737 iwl_queue_progress(trans_pcie, txq);
862} 738}
863 739
864/** 740/**
@@ -899,10 +775,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
899 spin_lock(&txq->lock); 775 spin_lock(&txq->lock);
900 776
901 cmd_index = get_cmd_index(&txq->q, index); 777 cmd_index = get_cmd_index(&txq->q, index);
902 cmd = txq->cmd[cmd_index]; 778 cmd = txq->entries[cmd_index].cmd;
903 meta = &txq->meta[cmd_index]; 779 meta = &txq->entries[cmd_index].meta;
904
905 txq->time_stamp = jiffies;
906 780
907 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], 781 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
908 DMA_BIDIRECTIONAL); 782 DMA_BIDIRECTIONAL);
@@ -913,21 +787,23 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
913 787
914 meta->source->resp_pkt = pkt; 788 meta->source->resp_pkt = pkt;
915 meta->source->_rx_page_addr = (unsigned long)page_address(p); 789 meta->source->_rx_page_addr = (unsigned long)page_address(p);
916 meta->source->_rx_page_order = hw_params(trans).rx_page_order; 790 meta->source->_rx_page_order = trans_pcie->rx_page_order;
917 meta->source->handler_status = handler_status; 791 meta->source->handler_status = handler_status;
918 } 792 }
919 793
920 iwl_hcmd_queue_reclaim(trans, txq_id, index); 794 iwl_hcmd_queue_reclaim(trans, txq_id, index);
921 795
922 if (!(meta->flags & CMD_ASYNC)) { 796 if (!(meta->flags & CMD_ASYNC)) {
923 if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) { 797 if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
924 IWL_WARN(trans, 798 IWL_WARN(trans,
925 "HCMD_ACTIVE already clear for command %s\n", 799 "HCMD_ACTIVE already clear for command %s\n",
926 get_cmd_string(cmd->hdr.cmd)); 800 trans_pcie_get_cmd_string(trans_pcie,
801 cmd->hdr.cmd));
927 } 802 }
928 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); 803 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
929 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 804 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
930 get_cmd_string(cmd->hdr.cmd)); 805 trans_pcie_get_cmd_string(trans_pcie,
806 cmd->hdr.cmd));
931 wake_up(&trans->wait_command_queue); 807 wake_up(&trans->wait_command_queue);
932 } 808 }
933 809
@@ -940,6 +816,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
940 816
941static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 817static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
942{ 818{
819 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
943 int ret; 820 int ret;
944 821
945 /* An asynchronous command can not expect an SKB to be set. */ 822 /* An asynchronous command can not expect an SKB to be set. */
@@ -951,7 +828,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
951 if (ret < 0) { 828 if (ret < 0) {
952 IWL_ERR(trans, 829 IWL_ERR(trans,
953 "Error sending %s: enqueue_hcmd failed: %d\n", 830 "Error sending %s: enqueue_hcmd failed: %d\n",
954 get_cmd_string(cmd->id), ret); 831 trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
955 return ret; 832 return ret;
956 } 833 }
957 return 0; 834 return 0;
@@ -964,55 +841,51 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
964 int ret; 841 int ret;
965 842
966 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 843 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
967 get_cmd_string(cmd->id)); 844 trans_pcie_get_cmd_string(trans_pcie, cmd->id));
968
969 if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
970 IWL_ERR(trans, "Command %s failed: FW Error\n",
971 get_cmd_string(cmd->id));
972 return -EIO;
973 }
974 845
975 if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, 846 if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
976 &trans->shrd->status))) { 847 &trans_pcie->status))) {
977 IWL_ERR(trans, "Command %s: a command is already active!\n", 848 IWL_ERR(trans, "Command %s: a command is already active!\n",
978 get_cmd_string(cmd->id)); 849 trans_pcie_get_cmd_string(trans_pcie, cmd->id));
979 return -EIO; 850 return -EIO;
980 } 851 }
981 852
982 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 853 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
983 get_cmd_string(cmd->id)); 854 trans_pcie_get_cmd_string(trans_pcie, cmd->id));
984 855
985 cmd_idx = iwl_enqueue_hcmd(trans, cmd); 856 cmd_idx = iwl_enqueue_hcmd(trans, cmd);
986 if (cmd_idx < 0) { 857 if (cmd_idx < 0) {
987 ret = cmd_idx; 858 ret = cmd_idx;
988 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); 859 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
989 IWL_ERR(trans, 860 IWL_ERR(trans,
990 "Error sending %s: enqueue_hcmd failed: %d\n", 861 "Error sending %s: enqueue_hcmd failed: %d\n",
991 get_cmd_string(cmd->id), ret); 862 trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
992 return ret; 863 return ret;
993 } 864 }
994 865
995 ret = wait_event_timeout(trans->wait_command_queue, 866 ret = wait_event_timeout(trans->wait_command_queue,
996 !test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status), 867 !test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status),
997 HOST_COMPLETE_TIMEOUT); 868 HOST_COMPLETE_TIMEOUT);
998 if (!ret) { 869 if (!ret) {
999 if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) { 870 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
1000 struct iwl_tx_queue *txq = 871 struct iwl_tx_queue *txq =
1001 &trans_pcie->txq[trans_pcie->cmd_queue]; 872 &trans_pcie->txq[trans_pcie->cmd_queue];
1002 struct iwl_queue *q = &txq->q; 873 struct iwl_queue *q = &txq->q;
1003 874
1004 IWL_ERR(trans, 875 IWL_ERR(trans,
1005 "Error sending %s: time out after %dms.\n", 876 "Error sending %s: time out after %dms.\n",
1006 get_cmd_string(cmd->id), 877 trans_pcie_get_cmd_string(trans_pcie, cmd->id),
1007 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 878 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1008 879
1009 IWL_ERR(trans, 880 IWL_ERR(trans,
1010 "Current CMD queue read_ptr %d write_ptr %d\n", 881 "Current CMD queue read_ptr %d write_ptr %d\n",
1011 q->read_ptr, q->write_ptr); 882 q->read_ptr, q->write_ptr);
1012 883
1013 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); 884 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
1014 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command" 885 IWL_DEBUG_INFO(trans,
1015 "%s\n", get_cmd_string(cmd->id)); 886 "Clearing HCMD_ACTIVE for command %s\n",
887 trans_pcie_get_cmd_string(trans_pcie,
888 cmd->id));
1016 ret = -ETIMEDOUT; 889 ret = -ETIMEDOUT;
1017 goto cancel; 890 goto cancel;
1018 } 891 }
@@ -1020,7 +893,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1020 893
1021 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 894 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1022 IWL_ERR(trans, "Error: Response NULL in '%s'\n", 895 IWL_ERR(trans, "Error: Response NULL in '%s'\n",
1023 get_cmd_string(cmd->id)); 896 trans_pcie_get_cmd_string(trans_pcie, cmd->id));
1024 ret = -EIO; 897 ret = -EIO;
1025 goto cancel; 898 goto cancel;
1026 } 899 }
@@ -1035,8 +908,8 @@ cancel:
1035 * in later, it will possibly set an invalid 908 * in later, it will possibly set an invalid
1036 * address (cmd->meta.source). 909 * address (cmd->meta.source).
1037 */ 910 */
1038 trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &= 911 trans_pcie->txq[trans_pcie->cmd_queue].
1039 ~CMD_WANT_SKB; 912 entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1040 } 913 }
1041 914
1042 if (cmd->resp_pkt) { 915 if (cmd->resp_pkt) {
@@ -1091,17 +964,20 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
1091 q->read_ptr != index; 964 q->read_ptr != index;
1092 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 965 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1093 966
1094 if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL)) 967 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
1095 continue; 968 continue;
1096 969
1097 __skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]); 970 __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
1098 971
1099 txq->skbs[txq->q.read_ptr] = NULL; 972 txq->entries[txq->q.read_ptr].skb = NULL;
1100 973
1101 iwlagn_txq_inval_byte_cnt_tbl(trans, txq); 974 iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
1102 975
1103 iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE); 976 iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
1104 freed++; 977 freed++;
1105 } 978 }
979
980 iwl_queue_progress(trans_pcie, txq);
981
1106 return freed; 982 return freed;
1107} 983}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 4d7b30d3e648..2e57161854b9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -68,18 +68,20 @@
68#include <linux/bitops.h> 68#include <linux/bitops.h>
69#include <linux/gfp.h> 69#include <linux/gfp.h>
70 70
71#include "iwl-drv.h"
71#include "iwl-trans.h" 72#include "iwl-trans.h"
72#include "iwl-trans-pcie-int.h" 73#include "iwl-trans-pcie-int.h"
73#include "iwl-csr.h" 74#include "iwl-csr.h"
74#include "iwl-prph.h" 75#include "iwl-prph.h"
75#include "iwl-shared.h"
76#include "iwl-eeprom.h" 76#include "iwl-eeprom.h"
77#include "iwl-agn-hw.h" 77#include "iwl-agn-hw.h"
78/* FIXME: need to abstract out TX command (once we know what it looks like) */
79#include "iwl-commands.h"
78 80
79#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) 81#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
80 82
81#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \ 83#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
82 (((1<<cfg(trans)->base_params->num_of_queues) - 1) &\ 84 (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
83 (~(1<<(trans_pcie)->cmd_queue))) 85 (~(1<<(trans_pcie)->cmd_queue)))
84 86
85static int iwl_trans_rx_alloc(struct iwl_trans *trans) 87static int iwl_trans_rx_alloc(struct iwl_trans *trans)
@@ -132,10 +134,10 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
132 * to an SKB, so we need to unmap and free potential storage */ 134 * to an SKB, so we need to unmap and free potential storage */
133 if (rxq->pool[i].page != NULL) { 135 if (rxq->pool[i].page != NULL) {
134 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, 136 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
135 PAGE_SIZE << hw_params(trans).rx_page_order, 137 PAGE_SIZE << trans_pcie->rx_page_order,
136 DMA_FROM_DEVICE); 138 DMA_FROM_DEVICE);
137 __free_pages(rxq->pool[i].page, 139 __free_pages(rxq->pool[i].page,
138 hw_params(trans).rx_page_order); 140 trans_pcie->rx_page_order);
139 rxq->pool[i].page = NULL; 141 rxq->pool[i].page = NULL;
140 } 142 }
141 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 143 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -145,11 +147,12 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
145static void iwl_trans_rx_hw_init(struct iwl_trans *trans, 147static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
146 struct iwl_rx_queue *rxq) 148 struct iwl_rx_queue *rxq)
147{ 149{
150 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
148 u32 rb_size; 151 u32 rb_size;
149 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 152 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
150 u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */ 153 u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
151 154
152 if (iwlagn_mod_params.amsdu_size_8K) 155 if (trans_pcie->rx_buf_size_8k)
153 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; 156 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
154 else 157 else
155 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 158 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
@@ -180,7 +183,6 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
180 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 183 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
181 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | 184 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
182 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 185 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
183 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
184 rb_size| 186 rb_size|
185 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| 187 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
186 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 188 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
@@ -299,6 +301,33 @@ static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
299 memset(ptr, 0, sizeof(*ptr)); 301 memset(ptr, 0, sizeof(*ptr));
300} 302}
301 303
304static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
305{
306 struct iwl_tx_queue *txq = (void *)data;
307 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
308 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
309
310 spin_lock(&txq->lock);
311 /* check if triggered erroneously */
312 if (txq->q.read_ptr == txq->q.write_ptr) {
313 spin_unlock(&txq->lock);
314 return;
315 }
316 spin_unlock(&txq->lock);
317
318
319 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
320 jiffies_to_msecs(trans_pcie->wd_timeout));
321 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
322 txq->q.read_ptr, txq->q.write_ptr);
323 IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
324 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
325 & (TFD_QUEUE_SIZE_MAX - 1),
326 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));
327
328 iwl_op_mode_nic_error(trans->op_mode);
329}
330
302static int iwl_trans_txq_alloc(struct iwl_trans *trans, 331static int iwl_trans_txq_alloc(struct iwl_trans *trans,
303 struct iwl_tx_queue *txq, int slots_num, 332 struct iwl_tx_queue *txq, int slots_num,
304 u32 txq_id) 333 u32 txq_id)
@@ -307,40 +336,31 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
307 int i; 336 int i;
308 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 337 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
309 338
310 if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds)) 339 if (WARN_ON(txq->entries || txq->tfds))
311 return -EINVAL; 340 return -EINVAL;
312 341
342 setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
343 (unsigned long)txq);
344 txq->trans_pcie = trans_pcie;
345
313 txq->q.n_window = slots_num; 346 txq->q.n_window = slots_num;
314 347
315 txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL); 348 txq->entries = kcalloc(slots_num,
316 txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL); 349 sizeof(struct iwl_pcie_tx_queue_entry),
350 GFP_KERNEL);
317 351
318 if (!txq->meta || !txq->cmd) 352 if (!txq->entries)
319 goto error; 353 goto error;
320 354
321 if (txq_id == trans_pcie->cmd_queue) 355 if (txq_id == trans_pcie->cmd_queue)
322 for (i = 0; i < slots_num; i++) { 356 for (i = 0; i < slots_num; i++) {
323 txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd), 357 txq->entries[i].cmd =
324 GFP_KERNEL); 358 kmalloc(sizeof(struct iwl_device_cmd),
325 if (!txq->cmd[i]) 359 GFP_KERNEL);
360 if (!txq->entries[i].cmd)
326 goto error; 361 goto error;
327 } 362 }
328 363
329 /* Alloc driver data array and TFD circular buffer */
330 /* Driver private data, only for Tx (not command) queues,
331 * not shared with device. */
332 if (txq_id != trans_pcie->cmd_queue) {
333 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
334 GFP_KERNEL);
335 if (!txq->skbs) {
336 IWL_ERR(trans, "kmalloc for auxiliary BD "
337 "structures failed\n");
338 goto error;
339 }
340 } else {
341 txq->skbs = NULL;
342 }
343
344 /* Circular buffer of transmit frame descriptors (TFDs), 364 /* Circular buffer of transmit frame descriptors (TFDs),
345 * shared with device */ 365 * shared with device */
346 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 366 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
@@ -353,37 +373,22 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
353 373
354 return 0; 374 return 0;
355error: 375error:
356 kfree(txq->skbs); 376 if (txq->entries && txq_id == trans_pcie->cmd_queue)
357 txq->skbs = NULL;
358 /* since txq->cmd has been zeroed,
359 * all non allocated cmd[i] will be NULL */
360 if (txq->cmd && txq_id == trans_pcie->cmd_queue)
361 for (i = 0; i < slots_num; i++) 377 for (i = 0; i < slots_num; i++)
362 kfree(txq->cmd[i]); 378 kfree(txq->entries[i].cmd);
363 kfree(txq->meta); 379 kfree(txq->entries);
364 kfree(txq->cmd); 380 txq->entries = NULL;
365 txq->meta = NULL;
366 txq->cmd = NULL;
367 381
368 return -ENOMEM; 382 return -ENOMEM;
369 383
370} 384}
371 385
372static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq, 386static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
373 int slots_num, u32 txq_id) 387 int slots_num, u32 txq_id)
374{ 388{
375 int ret; 389 int ret;
376 390
377 txq->need_update = 0; 391 txq->need_update = 0;
378 memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
379
380 /*
381 * For the default queues 0-3, set up the swq_id
382 * already -- all others need to get one later
383 * (if they need one at all).
384 */
385 if (txq_id < 4)
386 iwl_set_swq_id(txq, txq_id, txq_id);
387 392
388 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 393 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
389 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 394 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
@@ -461,7 +466,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
461 466
462 if (txq_id == trans_pcie->cmd_queue) 467 if (txq_id == trans_pcie->cmd_queue)
463 for (i = 0; i < txq->q.n_window; i++) 468 for (i = 0; i < txq->q.n_window; i++)
464 kfree(txq->cmd[i]); 469 kfree(txq->entries[i].cmd);
465 470
466 /* De-alloc circular buffer of TFDs */ 471 /* De-alloc circular buffer of TFDs */
467 if (txq->q.n_bd) { 472 if (txq->q.n_bd) {
@@ -470,15 +475,10 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
470 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); 475 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
471 } 476 }
472 477
473 /* De-alloc array of per-TFD driver data */ 478 kfree(txq->entries);
474 kfree(txq->skbs); 479 txq->entries = NULL;
475 txq->skbs = NULL;
476 480
477 /* deallocate arrays */ 481 del_timer_sync(&txq->stuck_timer);
478 kfree(txq->cmd);
479 kfree(txq->meta);
480 txq->cmd = NULL;
481 txq->meta = NULL;
482 482
483 /* 0-fill queue descriptor structure */ 483 /* 0-fill queue descriptor structure */
484 memset(txq, 0, sizeof(*txq)); 484 memset(txq, 0, sizeof(*txq));
@@ -497,7 +497,7 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
497 /* Tx queues */ 497 /* Tx queues */
498 if (trans_pcie->txq) { 498 if (trans_pcie->txq) {
499 for (txq_id = 0; 499 for (txq_id = 0;
500 txq_id < cfg(trans)->base_params->num_of_queues; txq_id++) 500 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
501 iwl_tx_queue_free(trans, txq_id); 501 iwl_tx_queue_free(trans, txq_id);
502 } 502 }
503 503
@@ -522,7 +522,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
522 int txq_id, slots_num; 522 int txq_id, slots_num;
523 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 523 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
524 524
525 u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues * 525 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
526 sizeof(struct iwlagn_scd_bc_tbl); 526 sizeof(struct iwlagn_scd_bc_tbl);
527 527
528 /*It is not allowed to alloc twice, so warn when this happens. 528 /*It is not allowed to alloc twice, so warn when this happens.
@@ -546,7 +546,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
546 goto error; 546 goto error;
547 } 547 }
548 548
549 trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues, 549 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
550 sizeof(struct iwl_tx_queue), GFP_KERNEL); 550 sizeof(struct iwl_tx_queue), GFP_KERNEL);
551 if (!trans_pcie->txq) { 551 if (!trans_pcie->txq) {
552 IWL_ERR(trans, "Not enough memory for txq\n"); 552 IWL_ERR(trans, "Not enough memory for txq\n");
@@ -555,7 +555,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
555 } 555 }
556 556
557 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 557 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
558 for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues; 558 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
559 txq_id++) { 559 txq_id++) {
560 slots_num = (txq_id == trans_pcie->cmd_queue) ? 560 slots_num = (txq_id == trans_pcie->cmd_queue) ?
561 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 561 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
@@ -601,7 +601,7 @@ static int iwl_tx_init(struct iwl_trans *trans)
601 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 601 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
602 602
603 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 603 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
604 for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues; 604 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
605 txq_id++) { 605 txq_id++) {
606 slots_num = (txq_id == trans_pcie->cmd_queue) ? 606 slots_num = (txq_id == trans_pcie->cmd_queue) ?
607 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 607 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
@@ -724,9 +724,9 @@ static int iwl_apm_init(struct iwl_trans *trans)
724 iwl_apm_config(trans); 724 iwl_apm_config(trans);
725 725
726 /* Configure analog phase-lock-loop before activating to D0A */ 726 /* Configure analog phase-lock-loop before activating to D0A */
727 if (cfg(trans)->base_params->pll_cfg_val) 727 if (trans->cfg->base_params->pll_cfg_val)
728 iwl_set_bit(trans, CSR_ANA_PLL_CFG, 728 iwl_set_bit(trans, CSR_ANA_PLL_CFG,
729 cfg(trans)->base_params->pll_cfg_val); 729 trans->cfg->base_params->pll_cfg_val);
730 730
731 /* 731 /*
732 * Set "initialization complete" bit to move adapter from 732 * Set "initialization complete" bit to move adapter from
@@ -836,7 +836,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
836 if (iwl_tx_init(trans)) 836 if (iwl_tx_init(trans))
837 return -ENOMEM; 837 return -ENOMEM;
838 838
839 if (cfg(trans)->base_params->shadow_reg_enable) { 839 if (trans->cfg->base_params->shadow_reg_enable) {
840 /* enable shadow regs in HW */ 840 /* enable shadow regs in HW */
841 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 841 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
842 0x800FFFFF); 842 0x800FFFFF);
@@ -895,59 +895,6 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
895 return ret; 895 return ret;
896} 896}
897 897
898#define IWL_AC_UNSET -1
899
900struct queue_to_fifo_ac {
901 s8 fifo, ac;
902};
903
904static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
905 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
906 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
907 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
908 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
909 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
910 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
911 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
912 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
913 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
914 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
915 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
916};
917
918static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
919 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
920 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
921 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
922 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
923 { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
924 { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
925 { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
926 { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
927 { IWL_TX_FIFO_BE_IPAN, 2, },
928 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
929 { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
930};
931
932static const u8 iwlagn_bss_ac_to_fifo[] = {
933 IWL_TX_FIFO_VO,
934 IWL_TX_FIFO_VI,
935 IWL_TX_FIFO_BE,
936 IWL_TX_FIFO_BK,
937};
938static const u8 iwlagn_bss_ac_to_queue[] = {
939 0, 1, 2, 3,
940};
941static const u8 iwlagn_pan_ac_to_fifo[] = {
942 IWL_TX_FIFO_VO_IPAN,
943 IWL_TX_FIFO_VI_IPAN,
944 IWL_TX_FIFO_BE_IPAN,
945 IWL_TX_FIFO_BK_IPAN,
946};
947static const u8 iwlagn_pan_ac_to_queue[] = {
948 7, 6, 5, 4,
949};
950
951/* 898/*
952 * ucode 899 * ucode
953 */ 900 */
@@ -1028,34 +975,21 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1028 const struct fw_img *fw) 975 const struct fw_img *fw)
1029{ 976{
1030 int ret; 977 int ret;
1031 struct iwl_trans_pcie *trans_pcie =
1032 IWL_TRANS_GET_PCIE_TRANS(trans);
1033 bool hw_rfkill; 978 bool hw_rfkill;
1034 979
1035 trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
1036 trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
1037
1038 trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
1039 trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;
1040
1041 trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
1042 trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
1043
1044 /* This may fail if AMT took ownership of the device */ 980 /* This may fail if AMT took ownership of the device */
1045 if (iwl_prepare_card_hw(trans)) { 981 if (iwl_prepare_card_hw(trans)) {
1046 IWL_WARN(trans, "Exit HW not ready\n"); 982 IWL_WARN(trans, "Exit HW not ready\n");
1047 return -EIO; 983 return -EIO;
1048 } 984 }
1049 985
986 iwl_enable_rfkill_int(trans);
987
1050 /* If platform's RF_KILL switch is NOT set to KILL */ 988 /* If platform's RF_KILL switch is NOT set to KILL */
1051 hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & 989 hw_rfkill = iwl_is_rfkill_set(trans);
1052 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
1053 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 990 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1054 991 if (hw_rfkill)
1055 if (hw_rfkill) {
1056 iwl_enable_rfkill_int(trans);
1057 return -ERFKILL; 992 return -ERFKILL;
1058 }
1059 993
1060 iwl_write32(trans, CSR_INT, 0xFFFFFFFF); 994 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1061 995
@@ -1098,9 +1032,7 @@ static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
1098 1032
1099static void iwl_tx_start(struct iwl_trans *trans) 1033static void iwl_tx_start(struct iwl_trans *trans)
1100{ 1034{
1101 const struct queue_to_fifo_ac *queue_to_fifo; 1035 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1102 struct iwl_trans_pcie *trans_pcie =
1103 IWL_TRANS_GET_PCIE_TRANS(trans);
1104 u32 a; 1036 u32 a;
1105 unsigned long flags; 1037 unsigned long flags;
1106 int i, chan; 1038 int i, chan;
@@ -1121,7 +1053,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
1121 iwl_write_targ_mem(trans, a, 0); 1053 iwl_write_targ_mem(trans, a, 0);
1122 for (; a < trans_pcie->scd_base_addr + 1054 for (; a < trans_pcie->scd_base_addr +
1123 SCD_TRANS_TBL_OFFSET_QUEUE( 1055 SCD_TRANS_TBL_OFFSET_QUEUE(
1124 cfg(trans)->base_params->num_of_queues); 1056 trans->cfg->base_params->num_of_queues);
1125 a += 4) 1057 a += 4)
1126 iwl_write_targ_mem(trans, a, 0); 1058 iwl_write_targ_mem(trans, a, 0);
1127 1059
@@ -1144,7 +1076,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
1144 iwl_write_prph(trans, SCD_AGGR_SEL, 0); 1076 iwl_write_prph(trans, SCD_AGGR_SEL, 0);
1145 1077
1146 /* initiate the queues */ 1078 /* initiate the queues */
1147 for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) { 1079 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
1148 iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0); 1080 iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
1149 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8)); 1081 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
1150 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + 1082 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
@@ -1161,46 +1093,24 @@ static void iwl_tx_start(struct iwl_trans *trans)
1161 } 1093 }
1162 1094
1163 iwl_write_prph(trans, SCD_INTERRUPT_MASK, 1095 iwl_write_prph(trans, SCD_INTERRUPT_MASK,
1164 IWL_MASK(0, cfg(trans)->base_params->num_of_queues)); 1096 IWL_MASK(0, trans->cfg->base_params->num_of_queues));
1165 1097
1166 /* Activate all Tx DMA/FIFO channels */ 1098 /* Activate all Tx DMA/FIFO channels */
1167 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); 1099 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1168 1100
1169 /* map queues to FIFOs */
1170 if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
1171 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
1172 else
1173 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
1174
1175 iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0); 1101 iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
1176 1102
1177 /* make sure all queue are not stopped */ 1103 /* make sure all queue are not stopped/used */
1178 memset(&trans_pcie->queue_stopped[0], 0, 1104 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
1179 sizeof(trans_pcie->queue_stopped)); 1105 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1180 for (i = 0; i < 4; i++)
1181 atomic_set(&trans_pcie->queue_stop_count[i], 0);
1182
1183 /* reset to 0 to enable all the queue first */
1184 trans_pcie->txq_ctx_active_msk = 0;
1185 1106
1186 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) < 1107 for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
1187 IWLAGN_FIRST_AMPDU_QUEUE); 1108 int fifo = trans_pcie->setup_q_to_fifo[i];
1188 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
1189 IWLAGN_FIRST_AMPDU_QUEUE);
1190 1109
1191 for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) { 1110 set_bit(i, trans_pcie->queue_used);
1192 int fifo = queue_to_fifo[i].fifo;
1193 int ac = queue_to_fifo[i].ac;
1194 1111
1195 iwl_txq_ctx_activate(trans_pcie, i);
1196
1197 if (fifo == IWL_TX_FIFO_UNUSED)
1198 continue;
1199
1200 if (ac != IWL_AC_UNSET)
1201 iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
1202 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i], 1112 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
1203 fifo, 0); 1113 fifo, true);
1204 } 1114 }
1205 1115
1206 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 1116 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
@@ -1251,7 +1161,7 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
1251 } 1161 }
1252 1162
1253 /* Unmap DMA from host system and free skb's */ 1163 /* Unmap DMA from host system and free skb's */
1254 for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues; 1164 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
1255 txq_id++) 1165 txq_id++)
1256 iwl_tx_queue_unmap(trans, txq_id); 1166 iwl_tx_queue_unmap(trans, txq_id);
1257 1167
@@ -1303,6 +1213,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1303 iwl_disable_interrupts(trans); 1213 iwl_disable_interrupts(trans);
1304 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 1214 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1305 1215
1216 iwl_enable_rfkill_int(trans);
1217
1306 /* wait to make sure we flush pending tasklet*/ 1218 /* wait to make sure we flush pending tasklet*/
1307 synchronize_irq(trans_pcie->irq); 1219 synchronize_irq(trans_pcie->irq);
1308 tasklet_kill(&trans_pcie->irq_tasklet); 1220 tasklet_kill(&trans_pcie->irq_tasklet);
@@ -1311,6 +1223,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1311 1223
1312 /* stop and reset the on-board processor */ 1224 /* stop and reset the on-board processor */
1313 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 1225 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1226
1227 /* clear all status bits */
1228 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
1229 clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
1230 clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
1231 clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1314} 1232}
1315 1233
1316static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans) 1234static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
@@ -1325,81 +1243,43 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
1325} 1243}
1326 1244
1327static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 1245static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1328 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, 1246 struct iwl_device_cmd *dev_cmd, int txq_id)
1329 u8 sta_id, u8 tid)
1330{ 1247{
1331 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1248 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1332 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1249 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1333 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1334 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; 1250 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
1335 struct iwl_cmd_meta *out_meta; 1251 struct iwl_cmd_meta *out_meta;
1336 struct iwl_tx_queue *txq; 1252 struct iwl_tx_queue *txq;
1337 struct iwl_queue *q; 1253 struct iwl_queue *q;
1338
1339 dma_addr_t phys_addr = 0; 1254 dma_addr_t phys_addr = 0;
1340 dma_addr_t txcmd_phys; 1255 dma_addr_t txcmd_phys;
1341 dma_addr_t scratch_phys; 1256 dma_addr_t scratch_phys;
1342 u16 len, firstlen, secondlen; 1257 u16 len, firstlen, secondlen;
1343 u8 wait_write_ptr = 0; 1258 u8 wait_write_ptr = 0;
1344 u8 txq_id;
1345 bool is_agg = false;
1346 __le16 fc = hdr->frame_control; 1259 __le16 fc = hdr->frame_control;
1347 u8 hdr_len = ieee80211_hdrlen(fc); 1260 u8 hdr_len = ieee80211_hdrlen(fc);
1348 u16 __maybe_unused wifi_seq; 1261 u16 __maybe_unused wifi_seq;
1349 1262
1350 /*
1351 * Send this frame after DTIM -- there's a special queue
1352 * reserved for this for contexts that support AP mode.
1353 */
1354 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1355 txq_id = trans_pcie->mcast_queue[ctx];
1356
1357 /*
1358 * The microcode will clear the more data
1359 * bit in the last frame it transmits.
1360 */
1361 hdr->frame_control |=
1362 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1363 } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
1364 txq_id = IWL_AUX_QUEUE;
1365 else
1366 txq_id =
1367 trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
1368
1369 /* aggregation is on for this <sta,tid> */
1370 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1371 WARN_ON(tid >= IWL_MAX_TID_COUNT);
1372 txq_id = trans_pcie->agg_txq[sta_id][tid];
1373 is_agg = true;
1374 }
1375
1376 txq = &trans_pcie->txq[txq_id]; 1263 txq = &trans_pcie->txq[txq_id];
1377 q = &txq->q; 1264 q = &txq->q;
1378 1265
1379 spin_lock(&txq->lock); 1266 if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
1267 WARN_ON_ONCE(1);
1268 return -EINVAL;
1269 }
1380 1270
1381 /* In AGG mode, the index in the ring must correspond to the WiFi 1271 spin_lock(&txq->lock);
1382 * sequence number. This is a HW requirements to help the SCD to parse
1383 * the BA.
1384 * Check here that the packets are in the right place on the ring.
1385 */
1386#ifdef CONFIG_IWLWIFI_DEBUG
1387 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1388 WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
1389 "Q: %d WiFi Seq %d tfdNum %d",
1390 txq_id, wifi_seq, q->write_ptr);
1391#endif
1392 1272
1393 /* Set up driver data for this TFD */ 1273 /* Set up driver data for this TFD */
1394 txq->skbs[q->write_ptr] = skb; 1274 txq->entries[q->write_ptr].skb = skb;
1395 txq->cmd[q->write_ptr] = dev_cmd; 1275 txq->entries[q->write_ptr].cmd = dev_cmd;
1396 1276
1397 dev_cmd->hdr.cmd = REPLY_TX; 1277 dev_cmd->hdr.cmd = REPLY_TX;
1398 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 1278 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1399 INDEX_TO_SEQ(q->write_ptr))); 1279 INDEX_TO_SEQ(q->write_ptr)));
1400 1280
1401 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 1281 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1402 out_meta = &txq->meta[q->write_ptr]; 1282 out_meta = &txq->entries[q->write_ptr].meta;
1403 1283
1404 /* 1284 /*
1405 * Use the first empty entry in this queue's command buffer array 1285 * Use the first empty entry in this queue's command buffer array
@@ -1481,6 +1361,10 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1481 &dev_cmd->hdr, firstlen, 1361 &dev_cmd->hdr, firstlen,
1482 skb->data + hdr_len, secondlen); 1362 skb->data + hdr_len, secondlen);
1483 1363
1364 /* start timer if queue currently empty */
1365 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
1366 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1367
1484 /* Tell device the write index *just past* this latest filled TFD */ 1368 /* Tell device the write index *just past* this latest filled TFD */
1485 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1369 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1486 iwl_txq_update_write_ptr(trans, txq); 1370 iwl_txq_update_write_ptr(trans, txq);
@@ -1541,8 +1425,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1541 1425
1542 iwl_apm_init(trans); 1426 iwl_apm_init(trans);
1543 1427
1544 hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & 1428 /* From now on, the op_mode will be kept updated about RF kill state */
1545 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 1429 iwl_enable_rfkill_int(trans);
1430
1431 hw_rfkill = iwl_is_rfkill_set(trans);
1546 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 1432 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1547 1433
1548 return err; 1434 return err;
@@ -1555,18 +1441,41 @@ error:
1555 return err; 1441 return err;
1556} 1442}
1557 1443
1558static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans) 1444static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
1445 bool op_mode_leaving)
1559{ 1446{
1447 bool hw_rfkill;
1448 unsigned long flags;
1449 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1450
1560 iwl_apm_stop(trans); 1451 iwl_apm_stop(trans);
1561 1452
1453 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1454 iwl_disable_interrupts(trans);
1455 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1456
1562 iwl_write32(trans, CSR_INT, 0xFFFFFFFF); 1457 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1563 1458
1564 /* Even if we stop the HW, we still want the RF kill interrupt */ 1459 if (!op_mode_leaving) {
1565 iwl_enable_rfkill_int(trans); 1460 /*
1461 * Even if we stop the HW, we still want the RF kill
1462 * interrupt
1463 */
1464 iwl_enable_rfkill_int(trans);
1465
1466 /*
1467 * Check again since the RF kill state may have changed while
1468 * all the interrupts were disabled, in this case we couldn't
1469 * receive the RF kill interrupt and update the state in the
1470 * op_mode.
1471 */
1472 hw_rfkill = iwl_is_rfkill_set(trans);
1473 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1474 }
1566} 1475}
1567 1476
1568static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, 1477static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1569 int txq_id, int ssn, struct sk_buff_head *skbs) 1478 struct sk_buff_head *skbs)
1570{ 1479{
1571 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1480 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1572 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 1481 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -1576,35 +1485,15 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
1576 1485
1577 spin_lock(&txq->lock); 1486 spin_lock(&txq->lock);
1578 1487
1579 txq->time_stamp = jiffies;
1580
1581 if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
1582 tid != IWL_TID_NON_QOS &&
1583 txq_id != trans_pcie->agg_txq[sta_id][tid])) {
1584 /*
1585 * FIXME: this is a uCode bug which need to be addressed,
1586 * log the information and return for now.
1587 * Since it is can possibly happen very often and in order
1588 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
1589 */
1590 IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
1591 "agg_txq[sta_id[tid] %d", txq_id,
1592 trans_pcie->agg_txq[sta_id][tid]);
1593 spin_unlock(&txq->lock);
1594 return 1;
1595 }
1596
1597 if (txq->q.read_ptr != tfd_num) { 1488 if (txq->q.read_ptr != tfd_num) {
1598 IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n", 1489 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1599 txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr, 1490 txq_id, txq->q.read_ptr, tfd_num, ssn);
1600 tfd_num, ssn);
1601 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); 1491 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1602 if (iwl_queue_space(&txq->q) > txq->q.low_mark) 1492 if (iwl_queue_space(&txq->q) > txq->q.low_mark)
1603 iwl_wake_queue(trans, txq); 1493 iwl_wake_queue(trans, txq);
1604 } 1494 }
1605 1495
1606 spin_unlock(&txq->lock); 1496 spin_unlock(&txq->lock);
1607 return 0;
1608} 1497}
1609 1498
1610static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1499static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1623,7 +1512,7 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1623} 1512}
1624 1513
1625static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1514static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1626 const struct iwl_trans_config *trans_cfg) 1515 const struct iwl_trans_config *trans_cfg)
1627{ 1516{
1628 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1517 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1629 1518
@@ -1635,9 +1524,31 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1635 if (trans_pcie->n_no_reclaim_cmds) 1524 if (trans_pcie->n_no_reclaim_cmds)
1636 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, 1525 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1637 trans_pcie->n_no_reclaim_cmds * sizeof(u8)); 1526 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1527
1528 trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
1529
1530 if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
1531 trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
1532
1533 /* at least the command queue must be mapped */
1534 WARN_ON(!trans_pcie->n_q_to_fifo);
1535
1536 memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
1537 trans_pcie->n_q_to_fifo * sizeof(u8));
1538
1539 trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
1540 if (trans_pcie->rx_buf_size_8k)
1541 trans_pcie->rx_page_order = get_order(8 * 1024);
1542 else
1543 trans_pcie->rx_page_order = get_order(4 * 1024);
1544
1545 trans_pcie->wd_timeout =
1546 msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
1547
1548 trans_pcie->command_names = trans_cfg->command_names;
1638} 1549}
1639 1550
1640static void iwl_trans_pcie_free(struct iwl_trans *trans) 1551void iwl_trans_pcie_free(struct iwl_trans *trans)
1641{ 1552{
1642 struct iwl_trans_pcie *trans_pcie = 1553 struct iwl_trans_pcie *trans_pcie =
1643 IWL_TRANS_GET_PCIE_TRANS(trans); 1554 IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1656,10 +1567,19 @@ static void iwl_trans_pcie_free(struct iwl_trans *trans)
1656 pci_release_regions(trans_pcie->pci_dev); 1567 pci_release_regions(trans_pcie->pci_dev);
1657 pci_disable_device(trans_pcie->pci_dev); 1568 pci_disable_device(trans_pcie->pci_dev);
1658 1569
1659 trans->shrd->trans = NULL;
1660 kfree(trans); 1570 kfree(trans);
1661} 1571}
1662 1572
1573static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1574{
1575 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1576
1577 if (state)
1578 set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1579 else
1580 clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1581}
1582
1663#ifdef CONFIG_PM_SLEEP 1583#ifdef CONFIG_PM_SLEEP
1664static int iwl_trans_pcie_suspend(struct iwl_trans *trans) 1584static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1665{ 1585{
@@ -1670,16 +1590,14 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1670{ 1590{
1671 bool hw_rfkill; 1591 bool hw_rfkill;
1672 1592
1673 hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & 1593 iwl_enable_rfkill_int(trans);
1674 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
1675
1676 if (hw_rfkill)
1677 iwl_enable_rfkill_int(trans);
1678 else
1679 iwl_enable_interrupts(trans);
1680 1594
1595 hw_rfkill = iwl_is_rfkill_set(trans);
1681 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 1596 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1682 1597
1598 if (!hw_rfkill)
1599 iwl_enable_interrupts(trans);
1600
1683 return 0; 1601 return 0;
1684} 1602}
1685#endif /* CONFIG_PM_SLEEP */ 1603#endif /* CONFIG_PM_SLEEP */
@@ -1696,7 +1614,7 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1696 int ret = 0; 1614 int ret = 0;
1697 1615
1698 /* waiting for all the tx frames complete might take a while */ 1616 /* waiting for all the tx frames complete might take a while */
1699 for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) { 1617 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1700 if (cnt == trans_pcie->cmd_queue) 1618 if (cnt == trans_pcie->cmd_queue)
1701 continue; 1619 continue;
1702 txq = &trans_pcie->txq[cnt]; 1620 txq = &trans_pcie->txq[cnt];
@@ -1714,42 +1632,9 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1714 return ret; 1632 return ret;
1715} 1633}
1716 1634
1717/*
1718 * On every watchdog tick we check (latest) time stamp. If it does not
1719 * change during timeout period and queue is not empty we reset firmware.
1720 */
1721static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
1722{
1723 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1724 struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
1725 struct iwl_queue *q = &txq->q;
1726 unsigned long timeout;
1727
1728 if (q->read_ptr == q->write_ptr) {
1729 txq->time_stamp = jiffies;
1730 return 0;
1731 }
1732
1733 timeout = txq->time_stamp +
1734 msecs_to_jiffies(hw_params(trans).wd_timeout);
1735
1736 if (time_after(jiffies, timeout)) {
1737 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
1738 hw_params(trans).wd_timeout);
1739 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
1740 q->read_ptr, q->write_ptr);
1741 IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
1742 iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt))
1743 & (TFD_QUEUE_SIZE_MAX - 1),
1744 iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
1745 return 1;
1746 }
1747
1748 return 0;
1749}
1750
1751static const char *get_fh_string(int cmd) 1635static const char *get_fh_string(int cmd)
1752{ 1636{
1637#define IWL_CMD(x) case x: return #x
1753 switch (cmd) { 1638 switch (cmd) {
1754 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); 1639 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1755 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); 1640 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
@@ -1763,6 +1648,7 @@ static const char *get_fh_string(int cmd)
1763 default: 1648 default:
1764 return "UNKNOWN"; 1649 return "UNKNOWN";
1765 } 1650 }
1651#undef IWL_CMD
1766} 1652}
1767 1653
1768int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) 1654int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
@@ -1811,6 +1697,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
1811 1697
1812static const char *get_csr_string(int cmd) 1698static const char *get_csr_string(int cmd)
1813{ 1699{
1700#define IWL_CMD(x) case x: return #x
1814 switch (cmd) { 1701 switch (cmd) {
1815 IWL_CMD(CSR_HW_IF_CONFIG_REG); 1702 IWL_CMD(CSR_HW_IF_CONFIG_REG);
1816 IWL_CMD(CSR_INT_COALESCING); 1703 IWL_CMD(CSR_INT_COALESCING);
@@ -1838,6 +1725,7 @@ static const char *get_csr_string(int cmd)
1838 default: 1725 default:
1839 return "UNKNOWN"; 1726 return "UNKNOWN";
1840 } 1727 }
1728#undef IWL_CMD
1841} 1729}
1842 1730
1843void iwl_dump_csr(struct iwl_trans *trans) 1731void iwl_dump_csr(struct iwl_trans *trans)
@@ -1938,32 +1826,23 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1938 int ret; 1826 int ret;
1939 size_t bufsz; 1827 size_t bufsz;
1940 1828
1941 bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues; 1829 bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
1942 1830
1943 if (!trans_pcie->txq) { 1831 if (!trans_pcie->txq)
1944 IWL_ERR(trans, "txq not ready\n");
1945 return -EAGAIN; 1832 return -EAGAIN;
1946 } 1833
1947 buf = kzalloc(bufsz, GFP_KERNEL); 1834 buf = kzalloc(bufsz, GFP_KERNEL);
1948 if (!buf) 1835 if (!buf)
1949 return -ENOMEM; 1836 return -ENOMEM;
1950 1837
1951 for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) { 1838 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1952 txq = &trans_pcie->txq[cnt]; 1839 txq = &trans_pcie->txq[cnt];
1953 q = &txq->q; 1840 q = &txq->q;
1954 pos += scnprintf(buf + pos, bufsz - pos, 1841 pos += scnprintf(buf + pos, bufsz - pos,
1955 "hwq %.2d: read=%u write=%u stop=%d" 1842 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
1956 " swq_id=%#.2x (ac %d/hwq %d)\n",
1957 cnt, q->read_ptr, q->write_ptr, 1843 cnt, q->read_ptr, q->write_ptr,
1958 !!test_bit(cnt, trans_pcie->queue_stopped), 1844 !!test_bit(cnt, trans_pcie->queue_used),
1959 txq->swq_id, txq->swq_id & 3, 1845 !!test_bit(cnt, trans_pcie->queue_stopped));
1960 (txq->swq_id >> 2) & 0x1f);
1961 if (cnt >= 4)
1962 continue;
1963 /* for the ACs, display the stop count too */
1964 pos += scnprintf(buf + pos, bufsz - pos,
1965 " stop-count: %d\n",
1966 atomic_read(&trans_pcie->queue_stop_count[cnt]));
1967 } 1846 }
1968 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1847 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1969 kfree(buf); 1848 kfree(buf);
@@ -1997,44 +1876,6 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1997 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1876 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1998} 1877}
1999 1878
2000static ssize_t iwl_dbgfs_log_event_read(struct file *file,
2001 char __user *user_buf,
2002 size_t count, loff_t *ppos)
2003{
2004 struct iwl_trans *trans = file->private_data;
2005 char *buf;
2006 int pos = 0;
2007 ssize_t ret = -ENOMEM;
2008
2009 ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
2010 if (buf) {
2011 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2012 kfree(buf);
2013 }
2014 return ret;
2015}
2016
2017static ssize_t iwl_dbgfs_log_event_write(struct file *file,
2018 const char __user *user_buf,
2019 size_t count, loff_t *ppos)
2020{
2021 struct iwl_trans *trans = file->private_data;
2022 u32 event_log_flag;
2023 char buf[8];
2024 int buf_size;
2025
2026 memset(buf, 0, sizeof(buf));
2027 buf_size = min(count, sizeof(buf) - 1);
2028 if (copy_from_user(buf, user_buf, buf_size))
2029 return -EFAULT;
2030 if (sscanf(buf, "%d", &event_log_flag) != 1)
2031 return -EFAULT;
2032 if (event_log_flag == 1)
2033 iwl_dump_nic_event_log(trans, true, NULL, false);
2034
2035 return count;
2036}
2037
2038static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 1879static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2039 char __user *user_buf, 1880 char __user *user_buf,
2040 size_t count, loff_t *ppos) { 1881 size_t count, loff_t *ppos) {
@@ -2050,10 +1891,8 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2050 ssize_t ret; 1891 ssize_t ret;
2051 1892
2052 buf = kzalloc(bufsz, GFP_KERNEL); 1893 buf = kzalloc(bufsz, GFP_KERNEL);
2053 if (!buf) { 1894 if (!buf)
2054 IWL_ERR(trans, "Can not allocate Buffer\n");
2055 return -ENOMEM; 1895 return -ENOMEM;
2056 }
2057 1896
2058 pos += scnprintf(buf + pos, bufsz - pos, 1897 pos += scnprintf(buf + pos, bufsz - pos,
2059 "Interrupt Statistics Report:\n"); 1898 "Interrupt Statistics Report:\n");
@@ -2161,12 +2000,26 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2161 return ret; 2000 return ret;
2162} 2001}
2163 2002
2164DEBUGFS_READ_WRITE_FILE_OPS(log_event); 2003static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
2004 const char __user *user_buf,
2005 size_t count, loff_t *ppos)
2006{
2007 struct iwl_trans *trans = file->private_data;
2008
2009 if (!trans->op_mode)
2010 return -EAGAIN;
2011
2012 iwl_op_mode_nic_error(trans->op_mode);
2013
2014 return count;
2015}
2016
2165DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 2017DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2166DEBUGFS_READ_FILE_OPS(fh_reg); 2018DEBUGFS_READ_FILE_OPS(fh_reg);
2167DEBUGFS_READ_FILE_OPS(rx_queue); 2019DEBUGFS_READ_FILE_OPS(rx_queue);
2168DEBUGFS_READ_FILE_OPS(tx_queue); 2020DEBUGFS_READ_FILE_OPS(tx_queue);
2169DEBUGFS_WRITE_FILE_OPS(csr); 2021DEBUGFS_WRITE_FILE_OPS(csr);
2022DEBUGFS_WRITE_FILE_OPS(fw_restart);
2170 2023
2171/* 2024/*
2172 * Create the debugfs files and directories 2025 * Create the debugfs files and directories
@@ -2177,10 +2030,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2177{ 2030{
2178 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); 2031 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
2179 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); 2032 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
2180 DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
2181 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); 2033 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
2182 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); 2034 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
2183 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); 2035 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
2036 DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
2184 return 0; 2037 return 0;
2185} 2038}
2186#else 2039#else
@@ -2190,7 +2043,7 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2190 2043
2191#endif /*CONFIG_IWLWIFI_DEBUGFS */ 2044#endif /*CONFIG_IWLWIFI_DEBUGFS */
2192 2045
2193const struct iwl_trans_ops trans_ops_pcie = { 2046static const struct iwl_trans_ops trans_ops_pcie = {
2194 .start_hw = iwl_trans_pcie_start_hw, 2047 .start_hw = iwl_trans_pcie_start_hw,
2195 .stop_hw = iwl_trans_pcie_stop_hw, 2048 .stop_hw = iwl_trans_pcie_stop_hw,
2196 .fw_alive = iwl_trans_pcie_fw_alive, 2049 .fw_alive = iwl_trans_pcie_fw_alive,
@@ -2205,15 +2058,11 @@ const struct iwl_trans_ops trans_ops_pcie = {
2205 .reclaim = iwl_trans_pcie_reclaim, 2058 .reclaim = iwl_trans_pcie_reclaim,
2206 2059
2207 .tx_agg_disable = iwl_trans_pcie_tx_agg_disable, 2060 .tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
2208 .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
2209 .tx_agg_setup = iwl_trans_pcie_tx_agg_setup, 2061 .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
2210 2062
2211 .free = iwl_trans_pcie_free,
2212
2213 .dbgfs_register = iwl_trans_pcie_dbgfs_register, 2063 .dbgfs_register = iwl_trans_pcie_dbgfs_register,
2214 2064
2215 .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, 2065 .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
2216 .check_stuck_queue = iwl_trans_pcie_check_stuck_queue,
2217 2066
2218#ifdef CONFIG_PM_SLEEP 2067#ifdef CONFIG_PM_SLEEP
2219 .suspend = iwl_trans_pcie_suspend, 2068 .suspend = iwl_trans_pcie_suspend,
@@ -2223,11 +2072,12 @@ const struct iwl_trans_ops trans_ops_pcie = {
2223 .write32 = iwl_trans_pcie_write32, 2072 .write32 = iwl_trans_pcie_write32,
2224 .read32 = iwl_trans_pcie_read32, 2073 .read32 = iwl_trans_pcie_read32,
2225 .configure = iwl_trans_pcie_configure, 2074 .configure = iwl_trans_pcie_configure,
2075 .set_pmi = iwl_trans_pcie_set_pmi,
2226}; 2076};
2227 2077
2228struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd, 2078struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2229 struct pci_dev *pdev, 2079 const struct pci_device_id *ent,
2230 const struct pci_device_id *ent) 2080 const struct iwl_cfg *cfg)
2231{ 2081{
2232 struct iwl_trans_pcie *trans_pcie; 2082 struct iwl_trans_pcie *trans_pcie;
2233 struct iwl_trans *trans; 2083 struct iwl_trans *trans;
@@ -2243,7 +2093,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
2243 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2093 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2244 2094
2245 trans->ops = &trans_ops_pcie; 2095 trans->ops = &trans_ops_pcie;
2246 trans->shrd = shrd; 2096 trans->cfg = cfg;
2247 trans_pcie->trans = trans; 2097 trans_pcie->trans = trans;
2248 spin_lock_init(&trans_pcie->irq_lock); 2098 spin_lock_init(&trans_pcie->irq_lock);
2249 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 2099 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
@@ -2325,6 +2175,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
2325 2175
2326 /* Initialize the wait queue for commands */ 2176 /* Initialize the wait queue for commands */
2327 init_waitqueue_head(&trans->wait_command_queue); 2177 init_waitqueue_head(&trans->wait_command_queue);
2178 spin_lock_init(&trans->reg_lock);
2328 2179
2329 return trans; 2180 return trans;
2330 2181
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 0c81cbaa8088..79a1e7ae4995 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -66,8 +66,9 @@
66#include <linux/ieee80211.h> 66#include <linux/ieee80211.h>
67#include <linux/mm.h> /* for page_address */ 67#include <linux/mm.h> /* for page_address */
68 68
69#include "iwl-shared.h"
70#include "iwl-debug.h" 69#include "iwl-debug.h"
70#include "iwl-config.h"
71#include "iwl-fw.h"
71 72
72/** 73/**
73 * DOC: Transport layer - what is it ? 74 * DOC: Transport layer - what is it ?
@@ -104,13 +105,6 @@
104 * 6) Eventually, the free function will be called. 105 * 6) Eventually, the free function will be called.
105 */ 106 */
106 107
107struct iwl_priv;
108struct iwl_shared;
109struct iwl_op_mode;
110struct fw_img;
111struct sk_buff;
112struct dentry;
113
114/** 108/**
115 * DOC: Host command section 109 * DOC: Host command section
116 * 110 *
@@ -162,6 +156,8 @@ struct iwl_cmd_header {
162 156
163 157
164#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ 158#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
159#define FH_RSCSR_FRAME_INVALID 0x55550000
160#define FH_RSCSR_FRAME_ALIGN 0x40
165 161
166struct iwl_rx_packet { 162struct iwl_rx_packet {
167 /* 163 /*
@@ -260,27 +256,43 @@ static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
260 256
261struct iwl_rx_cmd_buffer { 257struct iwl_rx_cmd_buffer {
262 struct page *_page; 258 struct page *_page;
259 int _offset;
260 bool _page_stolen;
261 unsigned int truesize;
263}; 262};
264 263
265static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) 264static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
266{ 265{
267 return page_address(r->_page); 266 return (void *)((unsigned long)page_address(r->_page) + r->_offset);
267}
268
269static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
270{
271 return r->_offset;
268} 272}
269 273
270static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) 274static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
271{ 275{
272 struct page *p = r->_page; 276 r->_page_stolen = true;
273 r->_page = NULL; 277 get_page(r->_page);
274 return p; 278 return r->_page;
275} 279}
276 280
277#define MAX_NO_RECLAIM_CMDS 6 281#define MAX_NO_RECLAIM_CMDS 6
278 282
283/*
284 * Maximum number of HW queues the transport layer
285 * currently supports
286 */
287#define IWL_MAX_HW_QUEUES 32
288
279/** 289/**
280 * struct iwl_trans_config - transport configuration 290 * struct iwl_trans_config - transport configuration
281 * 291 *
282 * @op_mode: pointer to the upper layer. 292 * @op_mode: pointer to the upper layer.
283 * Must be set before any other call. 293 * @queue_to_fifo: queue to FIFO mapping to set up by
294 * default
295 * @n_queue_to_fifo: number of queues to set up
284 * @cmd_queue: the index of the command queue. 296 * @cmd_queue: the index of the command queue.
285 * Must be set before start_fw. 297 * Must be set before start_fw.
286 * @no_reclaim_cmds: Some devices erroneously don't set the 298 * @no_reclaim_cmds: Some devices erroneously don't set the
@@ -288,14 +300,29 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
288 * list of such notifications to filter. Max length is 300 * list of such notifications to filter. Max length is
289 * %MAX_NO_RECLAIM_CMDS. 301 * %MAX_NO_RECLAIM_CMDS.
290 * @n_no_reclaim_cmds: # of commands in list 302 * @n_no_reclaim_cmds: # of commands in list
303 * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs,
304 * if unset 4k will be the RX buffer size
305 * @queue_watchdog_timeout: time (in ms) after which queues
306 * are considered stuck and will trigger device restart
307 * @command_names: array of command names, must be 256 entries
308 * (one for each command); for debugging only
291 */ 309 */
292struct iwl_trans_config { 310struct iwl_trans_config {
293 struct iwl_op_mode *op_mode; 311 struct iwl_op_mode *op_mode;
312 const u8 *queue_to_fifo;
313 u8 n_queue_to_fifo;
314
294 u8 cmd_queue; 315 u8 cmd_queue;
295 const u8 *no_reclaim_cmds; 316 const u8 *no_reclaim_cmds;
296 int n_no_reclaim_cmds; 317 int n_no_reclaim_cmds;
318
319 bool rx_buf_size_8k;
320 unsigned int queue_watchdog_timeout;
321 const char **command_names;
297}; 322};
298 323
324struct iwl_trans;
325
299/** 326/**
300 * struct iwl_trans_ops - transport specific operations 327 * struct iwl_trans_ops - transport specific operations
301 * 328 *
@@ -304,7 +331,8 @@ struct iwl_trans_config {
304 * @start_hw: starts the HW- from that point on, the HW can send interrupts 331 * @start_hw: starts the HW- from that point on, the HW can send interrupts
305 * May sleep 332 * May sleep
306 * @stop_hw: stops the HW- from that point on, the HW will be in low power but 333 * @stop_hw: stops the HW- from that point on, the HW will be in low power but
307 * will still issue interrupt if the HW RF kill is triggered. 334 * will still issue interrupt if the HW RF kill is triggered unless
335 * op_mode_leaving is true.
308 * May sleep 336 * May sleep
309 * @start_fw: allocates and inits all the resources for the transport 337 * @start_fw: allocates and inits all the resources for the transport
310 * layer. Also kick a fw image. 338 * layer. Also kick a fw image.
@@ -322,18 +350,11 @@ struct iwl_trans_config {
322 * Must be atomic 350 * Must be atomic
323 * @reclaim: free packet until ssn. Returns a list of freed packets. 351 * @reclaim: free packet until ssn. Returns a list of freed packets.
324 * Must be atomic 352 * Must be atomic
325 * @tx_agg_alloc: allocate resources for a TX BA session
326 * Must be atomic
327 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is 353 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
328 * ready and a successful ADDBA response has been received. 354 * ready and a successful ADDBA response has been received.
329 * May sleep 355 * May sleep
330 * @tx_agg_disable: de-configure a Tx queue to send AMPDUs 356 * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
331 * Must be atomic 357 * Must be atomic
332 * @free: release all the ressource for the transport layer itself such as
333 * irq, tasklet etc... From this point on, the device may not issue
334 * any interrupt (incl. RFKILL).
335 * May sleep
336 * @check_stuck_queue: check if a specific queue is stuck
337 * @wait_tx_queue_empty: wait until all tx queues are empty 358 * @wait_tx_queue_empty: wait until all tx queues are empty
338 * May sleep 359 * May sleep
339 * @dbgfs_register: add the dbgfs files under this directory. Files will be 360 * @dbgfs_register: add the dbgfs files under this directory. Files will be
@@ -346,11 +367,12 @@ struct iwl_trans_config {
346 * @configure: configure parameters required by the transport layer from 367 * @configure: configure parameters required by the transport layer from
347 * the op_mode. May be called several times before start_fw, can't be 368 * the op_mode. May be called several times before start_fw, can't be
348 * called after that. 369 * called after that.
370 * @set_pmi: set the power pmi state
349 */ 371 */
350struct iwl_trans_ops { 372struct iwl_trans_ops {
351 373
352 int (*start_hw)(struct iwl_trans *iwl_trans); 374 int (*start_hw)(struct iwl_trans *iwl_trans);
353 void (*stop_hw)(struct iwl_trans *iwl_trans); 375 void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
354 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw); 376 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
355 void (*fw_alive)(struct iwl_trans *trans); 377 void (*fw_alive)(struct iwl_trans *trans);
356 void (*stop_device)(struct iwl_trans *trans); 378 void (*stop_device)(struct iwl_trans *trans);
@@ -360,23 +382,15 @@ struct iwl_trans_ops {
360 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); 382 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
361 383
362 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, 384 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
363 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, 385 struct iwl_device_cmd *dev_cmd, int queue);
364 u8 sta_id, u8 tid); 386 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
365 int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid, 387 struct sk_buff_head *skbs);
366 int txq_id, int ssn, struct sk_buff_head *skbs);
367 388
368 int (*tx_agg_disable)(struct iwl_trans *trans, 389 void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo,
369 int sta_id, int tid); 390 int sta_id, int tid, int frame_limit, u16 ssn);
370 int (*tx_agg_alloc)(struct iwl_trans *trans, 391 void (*tx_agg_disable)(struct iwl_trans *trans, int queue);
371 int sta_id, int tid);
372 void (*tx_agg_setup)(struct iwl_trans *trans,
373 enum iwl_rxon_context_id ctx, int sta_id, int tid,
374 int frame_limit, u16 ssn);
375
376 void (*free)(struct iwl_trans *trans);
377 392
378 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); 393 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
379 int (*check_stuck_queue)(struct iwl_trans *trans, int q);
380 int (*wait_tx_queue_empty)(struct iwl_trans *trans); 394 int (*wait_tx_queue_empty)(struct iwl_trans *trans);
381#ifdef CONFIG_PM_SLEEP 395#ifdef CONFIG_PM_SLEEP
382 int (*suspend)(struct iwl_trans *trans); 396 int (*suspend)(struct iwl_trans *trans);
@@ -387,6 +401,7 @@ struct iwl_trans_ops {
387 u32 (*read32)(struct iwl_trans *trans, u32 ofs); 401 u32 (*read32)(struct iwl_trans *trans, u32 ofs);
388 void (*configure)(struct iwl_trans *trans, 402 void (*configure)(struct iwl_trans *trans,
389 const struct iwl_trans_config *trans_cfg); 403 const struct iwl_trans_config *trans_cfg);
404 void (*set_pmi)(struct iwl_trans *trans, bool state);
390}; 405};
391 406
392/** 407/**
@@ -405,20 +420,19 @@ enum iwl_trans_state {
405 * 420 *
406 * @ops - pointer to iwl_trans_ops 421 * @ops - pointer to iwl_trans_ops
407 * @op_mode - pointer to the op_mode 422 * @op_mode - pointer to the op_mode
408 * @shrd - pointer to iwl_shared which holds shared data from the upper layer 423 * @cfg - pointer to the configuration
409 * @reg_lock - protect hw register access 424 * @reg_lock - protect hw register access
410 * @dev - pointer to struct device * that represents the device 425 * @dev - pointer to struct device * that represents the device
411 * @hw_id: a u32 with the ID of the device / subdevice. 426 * @hw_id: a u32 with the ID of the device / subdevice.
412 * Set during transport allocation. 427 * Set during transport allocation.
413 * @hw_id_str: a string with info about HW ID. Set during transport allocation. 428 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
414 * @nvm_device_type: indicates OTP or eeprom
415 * @pm_support: set to true in start_hw if link pm is supported 429 * @pm_support: set to true in start_hw if link pm is supported
416 * @wait_command_queue: the wait_queue for SYNC host commands 430 * @wait_command_queue: the wait_queue for SYNC host commands
417 */ 431 */
418struct iwl_trans { 432struct iwl_trans {
419 const struct iwl_trans_ops *ops; 433 const struct iwl_trans_ops *ops;
420 struct iwl_op_mode *op_mode; 434 struct iwl_op_mode *op_mode;
421 struct iwl_shared *shrd; 435 const struct iwl_cfg *cfg;
422 enum iwl_trans_state state; 436 enum iwl_trans_state state;
423 spinlock_t reg_lock; 437 spinlock_t reg_lock;
424 438
@@ -427,7 +441,6 @@ struct iwl_trans {
427 u32 hw_id; 441 u32 hw_id;
428 char hw_id_str[52]; 442 char hw_id_str[52];
429 443
430 int nvm_device_type;
431 bool pm_support; 444 bool pm_support;
432 445
433 wait_queue_head_t wait_command_queue; 446 wait_queue_head_t wait_command_queue;
@@ -456,11 +469,12 @@ static inline int iwl_trans_start_hw(struct iwl_trans *trans)
456 return trans->ops->start_hw(trans); 469 return trans->ops->start_hw(trans);
457} 470}
458 471
459static inline void iwl_trans_stop_hw(struct iwl_trans *trans) 472static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
473 bool op_mode_leaving)
460{ 474{
461 might_sleep(); 475 might_sleep();
462 476
463 trans->ops->stop_hw(trans); 477 trans->ops->stop_hw(trans, op_mode_leaving);
464 478
465 trans->state = IWL_TRANS_NO_FW; 479 trans->state = IWL_TRANS_NO_FW;
466} 480}
@@ -507,60 +521,42 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
507} 521}
508 522
509static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, 523static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
510 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, 524 struct iwl_device_cmd *dev_cmd, int queue)
511 u8 sta_id, u8 tid)
512{
513 if (trans->state != IWL_TRANS_FW_ALIVE)
514 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
515
516 return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
517}
518
519static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
520 int tid, int txq_id, int ssn,
521 struct sk_buff_head *skbs)
522{ 525{
523 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 526 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
524 "%s bad state = %d", __func__, trans->state); 527 "%s bad state = %d", __func__, trans->state);
525 528
526 return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, skbs); 529 return trans->ops->tx(trans, skb, dev_cmd, queue);
527} 530}
528 531
529static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans, 532static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
530 int sta_id, int tid) 533 int ssn, struct sk_buff_head *skbs)
531{ 534{
532 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 535 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
533 "%s bad state = %d", __func__, trans->state); 536 "%s bad state = %d", __func__, trans->state);
534 537
535 return trans->ops->tx_agg_disable(trans, sta_id, tid); 538 trans->ops->reclaim(trans, queue, ssn, skbs);
536} 539}
537 540
538static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans, 541static inline void iwl_trans_tx_agg_disable(struct iwl_trans *trans, int queue)
539 int sta_id, int tid)
540{ 542{
541 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 543 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
542 "%s bad state = %d", __func__, trans->state); 544 "%s bad state = %d", __func__, trans->state);
543 545
544 return trans->ops->tx_agg_alloc(trans, sta_id, tid); 546 trans->ops->tx_agg_disable(trans, queue);
545} 547}
546 548
547 549static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue,
548static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, 550 int fifo, int sta_id, int tid,
549 enum iwl_rxon_context_id ctx, 551 int frame_limit, u16 ssn)
550 int sta_id, int tid,
551 int frame_limit, u16 ssn)
552{ 552{
553 might_sleep(); 553 might_sleep();
554 554
555 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 555 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
556 "%s bad state = %d", __func__, trans->state); 556 "%s bad state = %d", __func__, trans->state);
557 557
558 trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn); 558 trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid,
559} 559 frame_limit, ssn);
560
561static inline void iwl_trans_free(struct iwl_trans *trans)
562{
563 trans->ops->free(trans);
564} 560}
565 561
566static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) 562static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -571,13 +567,6 @@ static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
571 return trans->ops->wait_tx_queue_empty(trans); 567 return trans->ops->wait_tx_queue_empty(trans);
572} 568}
573 569
574static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
575{
576 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
577 "%s bad state = %d", __func__, trans->state);
578
579 return trans->ops->check_stuck_queue(trans, q);
580}
581static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, 570static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
582 struct dentry *dir) 571 struct dentry *dir)
583{ 572{
@@ -611,20 +600,15 @@ static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
611 return trans->ops->read32(trans, ofs); 600 return trans->ops->read32(trans, ofs);
612} 601}
613 602
603static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
604{
605 trans->ops->set_pmi(trans, state);
606}
607
614/***************************************************** 608/*****************************************************
615* Transport layers implementations + their allocation function 609* driver (transport) register/unregister functions
616******************************************************/ 610******************************************************/
617struct pci_dev;
618struct pci_device_id;
619extern const struct iwl_trans_ops trans_ops_pcie;
620struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
621 struct pci_dev *pdev,
622 const struct pci_device_id *ent);
623int __must_check iwl_pci_register_driver(void); 611int __must_check iwl_pci_register_driver(void);
624void iwl_pci_unregister_driver(void); 612void iwl_pci_unregister_driver(void);
625 613
626extern const struct iwl_trans_ops trans_ops_idi;
627struct iwl_trans *iwl_trans_idi_alloc(struct iwl_shared *shrd,
628 void *pdev_void,
629 const void *ent_void);
630#endif /* __iwl_trans_h__ */ 614#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
index 252828728837..bc40dc68b0f4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -31,7 +31,6 @@
31#include <linux/init.h> 31#include <linux/init.h>
32 32
33#include "iwl-dev.h" 33#include "iwl-dev.h"
34#include "iwl-core.h"
35#include "iwl-io.h" 34#include "iwl-io.h"
36#include "iwl-agn-hw.h" 35#include "iwl-agn-hw.h"
37#include "iwl-agn.h" 36#include "iwl-agn.h"
@@ -40,37 +39,6 @@
40#include "iwl-fh.h" 39#include "iwl-fh.h"
41#include "iwl-op-mode.h" 40#include "iwl-op-mode.h"
42 41
43static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
44 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
45 0, COEX_UNASSOC_IDLE_FLAGS},
46 {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
47 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
48 {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
49 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
50 {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
51 0, COEX_CALIBRATION_FLAGS},
52 {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
53 0, COEX_PERIODIC_CALIBRATION_FLAGS},
54 {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
55 0, COEX_CONNECTION_ESTAB_FLAGS},
56 {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
57 0, COEX_ASSOCIATED_IDLE_FLAGS},
58 {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
59 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
60 {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
61 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
62 {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
63 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
64 {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
65 {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
66 {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
67 0, COEX_STAND_ALONE_DEBUG_FLAGS},
68 {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
69 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
70 {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
71 {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
72};
73
74/****************************************************************************** 42/******************************************************************************
75 * 43 *
76 * uCode download functions 44 * uCode download functions
@@ -93,7 +61,7 @@ static int iwl_set_Xtal_calib(struct iwl_priv *priv)
93{ 61{
94 struct iwl_calib_xtal_freq_cmd cmd; 62 struct iwl_calib_xtal_freq_cmd cmd;
95 __le16 *xtal_calib = 63 __le16 *xtal_calib =
96 (__le16 *)iwl_eeprom_query_addr(priv->shrd, EEPROM_XTAL); 64 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
97 65
98 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); 66 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
99 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); 67 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
@@ -105,8 +73,7 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
105{ 73{
106 struct iwl_calib_temperature_offset_cmd cmd; 74 struct iwl_calib_temperature_offset_cmd cmd;
107 __le16 *offset_calib = 75 __le16 *offset_calib =
108 (__le16 *)iwl_eeprom_query_addr(priv->shrd, 76 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
109 EEPROM_RAW_TEMPERATURE);
110 77
111 memset(&cmd, 0, sizeof(cmd)); 78 memset(&cmd, 0, sizeof(cmd));
112 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 79 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
@@ -122,16 +89,15 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
122static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv) 89static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
123{ 90{
124 struct iwl_calib_temperature_offset_v2_cmd cmd; 91 struct iwl_calib_temperature_offset_v2_cmd cmd;
125 __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv->shrd, 92 __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
126 EEPROM_KELVIN_TEMPERATURE); 93 EEPROM_KELVIN_TEMPERATURE);
127 __le16 *offset_calib_low = 94 __le16 *offset_calib_low =
128 (__le16 *)iwl_eeprom_query_addr(priv->shrd, 95 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
129 EEPROM_RAW_TEMPERATURE);
130 struct iwl_eeprom_calib_hdr *hdr; 96 struct iwl_eeprom_calib_hdr *hdr;
131 97
132 memset(&cmd, 0, sizeof(cmd)); 98 memset(&cmd, 0, sizeof(cmd));
133 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 99 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
134 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv->shrd, 100 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
135 EEPROM_CALIB_ALL); 101 EEPROM_CALIB_ALL);
136 memcpy(&cmd.radio_sensor_offset_high, offset_calib_high, 102 memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
137 sizeof(*offset_calib_high)); 103 sizeof(*offset_calib_high));
@@ -174,30 +140,12 @@ static int iwl_send_calib_cfg(struct iwl_priv *priv)
174 return iwl_dvm_send_cmd(priv, &cmd); 140 return iwl_dvm_send_cmd(priv, &cmd);
175} 141}
176 142
177int iwlagn_rx_calib_result(struct iwl_priv *priv,
178 struct iwl_rx_cmd_buffer *rxb,
179 struct iwl_device_cmd *cmd)
180{
181 struct iwl_rx_packet *pkt = rxb_addr(rxb);
182 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->data;
183 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
184
185 /* reduce the size of the length field itself */
186 len -= 4;
187
188 if (iwl_calib_set(priv, hdr, len))
189 IWL_ERR(priv, "Failed to record calibration data %d\n",
190 hdr->op_code);
191
192 return 0;
193}
194
195int iwl_init_alive_start(struct iwl_priv *priv) 143int iwl_init_alive_start(struct iwl_priv *priv)
196{ 144{
197 int ret; 145 int ret;
198 146
199 if (cfg(priv)->bt_params && 147 if (priv->cfg->bt_params &&
200 cfg(priv)->bt_params->advanced_bt_coexist) { 148 priv->cfg->bt_params->advanced_bt_coexist) {
201 /* 149 /*
202 * Tell uCode we are ready to perform calibration 150 * Tell uCode we are ready to perform calibration
203 * need to perform this before any calibration 151 * need to perform this before any calibration
@@ -219,8 +167,8 @@ int iwl_init_alive_start(struct iwl_priv *priv)
219 * temperature offset calibration is only needed for runtime ucode, 167 * temperature offset calibration is only needed for runtime ucode,
220 * so prepare the value now. 168 * so prepare the value now.
221 */ 169 */
222 if (cfg(priv)->need_temp_offset_calib) { 170 if (priv->cfg->need_temp_offset_calib) {
223 if (cfg(priv)->temp_offset_v2) 171 if (priv->cfg->temp_offset_v2)
224 return iwl_set_temperature_offset_calib_v2(priv); 172 return iwl_set_temperature_offset_calib_v2(priv);
225 else 173 else
226 return iwl_set_temperature_offset_calib(priv); 174 return iwl_set_temperature_offset_calib(priv);
@@ -229,29 +177,13 @@ int iwl_init_alive_start(struct iwl_priv *priv)
229 return 0; 177 return 0;
230} 178}
231 179
232static int iwl_send_wimax_coex(struct iwl_priv *priv) 180int iwl_send_wimax_coex(struct iwl_priv *priv)
233{ 181{
234 struct iwl_wimax_coex_cmd coex_cmd; 182 struct iwl_wimax_coex_cmd coex_cmd;
235 183
236 if (cfg(priv)->base_params->support_wimax_coexist) { 184 /* coexistence is disabled */
237 /* UnMask wake up src at associated sleep */ 185 memset(&coex_cmd, 0, sizeof(coex_cmd));
238 coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
239 186
240 /* UnMask wake up src at unassociated sleep */
241 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
242 memcpy(coex_cmd.sta_prio, cu_priorities,
243 sizeof(struct iwl_wimax_coex_event_entry) *
244 COEX_NUM_OF_EVENTS);
245
246 /* enabling the coexistence feature */
247 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
248
249 /* enabling the priorities tables */
250 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
251 } else {
252 /* coexistence is disabled */
253 memset(&coex_cmd, 0, sizeof(coex_cmd));
254 }
255 return iwl_dvm_send_cmd_pdu(priv, 187 return iwl_dvm_send_cmd_pdu(priv,
256 COEX_PRIORITY_TABLE_CMD, CMD_SYNC, 188 COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
257 sizeof(coex_cmd), &coex_cmd); 189 sizeof(coex_cmd), &coex_cmd);
@@ -311,7 +243,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
311{ 243{
312 int ret; 244 int ret;
313 245
314 iwl_trans_fw_alive(trans(priv)); 246 iwl_trans_fw_alive(priv->trans);
315 247
316 priv->passive_no_rx = false; 248 priv->passive_no_rx = false;
317 priv->transport_queue_stop = 0; 249 priv->transport_queue_stop = 0;
@@ -320,7 +252,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
320 if (ret) 252 if (ret)
321 return ret; 253 return ret;
322 254
323 if (!cfg(priv)->no_xtal_calib) { 255 if (!priv->cfg->no_xtal_calib) {
324 ret = iwl_set_Xtal_calib(priv); 256 ret = iwl_set_Xtal_calib(priv);
325 if (ret) 257 if (ret)
326 return ret; 258 return ret;
@@ -349,9 +281,9 @@ static int iwl_verify_sec_sparse(struct iwl_priv *priv,
349 /* read data comes through single port, auto-incr addr */ 281 /* read data comes through single port, auto-incr addr */
350 /* NOTE: Use the debugless read so we don't flood kernel log 282 /* NOTE: Use the debugless read so we don't flood kernel log
351 * if IWL_DL_IO is set */ 283 * if IWL_DL_IO is set */
352 iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR, 284 iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
353 i + fw_desc->offset); 285 i + fw_desc->offset);
354 val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 286 val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
355 if (val != le32_to_cpu(*image)) 287 if (val != le32_to_cpu(*image))
356 return -EIO; 288 return -EIO;
357 } 289 }
@@ -370,14 +302,14 @@ static void iwl_print_mismatch_sec(struct iwl_priv *priv,
370 302
371 IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); 303 IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
372 304
373 iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR, 305 iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
374 fw_desc->offset); 306 fw_desc->offset);
375 307
376 for (offs = 0; 308 for (offs = 0;
377 offs < len && errors < 20; 309 offs < len && errors < 20;
378 offs += sizeof(u32), image++) { 310 offs += sizeof(u32), image++) {
379 /* read data comes through single port, auto-incr addr */ 311 /* read data comes through single port, auto-incr addr */
380 val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 312 val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
381 if (val != le32_to_cpu(*image)) { 313 if (val != le32_to_cpu(*image)) {
382 IWL_ERR(priv, "uCode INST section at " 314 IWL_ERR(priv, "uCode INST section at "
383 "offset 0x%x, is 0x%x, s/b 0x%x\n", 315 "offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -417,9 +349,8 @@ struct iwl_alive_data {
417 u8 subtype; 349 u8 subtype;
418}; 350};
419 351
420static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, 352static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
421 struct iwl_rx_packet *pkt, 353 struct iwl_rx_packet *pkt, void *data)
422 void *data)
423{ 354{
424 struct iwl_priv *priv = 355 struct iwl_priv *priv =
425 container_of(notif_wait, struct iwl_priv, notif_wait); 356 container_of(notif_wait, struct iwl_priv, notif_wait);
@@ -433,13 +364,15 @@ static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
433 palive->is_valid, palive->ver_type, 364 palive->is_valid, palive->ver_type,
434 palive->ver_subtype); 365 palive->ver_subtype);
435 366
436 priv->shrd->device_pointers.error_event_table = 367 priv->device_pointers.error_event_table =
437 le32_to_cpu(palive->error_event_table_ptr); 368 le32_to_cpu(palive->error_event_table_ptr);
438 priv->shrd->device_pointers.log_event_table = 369 priv->device_pointers.log_event_table =
439 le32_to_cpu(palive->log_event_table_ptr); 370 le32_to_cpu(palive->log_event_table_ptr);
440 371
441 alive_data->subtype = palive->ver_subtype; 372 alive_data->subtype = palive->ver_subtype;
442 alive_data->valid = palive->is_valid == UCODE_VALID_OK; 373 alive_data->valid = palive->is_valid == UCODE_VALID_OK;
374
375 return true;
443} 376}
444 377
445#define UCODE_ALIVE_TIMEOUT HZ 378#define UCODE_ALIVE_TIMEOUT HZ
@@ -453,9 +386,10 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
453 const struct fw_img *fw; 386 const struct fw_img *fw;
454 int ret; 387 int ret;
455 enum iwl_ucode_type old_type; 388 enum iwl_ucode_type old_type;
389 static const u8 alive_cmd[] = { REPLY_ALIVE };
456 390
457 old_type = priv->shrd->ucode_type; 391 old_type = priv->cur_ucode;
458 priv->shrd->ucode_type = ucode_type; 392 priv->cur_ucode = ucode_type;
459 fw = iwl_get_ucode_image(priv, ucode_type); 393 fw = iwl_get_ucode_image(priv, ucode_type);
460 394
461 priv->ucode_loaded = false; 395 priv->ucode_loaded = false;
@@ -463,12 +397,13 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
463 if (!fw) 397 if (!fw)
464 return -EINVAL; 398 return -EINVAL;
465 399
466 iwl_init_notification_wait(&priv->notif_wait, &alive_wait, REPLY_ALIVE, 400 iwl_init_notification_wait(&priv->notif_wait, &alive_wait,
467 iwl_alive_fn, &alive_data); 401 alive_cmd, ARRAY_SIZE(alive_cmd),
402 iwl_alive_fn, &alive_data);
468 403
469 ret = iwl_trans_start_fw(trans(priv), fw); 404 ret = iwl_trans_start_fw(priv->trans, fw);
470 if (ret) { 405 if (ret) {
471 priv->shrd->ucode_type = old_type; 406 priv->cur_ucode = old_type;
472 iwl_remove_notification(&priv->notif_wait, &alive_wait); 407 iwl_remove_notification(&priv->notif_wait, &alive_wait);
473 return ret; 408 return ret;
474 } 409 }
@@ -480,13 +415,13 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
480 ret = iwl_wait_notification(&priv->notif_wait, &alive_wait, 415 ret = iwl_wait_notification(&priv->notif_wait, &alive_wait,
481 UCODE_ALIVE_TIMEOUT); 416 UCODE_ALIVE_TIMEOUT);
482 if (ret) { 417 if (ret) {
483 priv->shrd->ucode_type = old_type; 418 priv->cur_ucode = old_type;
484 return ret; 419 return ret;
485 } 420 }
486 421
487 if (!alive_data.valid) { 422 if (!alive_data.valid) {
488 IWL_ERR(priv, "Loaded ucode is not valid!\n"); 423 IWL_ERR(priv, "Loaded ucode is not valid!\n");
489 priv->shrd->ucode_type = old_type; 424 priv->cur_ucode = old_type;
490 return -EIO; 425 return -EIO;
491 } 426 }
492 427
@@ -498,7 +433,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
498 if (ucode_type != IWL_UCODE_WOWLAN) { 433 if (ucode_type != IWL_UCODE_WOWLAN) {
499 ret = iwl_verify_ucode(priv, ucode_type); 434 ret = iwl_verify_ucode(priv, ucode_type);
500 if (ret) { 435 if (ret) {
501 priv->shrd->ucode_type = old_type; 436 priv->cur_ucode = old_type;
502 return ret; 437 return ret;
503 } 438 }
504 439
@@ -510,7 +445,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
510 if (ret) { 445 if (ret) {
511 IWL_WARN(priv, 446 IWL_WARN(priv,
512 "Could not complete ALIVE transition: %d\n", ret); 447 "Could not complete ALIVE transition: %d\n", ret);
513 priv->shrd->ucode_type = old_type; 448 priv->cur_ucode = old_type;
514 return ret; 449 return ret;
515 } 450 }
516 451
@@ -519,9 +454,38 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
519 return 0; 454 return 0;
520} 455}
521 456
457static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
458 struct iwl_rx_packet *pkt, void *data)
459{
460 struct iwl_priv *priv = data;
461 struct iwl_calib_hdr *hdr;
462 int len;
463
464 if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
465 WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
466 return true;
467 }
468
469 hdr = (struct iwl_calib_hdr *)pkt->data;
470 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
471
472 /* reduce the size by the length field itself */
473 len -= sizeof(__le32);
474
475 if (iwl_calib_set(priv, hdr, len))
476 IWL_ERR(priv, "Failed to record calibration data %d\n",
477 hdr->op_code);
478
479 return false;
480}
481
522int iwl_run_init_ucode(struct iwl_priv *priv) 482int iwl_run_init_ucode(struct iwl_priv *priv)
523{ 483{
524 struct iwl_notification_wait calib_wait; 484 struct iwl_notification_wait calib_wait;
485 static const u8 calib_complete[] = {
486 CALIBRATION_RES_NOTIFICATION,
487 CALIBRATION_COMPLETE_NOTIFICATION
488 };
525 int ret; 489 int ret;
526 490
527 lockdep_assert_held(&priv->mutex); 491 lockdep_assert_held(&priv->mutex);
@@ -534,8 +498,8 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
534 return 0; 498 return 0;
535 499
536 iwl_init_notification_wait(&priv->notif_wait, &calib_wait, 500 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
537 CALIBRATION_COMPLETE_NOTIFICATION, 501 calib_complete, ARRAY_SIZE(calib_complete),
538 NULL, NULL); 502 iwlagn_wait_calib, priv);
539 503
540 /* Will also start the device */ 504 /* Will also start the device */
541 ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT); 505 ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
@@ -561,7 +525,7 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
561 iwl_remove_notification(&priv->notif_wait, &calib_wait); 525 iwl_remove_notification(&priv->notif_wait, &calib_wait);
562 out: 526 out:
563 /* Whatever happened, stop the device */ 527 /* Whatever happened, stop the device */
564 iwl_trans_stop_device(trans(priv)); 528 iwl_trans_stop_device(priv->trans);
565 priv->ucode_loaded = false; 529 priv->ucode_loaded = false;
566 530
567 return ret; 531 return ret;
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index 03f998d098c5..7107ce53d4d4 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -1,5 +1,5 @@
1config IWM 1config IWM
2 tristate "Intel Wireless Multicomm 3200 WiFi driver" 2 tristate "Intel Wireless Multicomm 3200 WiFi driver (EXPERIMENTAL)"
3 depends on MMC && EXPERIMENTAL 3 depends on MMC && EXPERIMENTAL
4 depends on CFG80211 4 depends on CFG80211
5 select FW_LOADER 5 select FW_LOADER
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index f7d01bfa2e4a..eac72f7bd341 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -6,6 +6,7 @@ libertas-y += ethtool.o
6libertas-y += main.o 6libertas-y += main.o
7libertas-y += rx.o 7libertas-y += rx.o
8libertas-y += tx.o 8libertas-y += tx.o
9libertas-y += firmware.o
9libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o 10libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
10 11
11usb8xxx-objs += if_usb.o 12usb8xxx-objs += if_usb.o
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 3fa1ecebadfd..2fa879b015b6 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -103,7 +103,7 @@ static const u32 cipher_suites[] = {
103 * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1 103 * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1
104 * in the firmware spec 104 * in the firmware spec
105 */ 105 */
106static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type) 106static int lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
107{ 107{
108 int ret = -ENOTSUPP; 108 int ret = -ENOTSUPP;
109 109
@@ -1411,7 +1411,12 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
1411 goto done; 1411 goto done;
1412 } 1412 }
1413 1413
1414 lbs_set_authtype(priv, sme); 1414 ret = lbs_set_authtype(priv, sme);
1415 if (ret == -ENOTSUPP) {
1416 wiphy_err(wiphy, "unsupported authtype 0x%x\n", sme->auth_type);
1417 goto done;
1418 }
1419
1415 lbs_set_radio(priv, preamble, 1); 1420 lbs_set_radio(priv, preamble, 1);
1416 1421
1417 /* Do the actual association */ 1422 /* Do the actual association */
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index bc951ab4b681..84a3aa7ac570 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -19,6 +19,10 @@ struct lbs_fw_table {
19}; 19};
20 20
21struct lbs_private; 21struct lbs_private;
22typedef void (*lbs_fw_cb)(struct lbs_private *priv, int ret,
23 const struct firmware *helper, const struct firmware *mainfw);
24
25struct lbs_private;
22struct sk_buff; 26struct sk_buff;
23struct net_device; 27struct net_device;
24struct cmd_ds_command; 28struct cmd_ds_command;
@@ -66,10 +70,13 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
66u32 lbs_fw_index_to_data_rate(u8 index); 70u32 lbs_fw_index_to_data_rate(u8 index);
67u8 lbs_data_rate_to_fw_index(u32 rate); 71u8 lbs_data_rate_to_fw_index(u32 rate);
68 72
69int lbs_get_firmware(struct device *dev, const char *user_helper, 73int lbs_get_firmware(struct device *dev, u32 card_model,
70 const char *user_mainfw, u32 card_model,
71 const struct lbs_fw_table *fw_table, 74 const struct lbs_fw_table *fw_table,
72 const struct firmware **helper, 75 const struct firmware **helper,
73 const struct firmware **mainfw); 76 const struct firmware **mainfw);
77int lbs_get_firmware_async(struct lbs_private *priv, struct device *device,
78 u32 card_model, const struct lbs_fw_table *fw_table,
79 lbs_fw_cb callback);
80void lbs_wait_for_firmware_load(struct lbs_private *priv);
74 81
75#endif 82#endif
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f3fd447131c2..672005430aca 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -7,6 +7,7 @@
7#define _LBS_DEV_H_ 7#define _LBS_DEV_H_
8 8
9#include "defs.h" 9#include "defs.h"
10#include "decl.h"
10#include "host.h" 11#include "host.h"
11 12
12#include <linux/kfifo.h> 13#include <linux/kfifo.h>
@@ -180,6 +181,15 @@ struct lbs_private {
180 wait_queue_head_t scan_q; 181 wait_queue_head_t scan_q;
181 /* Whether the scan was initiated internally and not by cfg80211 */ 182 /* Whether the scan was initiated internally and not by cfg80211 */
182 bool internal_scan; 183 bool internal_scan;
184
185 /* Firmware load */
186 u32 fw_model;
187 wait_queue_head_t fw_waitq;
188 struct device *fw_device;
189 const struct firmware *helper_fw;
190 const struct lbs_fw_table *fw_table;
191 const struct lbs_fw_table *fw_iter;
192 lbs_fw_cb fw_callback;
183}; 193};
184 194
185extern struct cmd_confirm_sleep confirm_sleep; 195extern struct cmd_confirm_sleep confirm_sleep;
diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c
new file mode 100644
index 000000000000..601f2075355e
--- /dev/null
+++ b/drivers/net/wireless/libertas/firmware.c
@@ -0,0 +1,224 @@
1/*
2 * Firmware loading and handling functions.
3 */
4
5#include <linux/sched.h>
6#include <linux/firmware.h>
7#include <linux/firmware.h>
8#include <linux/module.h>
9#include <linux/sched.h>
10
11#include "dev.h"
12#include "decl.h"
13
14static void load_next_firmware_from_table(struct lbs_private *private);
15
16static void lbs_fw_loaded(struct lbs_private *priv, int ret,
17 const struct firmware *helper, const struct firmware *mainfw)
18{
19 unsigned long flags;
20
21 lbs_deb_fw("firmware load complete, code %d\n", ret);
22
23 /* User must free helper/mainfw */
24 priv->fw_callback(priv, ret, helper, mainfw);
25
26 spin_lock_irqsave(&priv->driver_lock, flags);
27 priv->fw_callback = NULL;
28 wake_up(&priv->fw_waitq);
29 spin_unlock_irqrestore(&priv->driver_lock, flags);
30}
31
32static void do_load_firmware(struct lbs_private *priv, const char *name,
33 void (*cb)(const struct firmware *fw, void *context))
34{
35 int ret;
36
37 lbs_deb_fw("Requesting %s\n", name);
38 ret = request_firmware_nowait(THIS_MODULE, true, name,
39 priv->fw_device, GFP_KERNEL, priv, cb);
40 if (ret) {
41 lbs_deb_fw("request_firmware_nowait error %d\n", ret);
42 lbs_fw_loaded(priv, ret, NULL, NULL);
43 }
44}
45
46static void main_firmware_cb(const struct firmware *firmware, void *context)
47{
48 struct lbs_private *priv = context;
49
50 if (!firmware) {
51 /* Failed to find firmware: try next table entry */
52 load_next_firmware_from_table(priv);
53 return;
54 }
55
56 /* Firmware found! */
57 lbs_fw_loaded(priv, 0, priv->helper_fw, firmware);
58}
59
60static void helper_firmware_cb(const struct firmware *firmware, void *context)
61{
62 struct lbs_private *priv = context;
63
64 if (!firmware) {
65 /* Failed to find firmware: try next table entry */
66 load_next_firmware_from_table(priv);
67 return;
68 }
69
70 /* Firmware found! */
71 if (priv->fw_iter->fwname) {
72 priv->helper_fw = firmware;
73 do_load_firmware(priv, priv->fw_iter->fwname, main_firmware_cb);
74 } else {
75 /* No main firmware needed for this helper --> success! */
76 lbs_fw_loaded(priv, 0, firmware, NULL);
77 }
78}
79
80static void load_next_firmware_from_table(struct lbs_private *priv)
81{
82 const struct lbs_fw_table *iter;
83
84 if (!priv->fw_iter)
85 iter = priv->fw_table;
86 else
87 iter = ++priv->fw_iter;
88
89 if (priv->helper_fw) {
90 release_firmware(priv->helper_fw);
91 priv->helper_fw = NULL;
92 }
93
94next:
95 if (!iter->helper) {
96 /* End of table hit. */
97 lbs_fw_loaded(priv, -ENOENT, NULL, NULL);
98 return;
99 }
100
101 if (iter->model != priv->fw_model) {
102 iter++;
103 goto next;
104 }
105
106 priv->fw_iter = iter;
107 do_load_firmware(priv, iter->helper, helper_firmware_cb);
108}
109
110void lbs_wait_for_firmware_load(struct lbs_private *priv)
111{
112 wait_event(priv->fw_waitq, priv->fw_callback == NULL);
113}
114
115/**
116 * lbs_get_firmware_async - Retrieves firmware asynchronously. Can load
117 * either a helper firmware and a main firmware (2-stage), or just the helper.
118 *
119 * @priv: Pointer to lbs_private instance
120 * @dev: A pointer to &device structure
121 * @card_model: Bus-specific card model ID used to filter firmware table
122 * elements
123 * @fw_table: Table of firmware file names and device model numbers
124 * terminated by an entry with a NULL helper name
125 * @callback: User callback to invoke when firmware load succeeds or fails.
126 */
127int lbs_get_firmware_async(struct lbs_private *priv, struct device *device,
128 u32 card_model, const struct lbs_fw_table *fw_table,
129 lbs_fw_cb callback)
130{
131 unsigned long flags;
132
133 spin_lock_irqsave(&priv->driver_lock, flags);
134 if (priv->fw_callback) {
135 lbs_deb_fw("firmware load already in progress\n");
136 spin_unlock_irqrestore(&priv->driver_lock, flags);
137 return -EBUSY;
138 }
139
140 priv->fw_device = device;
141 priv->fw_callback = callback;
142 priv->fw_table = fw_table;
143 priv->fw_iter = NULL;
144 priv->fw_model = card_model;
145 spin_unlock_irqrestore(&priv->driver_lock, flags);
146
147 lbs_deb_fw("Starting async firmware load\n");
148 load_next_firmware_from_table(priv);
149 return 0;
150}
151EXPORT_SYMBOL_GPL(lbs_get_firmware_async);
152
153/**
154 * lbs_get_firmware - Retrieves two-stage firmware
155 *
156 * @dev: A pointer to &device structure
157 * @card_model: Bus-specific card model ID used to filter firmware table
158 * elements
159 * @fw_table: Table of firmware file names and device model numbers
160 * terminated by an entry with a NULL helper name
161 * @helper: On success, the helper firmware; caller must free
162 * @mainfw: On success, the main firmware; caller must free
163 *
164 * Deprecated: use lbs_get_firmware_async() instead.
165 *
166 * returns: 0 on success, non-zero on failure
167 */
168int lbs_get_firmware(struct device *dev, u32 card_model,
169 const struct lbs_fw_table *fw_table,
170 const struct firmware **helper,
171 const struct firmware **mainfw)
172{
173 const struct lbs_fw_table *iter;
174 int ret;
175
176 BUG_ON(helper == NULL);
177 BUG_ON(mainfw == NULL);
178
179 /* Search for firmware to use from the table. */
180 iter = fw_table;
181 while (iter && iter->helper) {
182 if (iter->model != card_model)
183 goto next;
184
185 if (*helper == NULL) {
186 ret = request_firmware(helper, iter->helper, dev);
187 if (ret)
188 goto next;
189
190 /* If the device has one-stage firmware (ie cf8305) and
191 * we've got it then we don't need to bother with the
192 * main firmware.
193 */
194 if (iter->fwname == NULL)
195 return 0;
196 }
197
198 if (*mainfw == NULL) {
199 ret = request_firmware(mainfw, iter->fwname, dev);
200 if (ret) {
201 /* Clear the helper to ensure we don't have
202 * mismatched firmware pairs.
203 */
204 release_firmware(*helper);
205 *helper = NULL;
206 }
207 }
208
209 if (*helper && *mainfw)
210 return 0;
211
212 next:
213 iter++;
214 }
215
216 /* Failed */
217 release_firmware(*helper);
218 *helper = NULL;
219 release_firmware(*mainfw);
220 *mainfw = NULL;
221
222 return -ENOENT;
223}
224EXPORT_SYMBOL_GPL(lbs_get_firmware);
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 234ee88dec95..16beaf39dc53 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -738,6 +738,50 @@ done:
738 return ret; 738 return ret;
739} 739}
740 740
741static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
742 const struct firmware *helper,
743 const struct firmware *mainfw)
744{
745 struct if_cs_card *card = priv->card;
746
747 if (ret) {
748 pr_err("failed to find firmware (%d)\n", ret);
749 return;
750 }
751
752 /* Load the firmware */
753 ret = if_cs_prog_helper(card, helper);
754 if (ret == 0 && (card->model != MODEL_8305))
755 ret = if_cs_prog_real(card, mainfw);
756 if (ret)
757 goto out;
758
759 /* Now actually get the IRQ */
760 ret = request_irq(card->p_dev->irq, if_cs_interrupt,
761 IRQF_SHARED, DRV_NAME, card);
762 if (ret) {
763 pr_err("error in request_irq\n");
764 goto out;
765 }
766
767 /*
768 * Clear any interrupt cause that happened while sending
769 * firmware/initializing card
770 */
771 if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK);
772 if_cs_enable_ints(card);
773
774 /* And finally bring the card up */
775 priv->fw_ready = 1;
776 if (lbs_start_card(priv) != 0) {
777 pr_err("could not activate card\n");
778 free_irq(card->p_dev->irq, card);
779 }
780
781out:
782 release_firmware(helper);
783 release_firmware(mainfw);
784}
741 785
742 786
743/********************************************************************/ 787/********************************************************************/
@@ -809,8 +853,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
809 unsigned int prod_id; 853 unsigned int prod_id;
810 struct lbs_private *priv; 854 struct lbs_private *priv;
811 struct if_cs_card *card; 855 struct if_cs_card *card;
812 const struct firmware *helper = NULL;
813 const struct firmware *mainfw = NULL;
814 856
815 lbs_deb_enter(LBS_DEB_CS); 857 lbs_deb_enter(LBS_DEB_CS);
816 858
@@ -890,20 +932,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
890 goto out2; 932 goto out2;
891 } 933 }
892 934
893 ret = lbs_get_firmware(&p_dev->dev, NULL, NULL, card->model,
894 &fw_table[0], &helper, &mainfw);
895 if (ret) {
896 pr_err("failed to find firmware (%d)\n", ret);
897 goto out2;
898 }
899
900 /* Load the firmware early, before calling into libertas.ko */
901 ret = if_cs_prog_helper(card, helper);
902 if (ret == 0 && (card->model != MODEL_8305))
903 ret = if_cs_prog_real(card, mainfw);
904 if (ret)
905 goto out2;
906
907 /* Make this card known to the libertas driver */ 935 /* Make this card known to the libertas driver */
908 priv = lbs_add_card(card, &p_dev->dev); 936 priv = lbs_add_card(card, &p_dev->dev);
909 if (!priv) { 937 if (!priv) {
@@ -911,37 +939,22 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
911 goto out2; 939 goto out2;
912 } 940 }
913 941
914 /* Finish setting up fields in lbs_private */ 942 /* Set up fields in lbs_private */
915 card->priv = priv; 943 card->priv = priv;
916 priv->card = card; 944 priv->card = card;
917 priv->hw_host_to_card = if_cs_host_to_card; 945 priv->hw_host_to_card = if_cs_host_to_card;
918 priv->enter_deep_sleep = NULL; 946 priv->enter_deep_sleep = NULL;
919 priv->exit_deep_sleep = NULL; 947 priv->exit_deep_sleep = NULL;
920 priv->reset_deep_sleep_wakeup = NULL; 948 priv->reset_deep_sleep_wakeup = NULL;
921 priv->fw_ready = 1;
922 949
923 /* Now actually get the IRQ */ 950 /* Get firmware */
924 ret = request_irq(p_dev->irq, if_cs_interrupt, 951 ret = lbs_get_firmware_async(priv, &p_dev->dev, card->model, fw_table,
925 IRQF_SHARED, DRV_NAME, card); 952 if_cs_prog_firmware);
926 if (ret) { 953 if (ret) {
927 pr_err("error in request_irq\n"); 954 pr_err("failed to find firmware (%d)\n", ret);
928 goto out3;
929 }
930
931 /*
932 * Clear any interrupt cause that happened while sending
933 * firmware/initializing card
934 */
935 if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK);
936 if_cs_enable_ints(card);
937
938 /* And finally bring the card up */
939 if (lbs_start_card(priv) != 0) {
940 pr_err("could not activate card\n");
941 goto out3; 955 goto out3;
942 } 956 }
943 957
944 ret = 0;
945 goto out; 958 goto out;
946 959
947out3: 960out3:
@@ -951,11 +964,6 @@ out2:
951out1: 964out1:
952 pcmcia_disable_device(p_dev); 965 pcmcia_disable_device(p_dev);
953out: 966out:
954 if (helper)
955 release_firmware(helper);
956 if (mainfw)
957 release_firmware(mainfw);
958
959 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 967 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
960 return ret; 968 return ret;
961} 969}
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 9804ebc892d4..76caebaa4397 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -65,12 +65,6 @@ static void if_sdio_interrupt(struct sdio_func *func);
65 */ 65 */
66static u8 user_rmmod; 66static u8 user_rmmod;
67 67
68static char *lbs_helper_name = NULL;
69module_param_named(helper_name, lbs_helper_name, charp, 0644);
70
71static char *lbs_fw_name = NULL;
72module_param_named(fw_name, lbs_fw_name, charp, 0644);
73
74static const struct sdio_device_id if_sdio_ids[] = { 68static const struct sdio_device_id if_sdio_ids[] = {
75 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 69 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL,
76 SDIO_DEVICE_ID_MARVELL_LIBERTAS) }, 70 SDIO_DEVICE_ID_MARVELL_LIBERTAS) },
@@ -123,11 +117,8 @@ struct if_sdio_card {
123 int model; 117 int model;
124 unsigned long ioport; 118 unsigned long ioport;
125 unsigned int scratch_reg; 119 unsigned int scratch_reg;
126 120 bool started;
127 const char *helper; 121 wait_queue_head_t pwron_waitq;
128 const char *firmware;
129 bool helper_allocated;
130 bool firmware_allocated;
131 122
132 u8 buffer[65536] __attribute__((aligned(4))); 123 u8 buffer[65536] __attribute__((aligned(4)));
133 124
@@ -140,6 +131,9 @@ struct if_sdio_card {
140 u8 rx_unit; 131 u8 rx_unit;
141}; 132};
142 133
134static void if_sdio_finish_power_on(struct if_sdio_card *card);
135static int if_sdio_power_off(struct if_sdio_card *card);
136
143/********************************************************************/ 137/********************************************************************/
144/* I/O */ 138/* I/O */
145/********************************************************************/ 139/********************************************************************/
@@ -680,12 +674,39 @@ out:
680 return ret; 674 return ret;
681} 675}
682 676
677static void if_sdio_do_prog_firmware(struct lbs_private *priv, int ret,
678 const struct firmware *helper,
679 const struct firmware *mainfw)
680{
681 struct if_sdio_card *card = priv->card;
682
683 if (ret) {
684 pr_err("failed to find firmware (%d)\n", ret);
685 return;
686 }
687
688 ret = if_sdio_prog_helper(card, helper);
689 if (ret)
690 goto out;
691
692 lbs_deb_sdio("Helper firmware loaded\n");
693
694 ret = if_sdio_prog_real(card, mainfw);
695 if (ret)
696 goto out;
697
698 lbs_deb_sdio("Firmware loaded\n");
699 if_sdio_finish_power_on(card);
700
701out:
702 release_firmware(helper);
703 release_firmware(mainfw);
704}
705
683static int if_sdio_prog_firmware(struct if_sdio_card *card) 706static int if_sdio_prog_firmware(struct if_sdio_card *card)
684{ 707{
685 int ret; 708 int ret;
686 u16 scratch; 709 u16 scratch;
687 const struct firmware *helper = NULL;
688 const struct firmware *mainfw = NULL;
689 710
690 lbs_deb_enter(LBS_DEB_SDIO); 711 lbs_deb_enter(LBS_DEB_SDIO);
691 712
@@ -719,43 +740,18 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
719 */ 740 */
720 if (scratch == IF_SDIO_FIRMWARE_OK) { 741 if (scratch == IF_SDIO_FIRMWARE_OK) {
721 lbs_deb_sdio("firmware already loaded\n"); 742 lbs_deb_sdio("firmware already loaded\n");
722 goto success; 743 if_sdio_finish_power_on(card);
744 return 0;
723 } else if ((card->model == MODEL_8686) && (scratch & 0x7fff)) { 745 } else if ((card->model == MODEL_8686) && (scratch & 0x7fff)) {
724 lbs_deb_sdio("firmware may be running\n"); 746 lbs_deb_sdio("firmware may be running\n");
725 goto success; 747 if_sdio_finish_power_on(card);
726 } 748 return 0;
727
728 ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name,
729 card->model, &fw_table[0], &helper, &mainfw);
730 if (ret) {
731 pr_err("failed to find firmware (%d)\n", ret);
732 goto out;
733 } 749 }
734 750
735 ret = if_sdio_prog_helper(card, helper); 751 ret = lbs_get_firmware_async(card->priv, &card->func->dev, card->model,
736 if (ret) 752 fw_table, if_sdio_do_prog_firmware);
737 goto out;
738
739 lbs_deb_sdio("Helper firmware loaded\n");
740
741 ret = if_sdio_prog_real(card, mainfw);
742 if (ret)
743 goto out;
744
745 lbs_deb_sdio("Firmware loaded\n");
746
747success:
748 sdio_claim_host(card->func);
749 sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE);
750 sdio_release_host(card->func);
751 ret = 0;
752 753
753out: 754out:
754 if (helper)
755 release_firmware(helper);
756 if (mainfw)
757 release_firmware(mainfw);
758
759 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 755 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
760 return ret; 756 return ret;
761} 757}
@@ -764,55 +760,15 @@ out:
764/* Power management */ 760/* Power management */
765/********************************************************************/ 761/********************************************************************/
766 762
767static int if_sdio_power_on(struct if_sdio_card *card) 763/* Finish power on sequence (after firmware is loaded) */
764static void if_sdio_finish_power_on(struct if_sdio_card *card)
768{ 765{
769 struct sdio_func *func = card->func; 766 struct sdio_func *func = card->func;
770 struct lbs_private *priv = card->priv; 767 struct lbs_private *priv = card->priv;
771 struct mmc_host *host = func->card->host;
772 int ret; 768 int ret;
773 769
774 sdio_claim_host(func); 770 sdio_claim_host(func);
775 771 sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE);
776 ret = sdio_enable_func(func);
777 if (ret)
778 goto release;
779
780 /* For 1-bit transfers to the 8686 model, we need to enable the
781 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
782 * bit to allow access to non-vendor registers. */
783 if ((card->model == MODEL_8686) &&
784 (host->caps & MMC_CAP_SDIO_IRQ) &&
785 (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
786 u8 reg;
787
788 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
789 reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
790 if (ret)
791 goto disable;
792
793 reg |= SDIO_BUS_ECSI;
794 sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
795 if (ret)
796 goto disable;
797 }
798
799 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
800 if (ret)
801 goto disable;
802
803 card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 1, &ret) << 8;
804 if (ret)
805 goto disable;
806
807 card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 2, &ret) << 16;
808 if (ret)
809 goto disable;
810
811 sdio_release_host(func);
812 ret = if_sdio_prog_firmware(card);
813 sdio_claim_host(func);
814 if (ret)
815 goto disable;
816 772
817 /* 773 /*
818 * Get rx_unit if the chip is SD8688 or newer. 774 * Get rx_unit if the chip is SD8688 or newer.
@@ -837,7 +793,7 @@ static int if_sdio_power_on(struct if_sdio_card *card)
837 */ 793 */
838 ret = sdio_claim_irq(func, if_sdio_interrupt); 794 ret = sdio_claim_irq(func, if_sdio_interrupt);
839 if (ret) 795 if (ret)
840 goto disable; 796 goto release;
841 797
842 /* 798 /*
843 * Enable interrupts now that everything is set up 799 * Enable interrupts now that everything is set up
@@ -863,11 +819,79 @@ static int if_sdio_power_on(struct if_sdio_card *card)
863 } 819 }
864 820
865 priv->fw_ready = 1; 821 priv->fw_ready = 1;
822 wake_up(&card->pwron_waitq);
866 823
867 return 0; 824 if (!card->started) {
825 ret = lbs_start_card(priv);
826 if_sdio_power_off(card);
827 if (ret == 0) {
828 card->started = true;
829 /* Tell PM core that we don't need the card to be
830 * powered now */
831 pm_runtime_put_noidle(&func->dev);
832 }
833 }
834
835 return;
868 836
869release_irq: 837release_irq:
870 sdio_release_irq(func); 838 sdio_release_irq(func);
839release:
840 sdio_release_host(func);
841}
842
843static int if_sdio_power_on(struct if_sdio_card *card)
844{
845 struct sdio_func *func = card->func;
846 struct mmc_host *host = func->card->host;
847 int ret;
848
849 sdio_claim_host(func);
850
851 ret = sdio_enable_func(func);
852 if (ret)
853 goto release;
854
855 /* For 1-bit transfers to the 8686 model, we need to enable the
856 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
857 * bit to allow access to non-vendor registers. */
858 if ((card->model == MODEL_8686) &&
859 (host->caps & MMC_CAP_SDIO_IRQ) &&
860 (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
861 u8 reg;
862
863 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
864 reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
865 if (ret)
866 goto disable;
867
868 reg |= SDIO_BUS_ECSI;
869 sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
870 if (ret)
871 goto disable;
872 }
873
874 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
875 if (ret)
876 goto disable;
877
878 card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 1, &ret) << 8;
879 if (ret)
880 goto disable;
881
882 card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 2, &ret) << 16;
883 if (ret)
884 goto disable;
885
886 sdio_release_host(func);
887 ret = if_sdio_prog_firmware(card);
888 if (ret) {
889 sdio_disable_func(func);
890 return ret;
891 }
892
893 return 0;
894
871disable: 895disable:
872 sdio_disable_func(func); 896 sdio_disable_func(func);
873release: 897release:
@@ -1074,11 +1098,17 @@ static int if_sdio_power_save(struct lbs_private *priv)
1074static int if_sdio_power_restore(struct lbs_private *priv) 1098static int if_sdio_power_restore(struct lbs_private *priv)
1075{ 1099{
1076 struct if_sdio_card *card = priv->card; 1100 struct if_sdio_card *card = priv->card;
1101 int r;
1077 1102
1078 /* Make sure the card will not be powered off by runtime PM */ 1103 /* Make sure the card will not be powered off by runtime PM */
1079 pm_runtime_get_sync(&card->func->dev); 1104 pm_runtime_get_sync(&card->func->dev);
1080 1105
1081 return if_sdio_power_on(card); 1106 r = if_sdio_power_on(card);
1107 if (r)
1108 return r;
1109
1110 wait_event(card->pwron_waitq, priv->fw_ready);
1111 return 0;
1082} 1112}
1083 1113
1084 1114
@@ -1179,6 +1209,7 @@ static int if_sdio_probe(struct sdio_func *func,
1179 spin_lock_init(&card->lock); 1209 spin_lock_init(&card->lock);
1180 card->workqueue = create_workqueue("libertas_sdio"); 1210 card->workqueue = create_workqueue("libertas_sdio");
1181 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); 1211 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
1212 init_waitqueue_head(&card->pwron_waitq);
1182 1213
1183 /* Check if we support this card */ 1214 /* Check if we support this card */
1184 for (i = 0; i < ARRAY_SIZE(fw_table); i++) { 1215 for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
@@ -1220,14 +1251,6 @@ static int if_sdio_probe(struct sdio_func *func,
1220 if (ret) 1251 if (ret)
1221 goto err_activate_card; 1252 goto err_activate_card;
1222 1253
1223 ret = lbs_start_card(priv);
1224 if_sdio_power_off(card);
1225 if (ret)
1226 goto err_activate_card;
1227
1228 /* Tell PM core that we don't need the card to be powered now */
1229 pm_runtime_put_noidle(&func->dev);
1230
1231out: 1254out:
1232 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 1255 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
1233 1256
@@ -1244,10 +1267,6 @@ free:
1244 kfree(packet); 1267 kfree(packet);
1245 } 1268 }
1246 1269
1247 if (card->helper_allocated)
1248 kfree(card->helper);
1249 if (card->firmware_allocated)
1250 kfree(card->firmware);
1251 kfree(card); 1270 kfree(card);
1252 1271
1253 goto out; 1272 goto out;
@@ -1295,12 +1314,6 @@ static void if_sdio_remove(struct sdio_func *func)
1295 kfree(packet); 1314 kfree(packet);
1296 } 1315 }
1297 1316
1298 if (card->helper_allocated)
1299 kfree(card->helper);
1300 if (card->firmware_allocated)
1301 kfree(card->firmware);
1302 kfree(card);
1303
1304 lbs_deb_leave(LBS_DEB_SDIO); 1317 lbs_deb_leave(LBS_DEB_SDIO);
1305} 1318}
1306 1319
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 50b1ee7721e9..9604a1c4a74d 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1064,9 +1064,8 @@ static int if_spi_init_card(struct if_spi_card *card)
1064 goto out; 1064 goto out;
1065 } 1065 }
1066 1066
1067 err = lbs_get_firmware(&card->spi->dev, NULL, NULL, 1067 err = lbs_get_firmware(&card->spi->dev, card->card_id,
1068 card->card_id, &fw_table[0], &helper, 1068 &fw_table[0], &helper, &mainfw);
1069 &mainfw);
1070 if (err) { 1069 if (err) {
1071 netdev_err(priv->dev, "failed to find firmware (%d)\n", 1070 netdev_err(priv->dev, "failed to find firmware (%d)\n",
1072 err); 1071 err);
@@ -1095,10 +1094,8 @@ static int if_spi_init_card(struct if_spi_card *card)
1095 goto out; 1094 goto out;
1096 1095
1097out: 1096out:
1098 if (helper) 1097 release_firmware(helper);
1099 release_firmware(helper); 1098 release_firmware(mainfw);
1100 if (mainfw)
1101 release_firmware(mainfw);
1102 1099
1103 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); 1100 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
1104 1101
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 74da5f1ea243..75403e6e3990 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -29,9 +29,6 @@
29 29
30#define MESSAGE_HEADER_LEN 4 30#define MESSAGE_HEADER_LEN 4
31 31
32static char *lbs_fw_name = NULL;
33module_param_named(fw_name, lbs_fw_name, charp, 0644);
34
35MODULE_FIRMWARE("libertas/usb8388_v9.bin"); 32MODULE_FIRMWARE("libertas/usb8388_v9.bin");
36MODULE_FIRMWARE("libertas/usb8388_v5.bin"); 33MODULE_FIRMWARE("libertas/usb8388_v5.bin");
37MODULE_FIRMWARE("libertas/usb8388.bin"); 34MODULE_FIRMWARE("libertas/usb8388.bin");
@@ -44,6 +41,16 @@ enum {
44 MODEL_8682 = 0x2 41 MODEL_8682 = 0x2
45}; 42};
46 43
44/* table of firmware file names */
45static const struct lbs_fw_table fw_table[] = {
46 { MODEL_8388, "libertas/usb8388_olpc.bin", NULL },
47 { MODEL_8388, "libertas/usb8388_v9.bin", NULL },
48 { MODEL_8388, "libertas/usb8388_v5.bin", NULL },
49 { MODEL_8388, "libertas/usb8388.bin", NULL },
50 { MODEL_8388, "usb8388.bin", NULL },
51 { MODEL_8682, "libertas/usb8682.bin", NULL }
52};
53
47static struct usb_device_id if_usb_table[] = { 54static struct usb_device_id if_usb_table[] = {
48 /* Enter the device signature inside */ 55 /* Enter the device signature inside */
49 { USB_DEVICE(0x1286, 0x2001), .driver_info = MODEL_8388 }, 56 { USB_DEVICE(0x1286, 0x2001), .driver_info = MODEL_8388 },
@@ -55,10 +62,9 @@ MODULE_DEVICE_TABLE(usb, if_usb_table);
55 62
56static void if_usb_receive(struct urb *urb); 63static void if_usb_receive(struct urb *urb);
57static void if_usb_receive_fwload(struct urb *urb); 64static void if_usb_receive_fwload(struct urb *urb);
58static int __if_usb_prog_firmware(struct if_usb_card *cardp, 65static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
59 const char *fwname, int cmd); 66 const struct firmware *fw,
60static int if_usb_prog_firmware(struct if_usb_card *cardp, 67 const struct firmware *unused);
61 const char *fwname, int cmd);
62static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type, 68static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type,
63 uint8_t *payload, uint16_t nb); 69 uint8_t *payload, uint16_t nb);
64static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, 70static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
@@ -67,69 +73,6 @@ static void if_usb_free(struct if_usb_card *cardp);
67static int if_usb_submit_rx_urb(struct if_usb_card *cardp); 73static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
68static int if_usb_reset_device(struct if_usb_card *cardp); 74static int if_usb_reset_device(struct if_usb_card *cardp);
69 75
70/* sysfs hooks */
71
72/*
73 * Set function to write firmware to device's persistent memory
74 */
75static ssize_t if_usb_firmware_set(struct device *dev,
76 struct device_attribute *attr, const char *buf, size_t count)
77{
78 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
79 struct if_usb_card *cardp = priv->card;
80 int ret;
81
82 BUG_ON(buf == NULL);
83
84 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW);
85 if (ret == 0)
86 return count;
87
88 return ret;
89}
90
91/*
92 * lbs_flash_fw attribute to be exported per ethX interface through sysfs
93 * (/sys/class/net/ethX/lbs_flash_fw). Use this like so to write firmware to
94 * the device's persistent memory:
95 * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_fw
96 */
97static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set);
98
99/**
100 * if_usb_boot2_set - write firmware to device's persistent memory
101 *
102 * @dev: target device
103 * @attr: device attributes
104 * @buf: firmware buffer to write
105 * @count: number of bytes to write
106 *
107 * returns: number of bytes written or negative error code
108 */
109static ssize_t if_usb_boot2_set(struct device *dev,
110 struct device_attribute *attr, const char *buf, size_t count)
111{
112 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
113 struct if_usb_card *cardp = priv->card;
114 int ret;
115
116 BUG_ON(buf == NULL);
117
118 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2);
119 if (ret == 0)
120 return count;
121
122 return ret;
123}
124
125/*
126 * lbs_flash_boot2 attribute to be exported per ethX interface through sysfs
127 * (/sys/class/net/ethX/lbs_flash_boot2). Use this like so to write firmware
128 * to the device's persistent memory:
129 * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_boot2
130 */
131static DEVICE_ATTR(lbs_flash_boot2, 0200, NULL, if_usb_boot2_set);
132
133/** 76/**
134 * if_usb_write_bulk_callback - callback function to handle the status 77 * if_usb_write_bulk_callback - callback function to handle the status
135 * of the URB 78 * of the URB
@@ -256,6 +199,7 @@ static int if_usb_probe(struct usb_interface *intf,
256 struct usb_endpoint_descriptor *endpoint; 199 struct usb_endpoint_descriptor *endpoint;
257 struct lbs_private *priv; 200 struct lbs_private *priv;
258 struct if_usb_card *cardp; 201 struct if_usb_card *cardp;
202 int r = -ENOMEM;
259 int i; 203 int i;
260 204
261 udev = interface_to_usbdev(intf); 205 udev = interface_to_usbdev(intf);
@@ -313,20 +257,10 @@ static int if_usb_probe(struct usb_interface *intf,
313 goto dealloc; 257 goto dealloc;
314 } 258 }
315 259
316 /* Upload firmware */
317 kparam_block_sysfs_write(fw_name);
318 if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) {
319 kparam_unblock_sysfs_write(fw_name);
320 lbs_deb_usbd(&udev->dev, "FW upload failed\n");
321 goto err_prog_firmware;
322 }
323 kparam_unblock_sysfs_write(fw_name);
324
325 if (!(priv = lbs_add_card(cardp, &intf->dev))) 260 if (!(priv = lbs_add_card(cardp, &intf->dev)))
326 goto err_prog_firmware; 261 goto err_add_card;
327 262
328 cardp->priv = priv; 263 cardp->priv = priv;
329 cardp->priv->fw_ready = 1;
330 264
331 priv->hw_host_to_card = if_usb_host_to_card; 265 priv->hw_host_to_card = if_usb_host_to_card;
332 priv->enter_deep_sleep = NULL; 266 priv->enter_deep_sleep = NULL;
@@ -339,42 +273,25 @@ static int if_usb_probe(struct usb_interface *intf,
339 273
340 cardp->boot2_version = udev->descriptor.bcdDevice; 274 cardp->boot2_version = udev->descriptor.bcdDevice;
341 275
342 if_usb_submit_rx_urb(cardp);
343
344 if (lbs_start_card(priv))
345 goto err_start_card;
346
347 if_usb_setup_firmware(priv);
348
349 usb_get_dev(udev); 276 usb_get_dev(udev);
350 usb_set_intfdata(intf, cardp); 277 usb_set_intfdata(intf, cardp);
351 278
352 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_fw)) 279 r = lbs_get_firmware_async(priv, &udev->dev, cardp->model,
353 netdev_err(priv->dev, 280 fw_table, if_usb_prog_firmware);
354 "cannot register lbs_flash_fw attribute\n"); 281 if (r)
355 282 goto err_get_fw;
356 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2))
357 netdev_err(priv->dev,
358 "cannot register lbs_flash_boot2 attribute\n");
359
360 /*
361 * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware.
362 */
363 priv->wol_criteria = EHS_REMOVE_WAKEUP;
364 if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
365 priv->ehs_remove_supported = false;
366 283
367 return 0; 284 return 0;
368 285
369err_start_card: 286err_get_fw:
370 lbs_remove_card(priv); 287 lbs_remove_card(priv);
371err_prog_firmware: 288err_add_card:
372 if_usb_reset_device(cardp); 289 if_usb_reset_device(cardp);
373dealloc: 290dealloc:
374 if_usb_free(cardp); 291 if_usb_free(cardp);
375 292
376error: 293error:
377 return -ENOMEM; 294 return r;
378} 295}
379 296
380/** 297/**
@@ -389,9 +306,6 @@ static void if_usb_disconnect(struct usb_interface *intf)
389 306
390 lbs_deb_enter(LBS_DEB_MAIN); 307 lbs_deb_enter(LBS_DEB_MAIN);
391 308
392 device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2);
393 device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_fw);
394
395 cardp->surprise_removed = 1; 309 cardp->surprise_removed = 1;
396 310
397 if (priv) { 311 if (priv) {
@@ -912,121 +826,22 @@ static int check_fwfile_format(const uint8_t *data, uint32_t totlen)
912 return ret; 826 return ret;
913} 827}
914 828
915 829static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
916/** 830 const struct firmware *fw,
917* if_usb_prog_firmware - programs the firmware subject to cmd 831 const struct firmware *unused)
918*
919* @cardp: the if_usb_card descriptor
920* @fwname: firmware or boot2 image file name
921* @cmd: either BOOT_CMD_FW_BY_USB, BOOT_CMD_UPDATE_FW,
922* or BOOT_CMD_UPDATE_BOOT2.
923* returns: 0 or error code
924*/
925static int if_usb_prog_firmware(struct if_usb_card *cardp,
926 const char *fwname, int cmd)
927{
928 struct lbs_private *priv = cardp->priv;
929 unsigned long flags, caps;
930 int ret;
931
932 caps = priv->fwcapinfo;
933 if (((cmd == BOOT_CMD_UPDATE_FW) && !(caps & FW_CAPINFO_FIRMWARE_UPGRADE)) ||
934 ((cmd == BOOT_CMD_UPDATE_BOOT2) && !(caps & FW_CAPINFO_BOOT2_UPGRADE)))
935 return -EOPNOTSUPP;
936
937 /* Ensure main thread is idle. */
938 spin_lock_irqsave(&priv->driver_lock, flags);
939 while (priv->cur_cmd != NULL || priv->dnld_sent != DNLD_RES_RECEIVED) {
940 spin_unlock_irqrestore(&priv->driver_lock, flags);
941 if (wait_event_interruptible(priv->waitq,
942 (priv->cur_cmd == NULL &&
943 priv->dnld_sent == DNLD_RES_RECEIVED))) {
944 return -ERESTARTSYS;
945 }
946 spin_lock_irqsave(&priv->driver_lock, flags);
947 }
948 priv->dnld_sent = DNLD_BOOTCMD_SENT;
949 spin_unlock_irqrestore(&priv->driver_lock, flags);
950
951 ret = __if_usb_prog_firmware(cardp, fwname, cmd);
952
953 spin_lock_irqsave(&priv->driver_lock, flags);
954 priv->dnld_sent = DNLD_RES_RECEIVED;
955 spin_unlock_irqrestore(&priv->driver_lock, flags);
956
957 wake_up(&priv->waitq);
958
959 return ret;
960}
961
962/* table of firmware file names */
963static const struct {
964 u32 model;
965 const char *fwname;
966} fw_table[] = {
967 { MODEL_8388, "libertas/usb8388_v9.bin" },
968 { MODEL_8388, "libertas/usb8388_v5.bin" },
969 { MODEL_8388, "libertas/usb8388.bin" },
970 { MODEL_8388, "usb8388.bin" },
971 { MODEL_8682, "libertas/usb8682.bin" }
972};
973
974#ifdef CONFIG_OLPC
975
976static int try_olpc_fw(struct if_usb_card *cardp)
977{
978 int retval = -ENOENT;
979
980 /* try the OLPC firmware first; fall back to fw_table list */
981 if (machine_is_olpc() && cardp->model == MODEL_8388)
982 retval = request_firmware(&cardp->fw,
983 "libertas/usb8388_olpc.bin", &cardp->udev->dev);
984 return retval;
985}
986
987#else
988static int try_olpc_fw(struct if_usb_card *cardp) { return -ENOENT; }
989#endif /* !CONFIG_OLPC */
990
991static int get_fw(struct if_usb_card *cardp, const char *fwname)
992{
993 int i;
994
995 /* Try user-specified firmware first */
996 if (fwname)
997 return request_firmware(&cardp->fw, fwname, &cardp->udev->dev);
998
999 /* Handle OLPC firmware */
1000 if (try_olpc_fw(cardp) == 0)
1001 return 0;
1002
1003 /* Otherwise search for firmware to use */
1004 for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
1005 if (fw_table[i].model != cardp->model)
1006 continue;
1007 if (request_firmware(&cardp->fw, fw_table[i].fwname,
1008 &cardp->udev->dev) == 0)
1009 return 0;
1010 }
1011
1012 return -ENOENT;
1013}
1014
1015static int __if_usb_prog_firmware(struct if_usb_card *cardp,
1016 const char *fwname, int cmd)
1017{ 832{
833 struct if_usb_card *cardp = priv->card;
1018 int i = 0; 834 int i = 0;
1019 static int reset_count = 10; 835 static int reset_count = 10;
1020 int ret = 0;
1021 836
1022 lbs_deb_enter(LBS_DEB_USB); 837 lbs_deb_enter(LBS_DEB_USB);
1023 838
1024 ret = get_fw(cardp, fwname);
1025 if (ret) { 839 if (ret) {
1026 pr_err("failed to find firmware (%d)\n", ret); 840 pr_err("failed to find firmware (%d)\n", ret);
1027 goto done; 841 goto done;
1028 } 842 }
1029 843
844 cardp->fw = fw;
1030 if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) { 845 if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
1031 ret = -EINVAL; 846 ret = -EINVAL;
1032 goto release_fw; 847 goto release_fw;
@@ -1053,7 +868,7 @@ restart:
1053 do { 868 do {
1054 int j = 0; 869 int j = 0;
1055 i++; 870 i++;
1056 if_usb_issue_boot_command(cardp, cmd); 871 if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB);
1057 /* wait for command response */ 872 /* wait for command response */
1058 do { 873 do {
1059 j++; 874 j++;
@@ -1109,13 +924,27 @@ restart:
1109 goto release_fw; 924 goto release_fw;
1110 } 925 }
1111 926
927 cardp->priv->fw_ready = 1;
928 if_usb_submit_rx_urb(cardp);
929
930 if (lbs_start_card(priv))
931 goto release_fw;
932
933 if_usb_setup_firmware(priv);
934
935 /*
936 * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware.
937 */
938 priv->wol_criteria = EHS_REMOVE_WAKEUP;
939 if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
940 priv->ehs_remove_supported = false;
941
1112 release_fw: 942 release_fw:
1113 release_firmware(cardp->fw); 943 release_firmware(cardp->fw);
1114 cardp->fw = NULL; 944 cardp->fw = NULL;
1115 945
1116 done: 946 done:
1117 lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); 947 lbs_deb_leave(LBS_DEB_USB);
1118 return ret;
1119} 948}
1120 949
1121 950
@@ -1128,8 +957,10 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message)
1128 957
1129 lbs_deb_enter(LBS_DEB_USB); 958 lbs_deb_enter(LBS_DEB_USB);
1130 959
1131 if (priv->psstate != PS_STATE_FULL_POWER) 960 if (priv->psstate != PS_STATE_FULL_POWER) {
1132 return -1; 961 ret = -1;
962 goto out;
963 }
1133 964
1134#ifdef CONFIG_OLPC 965#ifdef CONFIG_OLPC
1135 if (machine_is_olpc()) { 966 if (machine_is_olpc()) {
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 957681dede17..e96ee0aa8439 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -878,6 +878,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
878 priv->is_host_sleep_configured = 0; 878 priv->is_host_sleep_configured = 0;
879 priv->is_host_sleep_activated = 0; 879 priv->is_host_sleep_activated = 0;
880 init_waitqueue_head(&priv->host_sleep_q); 880 init_waitqueue_head(&priv->host_sleep_q);
881 init_waitqueue_head(&priv->fw_waitq);
881 mutex_init(&priv->lock); 882 mutex_init(&priv->lock);
882 883
883 setup_timer(&priv->command_timer, lbs_cmd_timeout_handler, 884 setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
@@ -1033,7 +1034,11 @@ void lbs_remove_card(struct lbs_private *priv)
1033 lbs_deb_enter(LBS_DEB_MAIN); 1034 lbs_deb_enter(LBS_DEB_MAIN);
1034 1035
1035 lbs_remove_mesh(priv); 1036 lbs_remove_mesh(priv);
1036 lbs_scan_deinit(priv); 1037
1038 if (priv->wiphy_registered)
1039 lbs_scan_deinit(priv);
1040
1041 lbs_wait_for_firmware_load(priv);
1037 1042
1038 /* worker thread destruction blocks on the in-flight command which 1043 /* worker thread destruction blocks on the in-flight command which
1039 * should have been cleared already in lbs_stop_card(). 1044 * should have been cleared already in lbs_stop_card().
@@ -1128,6 +1133,11 @@ void lbs_stop_card(struct lbs_private *priv)
1128 goto out; 1133 goto out;
1129 dev = priv->dev; 1134 dev = priv->dev;
1130 1135
1136 /* If the netdev isn't registered, it means that lbs_start_card() was
1137 * never called so we have nothing to do here. */
1138 if (dev->reg_state != NETREG_REGISTERED)
1139 goto out;
1140
1131 netif_stop_queue(dev); 1141 netif_stop_queue(dev);
1132 netif_carrier_off(dev); 1142 netif_carrier_off(dev);
1133 1143
@@ -1177,111 +1187,6 @@ void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx)
1177} 1187}
1178EXPORT_SYMBOL_GPL(lbs_notify_command_response); 1188EXPORT_SYMBOL_GPL(lbs_notify_command_response);
1179 1189
1180/**
1181 * lbs_get_firmware - Retrieves two-stage firmware
1182 *
1183 * @dev: A pointer to &device structure
1184 * @user_helper: User-defined helper firmware file
1185 * @user_mainfw: User-defined main firmware file
1186 * @card_model: Bus-specific card model ID used to filter firmware table
1187 * elements
1188 * @fw_table: Table of firmware file names and device model numbers
1189 * terminated by an entry with a NULL helper name
1190 * @helper: On success, the helper firmware; caller must free
1191 * @mainfw: On success, the main firmware; caller must free
1192 *
1193 * returns: 0 on success, non-zero on failure
1194 */
1195int lbs_get_firmware(struct device *dev, const char *user_helper,
1196 const char *user_mainfw, u32 card_model,
1197 const struct lbs_fw_table *fw_table,
1198 const struct firmware **helper,
1199 const struct firmware **mainfw)
1200{
1201 const struct lbs_fw_table *iter;
1202 int ret;
1203
1204 BUG_ON(helper == NULL);
1205 BUG_ON(mainfw == NULL);
1206
1207 /* Try user-specified firmware first */
1208 if (user_helper) {
1209 ret = request_firmware(helper, user_helper, dev);
1210 if (ret) {
1211 dev_err(dev, "couldn't find helper firmware %s\n",
1212 user_helper);
1213 goto fail;
1214 }
1215 }
1216 if (user_mainfw) {
1217 ret = request_firmware(mainfw, user_mainfw, dev);
1218 if (ret) {
1219 dev_err(dev, "couldn't find main firmware %s\n",
1220 user_mainfw);
1221 goto fail;
1222 }
1223 }
1224
1225 if (*helper && *mainfw)
1226 return 0;
1227
1228 /* Otherwise search for firmware to use. If neither the helper or
1229 * the main firmware were specified by the user, then we need to
1230 * make sure that found helper & main are from the same entry in
1231 * fw_table.
1232 */
1233 iter = fw_table;
1234 while (iter && iter->helper) {
1235 if (iter->model != card_model)
1236 goto next;
1237
1238 if (*helper == NULL) {
1239 ret = request_firmware(helper, iter->helper, dev);
1240 if (ret)
1241 goto next;
1242
1243 /* If the device has one-stage firmware (ie cf8305) and
1244 * we've got it then we don't need to bother with the
1245 * main firmware.
1246 */
1247 if (iter->fwname == NULL)
1248 return 0;
1249 }
1250
1251 if (*mainfw == NULL) {
1252 ret = request_firmware(mainfw, iter->fwname, dev);
1253 if (ret && !user_helper) {
1254 /* Clear the helper if it wasn't user-specified
1255 * and the main firmware load failed, to ensure
1256 * we don't have mismatched firmware pairs.
1257 */
1258 release_firmware(*helper);
1259 *helper = NULL;
1260 }
1261 }
1262
1263 if (*helper && *mainfw)
1264 return 0;
1265
1266 next:
1267 iter++;
1268 }
1269
1270 fail:
1271 /* Failed */
1272 if (*helper) {
1273 release_firmware(*helper);
1274 *helper = NULL;
1275 }
1276 if (*mainfw) {
1277 release_firmware(*mainfw);
1278 *mainfw = NULL;
1279 }
1280
1281 return -ENOENT;
1282}
1283EXPORT_SYMBOL_GPL(lbs_get_firmware);
1284
1285static int __init lbs_init_module(void) 1190static int __init lbs_init_module(void)
1286{ 1191{
1287 lbs_deb_enter(LBS_DEB_MAIN); 1192 lbs_deb_enter(LBS_DEB_MAIN);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index b7ce6a6e355f..03c0c6b1372c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -582,11 +582,13 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
582 goto nla_put_failure; 582 goto nla_put_failure;
583 } 583 }
584 584
585 NLA_PUT(skb, HWSIM_ATTR_ADDR_TRANSMITTER, 585 if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
586 sizeof(struct mac_address), data->addresses[1].addr); 586 sizeof(struct mac_address), data->addresses[1].addr))
587 goto nla_put_failure;
587 588
588 /* We get the skb->data */ 589 /* We get the skb->data */
589 NLA_PUT(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data); 590 if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
591 goto nla_put_failure;
590 592
591 /* We get the flags for this transmission, and we translate them to 593 /* We get the flags for this transmission, and we translate them to
592 wmediumd flags */ 594 wmediumd flags */
@@ -597,7 +599,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
597 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 599 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
598 hwsim_flags |= HWSIM_TX_CTL_NO_ACK; 600 hwsim_flags |= HWSIM_TX_CTL_NO_ACK;
599 601
600 NLA_PUT_U32(skb, HWSIM_ATTR_FLAGS, hwsim_flags); 602 if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
603 goto nla_put_failure;
601 604
602 /* We get the tx control (rate and retries) info*/ 605 /* We get the tx control (rate and retries) info*/
603 606
@@ -606,12 +609,14 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
606 tx_attempts[i].count = info->status.rates[i].count; 609 tx_attempts[i].count = info->status.rates[i].count;
607 } 610 }
608 611
609 NLA_PUT(skb, HWSIM_ATTR_TX_INFO, 612 if (nla_put(skb, HWSIM_ATTR_TX_INFO,
610 sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES, 613 sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
611 tx_attempts); 614 tx_attempts))
615 goto nla_put_failure;
612 616
613 /* We create a cookie to identify this skb */ 617 /* We create a cookie to identify this skb */
614 NLA_PUT_U64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb); 618 if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb))
619 goto nla_put_failure;
615 620
616 genlmsg_end(skb, msg_head); 621 genlmsg_end(skb, msg_head);
617 genlmsg_unicast(&init_net, skb, dst_pid); 622 genlmsg_unicast(&init_net, skb, dst_pid);
@@ -632,6 +637,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
632 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 637 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
633 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 638 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
634 struct ieee80211_rx_status rx_status; 639 struct ieee80211_rx_status rx_status;
640 struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
635 641
636 if (data->idle) { 642 if (data->idle) {
637 wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n"); 643 wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
@@ -666,6 +672,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
666 spin_lock(&hwsim_radio_lock); 672 spin_lock(&hwsim_radio_lock);
667 list_for_each_entry(data2, &hwsim_radios, list) { 673 list_for_each_entry(data2, &hwsim_radios, list) {
668 struct sk_buff *nskb; 674 struct sk_buff *nskb;
675 struct ieee80211_mgmt *mgmt;
669 676
670 if (data == data2) 677 if (data == data2)
671 continue; 678 continue;
@@ -683,8 +690,18 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
683 690
684 if (mac80211_hwsim_addr_match(data2, hdr->addr1)) 691 if (mac80211_hwsim_addr_match(data2, hdr->addr1))
685 ack = true; 692 ack = true;
693
694 /* set bcn timestamp relative to receiver mactime */
686 rx_status.mactime = 695 rx_status.mactime =
687 le64_to_cpu(__mac80211_hwsim_get_tsf(data2)); 696 le64_to_cpu(__mac80211_hwsim_get_tsf(data2));
697 mgmt = (struct ieee80211_mgmt *) nskb->data;
698 if (ieee80211_is_beacon(mgmt->frame_control) ||
699 ieee80211_is_probe_resp(mgmt->frame_control))
700 mgmt->u.beacon.timestamp = cpu_to_le64(
701 rx_status.mactime +
702 (data->tsf_offset - data2->tsf_offset) +
703 24 * 8 * 10 / txrate->bitrate);
704
688 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); 705 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
689 ieee80211_rx_irqsafe(data2->hw, nskb); 706 ieee80211_rx_irqsafe(data2->hw, nskb);
690 } 707 }
@@ -698,12 +715,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
698 bool ack; 715 bool ack;
699 struct ieee80211_tx_info *txi; 716 struct ieee80211_tx_info *txi;
700 u32 _pid; 717 u32 _pid;
701 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
702 struct mac80211_hwsim_data *data = hw->priv;
703
704 if (ieee80211_is_beacon(mgmt->frame_control) ||
705 ieee80211_is_probe_resp(mgmt->frame_control))
706 mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
707 718
708 mac80211_hwsim_monitor_rx(hw, skb); 719 mac80211_hwsim_monitor_rx(hw, skb);
709 720
@@ -800,11 +811,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
800 struct ieee80211_vif *vif) 811 struct ieee80211_vif *vif)
801{ 812{
802 struct ieee80211_hw *hw = arg; 813 struct ieee80211_hw *hw = arg;
803 struct mac80211_hwsim_data *data = hw->priv;
804 struct sk_buff *skb; 814 struct sk_buff *skb;
805 struct ieee80211_tx_info *info; 815 struct ieee80211_tx_info *info;
806 u32 _pid; 816 u32 _pid;
807 struct ieee80211_mgmt *mgmt;
808 817
809 hwsim_check_magic(vif); 818 hwsim_check_magic(vif);
810 819
@@ -818,9 +827,6 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
818 return; 827 return;
819 info = IEEE80211_SKB_CB(skb); 828 info = IEEE80211_SKB_CB(skb);
820 829
821 mgmt = (struct ieee80211_mgmt *) skb->data;
822 mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
823
824 mac80211_hwsim_monitor_rx(hw, skb); 830 mac80211_hwsim_monitor_rx(hw, skb);
825 831
826 /* wmediumd mode check */ 832 /* wmediumd mode check */
@@ -1108,7 +1114,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
1108 nla_total_size(sizeof(u32))); 1114 nla_total_size(sizeof(u32)));
1109 if (!skb) 1115 if (!skb)
1110 return -ENOMEM; 1116 return -ENOMEM;
1111 NLA_PUT_U32(skb, HWSIM_TM_ATTR_PS, hwsim->ps); 1117 if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
1118 goto nla_put_failure;
1112 return cfg80211_testmode_reply(skb); 1119 return cfg80211_testmode_reply(skb);
1113 default: 1120 default:
1114 return -EOPNOTSUPP; 1121 return -EOPNOTSUPP;
@@ -1444,7 +1451,7 @@ DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
1444 hwsim_fops_group_read, hwsim_fops_group_write, 1451 hwsim_fops_group_read, hwsim_fops_group_write,
1445 "%llx\n"); 1452 "%llx\n");
1446 1453
1447struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr( 1454static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
1448 struct mac_address *addr) 1455 struct mac_address *addr)
1449{ 1456{
1450 struct mac80211_hwsim_data *data; 1457 struct mac80211_hwsim_data *data;
@@ -1789,9 +1796,11 @@ static int __init init_mac80211_hwsim(void)
1789 IEEE80211_HW_SIGNAL_DBM | 1796 IEEE80211_HW_SIGNAL_DBM |
1790 IEEE80211_HW_SUPPORTS_STATIC_SMPS | 1797 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
1791 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 1798 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
1792 IEEE80211_HW_AMPDU_AGGREGATION; 1799 IEEE80211_HW_AMPDU_AGGREGATION |
1800 IEEE80211_HW_WANT_MONITOR_VIF;
1793 1801
1794 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 1802 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
1803 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
1795 1804
1796 /* ask mac80211 to reserve space for magic */ 1805 /* ask mac80211 to reserve space for magic */
1797 hw->vif_data_size = sizeof(struct hwsim_vif_priv); 1806 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index a5e182b5e944..fe8ebfebcc0e 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -350,25 +350,26 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
350 ret_len += sizeof(struct mwifiex_ie_types_htcap); 350 ret_len += sizeof(struct mwifiex_ie_types_htcap);
351 } 351 }
352 352
353 if (bss_desc->bcn_ht_info) { 353 if (bss_desc->bcn_ht_oper) {
354 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { 354 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
355 ht_info = (struct mwifiex_ie_types_htinfo *) *buffer; 355 ht_info = (struct mwifiex_ie_types_htinfo *) *buffer;
356 memset(ht_info, 0, 356 memset(ht_info, 0,
357 sizeof(struct mwifiex_ie_types_htinfo)); 357 sizeof(struct mwifiex_ie_types_htinfo));
358 ht_info->header.type = 358 ht_info->header.type =
359 cpu_to_le16(WLAN_EID_HT_INFORMATION); 359 cpu_to_le16(WLAN_EID_HT_OPERATION);
360 ht_info->header.len = 360 ht_info->header.len =
361 cpu_to_le16(sizeof(struct ieee80211_ht_info)); 361 cpu_to_le16(
362 sizeof(struct ieee80211_ht_operation));
362 363
363 memcpy((u8 *) ht_info + 364 memcpy((u8 *) ht_info +
364 sizeof(struct mwifiex_ie_types_header), 365 sizeof(struct mwifiex_ie_types_header),
365 (u8 *) bss_desc->bcn_ht_info + 366 (u8 *) bss_desc->bcn_ht_oper +
366 sizeof(struct ieee_types_header), 367 sizeof(struct ieee_types_header),
367 le16_to_cpu(ht_info->header.len)); 368 le16_to_cpu(ht_info->header.len));
368 369
369 if (!(sband->ht_cap.cap & 370 if (!(sband->ht_cap.cap &
370 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 371 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
371 ht_info->ht_info.ht_param &= 372 ht_info->ht_oper.ht_param &=
372 ~(IEEE80211_HT_PARAM_CHAN_WIDTH_ANY | 373 ~(IEEE80211_HT_PARAM_CHAN_WIDTH_ANY |
373 IEEE80211_HT_PARAM_CHA_SEC_OFFSET); 374 IEEE80211_HT_PARAM_CHA_SEC_OFFSET);
374 375
@@ -385,16 +386,16 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
385 sizeof(struct mwifiex_ie_types_chan_list_param_set) - 386 sizeof(struct mwifiex_ie_types_chan_list_param_set) -
386 sizeof(struct mwifiex_ie_types_header)); 387 sizeof(struct mwifiex_ie_types_header));
387 chan_list->chan_scan_param[0].chan_number = 388 chan_list->chan_scan_param[0].chan_number =
388 bss_desc->bcn_ht_info->control_chan; 389 bss_desc->bcn_ht_oper->primary_chan;
389 chan_list->chan_scan_param[0].radio_type = 390 chan_list->chan_scan_param[0].radio_type =
390 mwifiex_band_to_radio_type((u8) bss_desc->bss_band); 391 mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
391 392
392 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && 393 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
393 bss_desc->bcn_ht_info->ht_param & 394 bss_desc->bcn_ht_oper->ht_param &
394 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) 395 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
395 SET_SECONDARYCHAN(chan_list->chan_scan_param[0]. 396 SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
396 radio_type, 397 radio_type,
397 (bss_desc->bcn_ht_info->ht_param & 398 (bss_desc->bcn_ht_oper->ht_param &
398 IEEE80211_HT_PARAM_CHA_SEC_OFFSET)); 399 IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
399 400
400 *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set); 401 *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set);
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 9eefb2a0ce9f..ab84eb943749 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -233,21 +233,27 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
233 233
234 skb_push(skb_aggr, headroom); 234 skb_push(skb_aggr, headroom);
235 235
236 /* 236 if (adapter->iface_type == MWIFIEX_USB) {
237 * Padding per MSDU will affect the length of next 237 adapter->data_sent = true;
238 * packet and hence the exact length of next packet 238 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
239 * is uncertain here. 239 skb_aggr, NULL);
240 * 240 } else {
241 * Also, aggregation of transmission buffer, while 241 /*
242 * downloading the data to the card, wont gain much 242 * Padding per MSDU will affect the length of next
243 * on the AMSDU packets as the AMSDU packets utilizes 243 * packet and hence the exact length of next packet
244 * the transmission buffer space to the maximum 244 * is uncertain here.
245 * (adapter->tx_buf_size). 245 *
246 */ 246 * Also, aggregation of transmission buffer, while
247 tx_param.next_pkt_len = 0; 247 * downloading the data to the card, wont gain much
248 248 * on the AMSDU packets as the AMSDU packets utilizes
249 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, 249 * the transmission buffer space to the maximum
250 skb_aggr, &tx_param); 250 * (adapter->tx_buf_size).
251 */
252 tx_param.next_pkt_len = 0;
253
254 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
255 skb_aggr, &tx_param);
256 }
251 switch (ret) { 257 switch (ret) {
252 case -EBUSY: 258 case -EBUSY:
253 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); 259 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 2a078cea830a..8e384fae3e68 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -10,12 +10,12 @@ config MWIFIEX
10 mwifiex. 10 mwifiex.
11 11
12config MWIFIEX_SDIO 12config MWIFIEX_SDIO
13 tristate "Marvell WiFi-Ex Driver for SD8787/SD8797" 13 tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797"
14 depends on MWIFIEX && MMC 14 depends on MWIFIEX && MMC
15 select FW_LOADER 15 select FW_LOADER
16 ---help--- 16 ---help---
17 This adds support for wireless adapters based on Marvell 17 This adds support for wireless adapters based on Marvell
18 8787/8797 chipsets with SDIO interface. 18 8786/8787/8797 chipsets with SDIO interface.
19 19
20 If you choose to build it as a module, it will be called 20 If you choose to build it as a module, it will be called
21 mwifiex_sdio. 21 mwifiex_sdio.
@@ -30,3 +30,14 @@ config MWIFIEX_PCIE
30 30
31 If you choose to build it as a module, it will be called 31 If you choose to build it as a module, it will be called
32 mwifiex_pcie. 32 mwifiex_pcie.
33
34config MWIFIEX_USB
35 tristate "Marvell WiFi-Ex Driver for USB8797"
36 depends on MWIFIEX && USB
37 select FW_LOADER
38 ---help---
39 This adds support for wireless adapters based on Marvell
40 Avastar 88W8797 chipset with USB interface.
41
42 If you choose to build it as a module, it will be called
43 mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index b0257ad1bbed..5c1a46bf1e11 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -42,3 +42,6 @@ obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o
42 42
43mwifiex_pcie-y += pcie.o 43mwifiex_pcie-y += pcie.o
44obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o 44obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o
45
46mwifiex_usb-y += usb.o
47obj-$(CONFIG_MWIFIEX_USB) += mwifiex_usb.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 65050384c42b..c78ea873a63a 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -516,25 +516,23 @@ static int
516mwifiex_dump_station_info(struct mwifiex_private *priv, 516mwifiex_dump_station_info(struct mwifiex_private *priv,
517 struct station_info *sinfo) 517 struct station_info *sinfo)
518{ 518{
519 struct mwifiex_ds_get_signal signal;
520 struct mwifiex_rate_cfg rate; 519 struct mwifiex_rate_cfg rate;
521 int ret = 0;
522 520
523 sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES | 521 sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES |
524 STATION_INFO_RX_PACKETS | 522 STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS |
525 STATION_INFO_TX_PACKETS 523 STATION_INFO_TX_BITRATE |
526 | STATION_INFO_SIGNAL | STATION_INFO_TX_BITRATE; 524 STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
527 525
528 /* Get signal information from the firmware */ 526 /* Get signal information from the firmware */
529 memset(&signal, 0, sizeof(struct mwifiex_ds_get_signal)); 527 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
530 if (mwifiex_get_signal_info(priv, &signal)) { 528 HostCmd_ACT_GEN_GET, 0, NULL)) {
531 dev_err(priv->adapter->dev, "getting signal information\n"); 529 dev_err(priv->adapter->dev, "failed to get signal information\n");
532 ret = -EFAULT; 530 return -EFAULT;
533 } 531 }
534 532
535 if (mwifiex_drv_get_data_rate(priv, &rate)) { 533 if (mwifiex_drv_get_data_rate(priv, &rate)) {
536 dev_err(priv->adapter->dev, "getting data rate\n"); 534 dev_err(priv->adapter->dev, "getting data rate\n");
537 ret = -EFAULT; 535 return -EFAULT;
538 } 536 }
539 537
540 /* Get DTIM period information from firmware */ 538 /* Get DTIM period information from firmware */
@@ -557,11 +555,12 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
557 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 555 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
558 } 556 }
559 557
558 sinfo->signal_avg = priv->bcn_rssi_avg;
560 sinfo->rx_bytes = priv->stats.rx_bytes; 559 sinfo->rx_bytes = priv->stats.rx_bytes;
561 sinfo->tx_bytes = priv->stats.tx_bytes; 560 sinfo->tx_bytes = priv->stats.tx_bytes;
562 sinfo->rx_packets = priv->stats.rx_packets; 561 sinfo->rx_packets = priv->stats.rx_packets;
563 sinfo->tx_packets = priv->stats.tx_packets; 562 sinfo->tx_packets = priv->stats.tx_packets;
564 sinfo->signal = priv->qual_level; 563 sinfo->signal = priv->bcn_rssi_avg;
565 /* bit rate is in 500 kb/s units. Convert it to 100kb/s units */ 564 /* bit rate is in 500 kb/s units. Convert it to 100kb/s units */
566 sinfo->txrate.legacy = rate.rate * 5; 565 sinfo->txrate.legacy = rate.rate * 5;
567 566
@@ -581,7 +580,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
581 priv->curr_bss_params.bss_descriptor.beacon_period; 580 priv->curr_bss_params.bss_descriptor.beacon_period;
582 } 581 }
583 582
584 return ret; 583 return 0;
585} 584}
586 585
587/* 586/*
@@ -604,6 +603,23 @@ mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
604 return mwifiex_dump_station_info(priv, sinfo); 603 return mwifiex_dump_station_info(priv, sinfo);
605} 604}
606 605
606/*
607 * CFG802.11 operation handler to dump station information.
608 */
609static int
610mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
611 int idx, u8 *mac, struct station_info *sinfo)
612{
613 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
614
615 if (!priv->media_connected || idx)
616 return -ENOENT;
617
618 memcpy(mac, priv->cfg_bssid, ETH_ALEN);
619
620 return mwifiex_dump_station_info(priv, sinfo);
621}
622
607/* Supported rates to be advertised to the cfg80211 */ 623/* Supported rates to be advertised to the cfg80211 */
608 624
609static struct ieee80211_rate mwifiex_rates[] = { 625static struct ieee80211_rate mwifiex_rates[] = {
@@ -750,6 +766,45 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
750} 766}
751 767
752/* 768/*
769 * CFG802.11 operation handler for connection quality monitoring.
770 *
771 * This function subscribes/unsubscribes HIGH_RSSI and LOW_RSSI
772 * events to FW.
773 */
774static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
775 struct net_device *dev,
776 s32 rssi_thold, u32 rssi_hyst)
777{
778 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
779 struct mwifiex_ds_misc_subsc_evt subsc_evt;
780
781 priv->cqm_rssi_thold = rssi_thold;
782 priv->cqm_rssi_hyst = rssi_hyst;
783
784 memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
785 subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
786
787 /* Subscribe/unsubscribe low and high rssi events */
788 if (rssi_thold && rssi_hyst) {
789 subsc_evt.action = HostCmd_ACT_BITWISE_SET;
790 subsc_evt.bcn_l_rssi_cfg.abs_value = abs(rssi_thold);
791 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(rssi_thold);
792 subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
793 subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
794 return mwifiex_send_cmd_sync(priv,
795 HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
796 0, 0, &subsc_evt);
797 } else {
798 subsc_evt.action = HostCmd_ACT_BITWISE_CLR;
799 return mwifiex_send_cmd_sync(priv,
800 HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
801 0, 0, &subsc_evt);
802 }
803
804 return 0;
805}
806
807/*
753 * CFG802.11 operation handler for disconnection request. 808 * CFG802.11 operation handler for disconnection request.
754 * 809 *
755 * This function does not work when there is already a disconnection 810 * This function does not work when there is already a disconnection
@@ -1107,6 +1162,17 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
1107 priv->user_scan_cfg->num_ssids = request->n_ssids; 1162 priv->user_scan_cfg->num_ssids = request->n_ssids;
1108 priv->user_scan_cfg->ssid_list = request->ssids; 1163 priv->user_scan_cfg->ssid_list = request->ssids;
1109 1164
1165 if (request->ie && request->ie_len) {
1166 for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
1167 if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
1168 continue;
1169 priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN;
1170 memcpy(&priv->vs_ie[i].ie, request->ie,
1171 request->ie_len);
1172 break;
1173 }
1174 }
1175
1110 for (i = 0; i < request->n_channels; i++) { 1176 for (i = 0; i < request->n_channels; i++) {
1111 chan = request->channels[i]; 1177 chan = request->channels[i];
1112 priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; 1178 priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
@@ -1124,6 +1190,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
1124 if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg)) 1190 if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
1125 return -EFAULT; 1191 return -EFAULT;
1126 1192
1193 if (request->ie && request->ie_len) {
1194 for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
1195 if (priv->vs_ie[i].mask == MWIFIEX_VSIE_MASK_SCAN) {
1196 priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_CLEAR;
1197 memset(&priv->vs_ie[i].ie, 0,
1198 MWIFIEX_MAX_VSIE_LEN);
1199 }
1200 }
1201 }
1127 return 0; 1202 return 0;
1128} 1203}
1129 1204
@@ -1340,6 +1415,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
1340 .connect = mwifiex_cfg80211_connect, 1415 .connect = mwifiex_cfg80211_connect,
1341 .disconnect = mwifiex_cfg80211_disconnect, 1416 .disconnect = mwifiex_cfg80211_disconnect,
1342 .get_station = mwifiex_cfg80211_get_station, 1417 .get_station = mwifiex_cfg80211_get_station,
1418 .dump_station = mwifiex_cfg80211_dump_station,
1343 .set_wiphy_params = mwifiex_cfg80211_set_wiphy_params, 1419 .set_wiphy_params = mwifiex_cfg80211_set_wiphy_params,
1344 .set_channel = mwifiex_cfg80211_set_channel, 1420 .set_channel = mwifiex_cfg80211_set_channel,
1345 .join_ibss = mwifiex_cfg80211_join_ibss, 1421 .join_ibss = mwifiex_cfg80211_join_ibss,
@@ -1350,6 +1426,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
1350 .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt, 1426 .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
1351 .set_tx_power = mwifiex_cfg80211_set_tx_power, 1427 .set_tx_power = mwifiex_cfg80211_set_tx_power,
1352 .set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask, 1428 .set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask,
1429 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
1353}; 1430};
1354 1431
1355/* 1432/*
@@ -1365,6 +1442,7 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
1365 void *wdev_priv; 1442 void *wdev_priv;
1366 struct wireless_dev *wdev; 1443 struct wireless_dev *wdev;
1367 struct ieee80211_sta_ht_cap *ht_info; 1444 struct ieee80211_sta_ht_cap *ht_info;
1445 u8 *country_code;
1368 1446
1369 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); 1447 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
1370 if (!wdev) { 1448 if (!wdev) {
@@ -1381,6 +1459,7 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
1381 } 1459 }
1382 wdev->iftype = NL80211_IFTYPE_STATION; 1460 wdev->iftype = NL80211_IFTYPE_STATION;
1383 wdev->wiphy->max_scan_ssids = 10; 1461 wdev->wiphy->max_scan_ssids = 10;
1462 wdev->wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
1384 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 1463 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1385 BIT(NL80211_IFTYPE_ADHOC); 1464 BIT(NL80211_IFTYPE_ADHOC);
1386 1465
@@ -1403,8 +1482,8 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
1403 memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN); 1482 memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
1404 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 1483 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1405 1484
1406 /* Reserve space for bss band information */ 1485 /* Reserve space for mwifiex specific private data for BSS */
1407 wdev->wiphy->bss_priv_size = sizeof(u8); 1486 wdev->wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
1408 1487
1409 wdev->wiphy->reg_notifier = mwifiex_reg_notifier; 1488 wdev->wiphy->reg_notifier = mwifiex_reg_notifier;
1410 1489
@@ -1427,6 +1506,11 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
1427 "info: successfully registered wiphy device\n"); 1506 "info: successfully registered wiphy device\n");
1428 } 1507 }
1429 1508
1509 country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
1510 if (country_code && regulatory_hint(wdev->wiphy, country_code))
1511 dev_err(priv->adapter->dev,
1512 "%s: regulatory_hint failed\n", __func__);
1513
1430 priv->wdev = wdev; 1514 priv->wdev = wdev;
1431 1515
1432 return ret; 1516 return ret;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 2fe1c33765b8..560871b0e236 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -71,6 +71,37 @@ u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
71 71
72static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 }; 72static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
73 73
74struct region_code_mapping {
75 u8 code;
76 u8 region[IEEE80211_COUNTRY_STRING_LEN];
77};
78
79static struct region_code_mapping region_code_mapping_t[] = {
80 { 0x10, "US " }, /* US FCC */
81 { 0x20, "CA " }, /* IC Canada */
82 { 0x30, "EU " }, /* ETSI */
83 { 0x31, "ES " }, /* Spain */
84 { 0x32, "FR " }, /* France */
85 { 0x40, "JP " }, /* Japan */
86 { 0x41, "JP " }, /* Japan */
87 { 0x50, "CN " }, /* China */
88};
89
90/* This function converts integer code to region string */
91u8 *mwifiex_11d_code_2_region(u8 code)
92{
93 u8 i;
94 u8 size = sizeof(region_code_mapping_t)/
95 sizeof(struct region_code_mapping);
96
97 /* Look for code in mapping table */
98 for (i = 0; i < size; i++)
99 if (region_code_mapping_t[i].code == code)
100 return region_code_mapping_t[i].region;
101
102 return NULL;
103}
104
74/* 105/*
75 * This function maps an index in supported rates table into 106 * This function maps an index in supported rates table into
76 * the corresponding data rate. 107 * the corresponding data rate.
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 07f6e0092552..1710beffb93a 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -139,6 +139,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
139 uint16_t cmd_size; 139 uint16_t cmd_size;
140 struct timeval tstamp; 140 struct timeval tstamp;
141 unsigned long flags; 141 unsigned long flags;
142 __le32 tmp;
142 143
143 if (!adapter || !cmd_node) 144 if (!adapter || !cmd_node)
144 return -1; 145 return -1;
@@ -178,15 +179,28 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
178 le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size, 179 le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
179 le16_to_cpu(host_cmd->seq_num)); 180 le16_to_cpu(host_cmd->seq_num));
180 181
181 skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN); 182 if (adapter->iface_type == MWIFIEX_USB) {
182 183 tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
183 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, 184 skb_push(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN);
184 cmd_node->cmd_skb, NULL); 185 memcpy(cmd_node->cmd_skb->data, &tmp, MWIFIEX_TYPE_LEN);
185 186 adapter->cmd_sent = true;
186 skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN); 187 ret = adapter->if_ops.host_to_card(adapter,
188 MWIFIEX_USB_EP_CMD_EVENT,
189 cmd_node->cmd_skb, NULL);
190 skb_pull(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN);
191 if (ret == -EBUSY)
192 cmd_node->cmd_skb = NULL;
193 } else {
194 skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN);
195 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
196 cmd_node->cmd_skb, NULL);
197 skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN);
198 }
187 199
188 if (ret == -1) { 200 if (ret == -1) {
189 dev_err(adapter->dev, "DNLD_CMD: host to card failed\n"); 201 dev_err(adapter->dev, "DNLD_CMD: host to card failed\n");
202 if (adapter->iface_type == MWIFIEX_USB)
203 adapter->cmd_sent = false;
190 if (cmd_node->wait_q_enabled) 204 if (cmd_node->wait_q_enabled)
191 adapter->cmd_wait_q.status = -1; 205 adapter->cmd_wait_q.status = -1;
192 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); 206 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
@@ -232,6 +246,9 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
232 struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = 246 struct mwifiex_opt_sleep_confirm *sleep_cfm_buf =
233 (struct mwifiex_opt_sleep_confirm *) 247 (struct mwifiex_opt_sleep_confirm *)
234 adapter->sleep_cfm->data; 248 adapter->sleep_cfm->data;
249 struct sk_buff *sleep_cfm_tmp;
250 __le32 tmp;
251
235 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); 252 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
236 253
237 sleep_cfm_buf->seq_num = 254 sleep_cfm_buf->seq_num =
@@ -240,10 +257,28 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
240 priv->bss_type))); 257 priv->bss_type)));
241 adapter->seq_num++; 258 adapter->seq_num++;
242 259
243 skb_push(adapter->sleep_cfm, INTF_HEADER_LEN); 260 if (adapter->iface_type == MWIFIEX_USB) {
244 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, 261 sleep_cfm_tmp =
245 adapter->sleep_cfm, NULL); 262 dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm)
246 skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN); 263 + MWIFIEX_TYPE_LEN);
264 skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm)
265 + MWIFIEX_TYPE_LEN);
266 tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
267 memcpy(sleep_cfm_tmp->data, &tmp, MWIFIEX_TYPE_LEN);
268 memcpy(sleep_cfm_tmp->data + MWIFIEX_TYPE_LEN,
269 adapter->sleep_cfm->data,
270 sizeof(struct mwifiex_opt_sleep_confirm));
271 ret = adapter->if_ops.host_to_card(adapter,
272 MWIFIEX_USB_EP_CMD_EVENT,
273 sleep_cfm_tmp, NULL);
274 if (ret != -EBUSY)
275 dev_kfree_skb_any(sleep_cfm_tmp);
276 } else {
277 skb_push(adapter->sleep_cfm, INTF_HEADER_LEN);
278 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
279 adapter->sleep_cfm, NULL);
280 skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN);
281 }
247 282
248 if (ret == -1) { 283 if (ret == -1) {
249 dev_err(adapter->dev, "SLEEP_CFM: failed\n"); 284 dev_err(adapter->dev, "SLEEP_CFM: failed\n");
@@ -343,7 +378,12 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
343 } 378 }
344 if (!cmd_array[i].resp_skb) 379 if (!cmd_array[i].resp_skb)
345 continue; 380 continue;
346 dev_kfree_skb_any(cmd_array[i].resp_skb); 381
382 if (adapter->iface_type == MWIFIEX_USB)
383 adapter->if_ops.cmdrsp_complete(adapter,
384 cmd_array[i].resp_skb);
385 else
386 dev_kfree_skb_any(cmd_array[i].resp_skb);
347 } 387 }
348 /* Release struct cmd_ctrl_node */ 388 /* Release struct cmd_ctrl_node */
349 if (adapter->cmd_pool) { 389 if (adapter->cmd_pool) {
@@ -1083,6 +1123,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
1083 MWIFIEX_BSS_ROLE_ANY), 1123 MWIFIEX_BSS_ROLE_ANY),
1084 false); 1124 false);
1085} 1125}
1126EXPORT_SYMBOL_GPL(mwifiex_process_hs_config);
1086 1127
1087/* 1128/*
1088 * This function handles the command response of a sleep confirm command. 1129 * This function handles the command response of a sleep confirm command.
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 1a845074c52a..a870b5885c09 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -212,7 +212,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
212 p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid); 212 p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
213 p += sprintf(p, "bssid=\"%pM\"\n", info.bssid); 213 p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
214 p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan); 214 p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
215 p += sprintf(p, "region_code = \"%02x\"\n", info.region_code); 215 p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
216 216
217 netdev_for_each_mc_addr(ha, netdev) 217 netdev_for_each_mc_addr(ha, netdev)
218 p += sprintf(p, "multicast_address[%d]=\"%pM\"\n", 218 p += sprintf(p, "multicast_address[%d]=\"%pM\"\n",
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index be5fd1652e53..d04aba4131dc 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -53,6 +53,7 @@
53#define MWIFIEX_RATE_BITMAP_MCS127 159 53#define MWIFIEX_RATE_BITMAP_MCS127 159
54 54
55#define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024) 55#define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024)
56#define MWIFIEX_RX_CMD_BUF_SIZE (2 * 1024)
56 57
57#define MWIFIEX_RTS_MIN_VALUE (0) 58#define MWIFIEX_RTS_MIN_VALUE (0)
58#define MWIFIEX_RTS_MAX_VALUE (2347) 59#define MWIFIEX_RTS_MAX_VALUE (2347)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index e98fc5af73dc..5f6adeb9b950 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -81,6 +81,11 @@ enum KEY_TYPE_ID {
81#define FIRMWARE_READY_SDIO 0xfedc 81#define FIRMWARE_READY_SDIO 0xfedc
82#define FIRMWARE_READY_PCIE 0xfedcba00 82#define FIRMWARE_READY_PCIE 0xfedcba00
83 83
84enum mwifiex_usb_ep {
85 MWIFIEX_USB_EP_CMD_EVENT = 1,
86 MWIFIEX_USB_EP_DATA = 2,
87};
88
84enum MWIFIEX_802_11_PRIVACY_FILTER { 89enum MWIFIEX_802_11_PRIVACY_FILTER {
85 MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL, 90 MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL,
86 MWIFIEX_802_11_PRIV_FILTER_8021X_WEP 91 MWIFIEX_802_11_PRIV_FILTER_8021X_WEP
@@ -92,16 +97,19 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
92#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0) 97#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0)
93#define TLV_TYPE_CHANLIST (PROPRIETARY_TLV_BASE_ID + 1) 98#define TLV_TYPE_CHANLIST (PROPRIETARY_TLV_BASE_ID + 1)
94#define TLV_TYPE_NUMPROBES (PROPRIETARY_TLV_BASE_ID + 2) 99#define TLV_TYPE_NUMPROBES (PROPRIETARY_TLV_BASE_ID + 2)
100#define TLV_TYPE_RSSI_LOW (PROPRIETARY_TLV_BASE_ID + 4)
95#define TLV_TYPE_PASSTHROUGH (PROPRIETARY_TLV_BASE_ID + 10) 101#define TLV_TYPE_PASSTHROUGH (PROPRIETARY_TLV_BASE_ID + 10)
96#define TLV_TYPE_WMMQSTATUS (PROPRIETARY_TLV_BASE_ID + 16) 102#define TLV_TYPE_WMMQSTATUS (PROPRIETARY_TLV_BASE_ID + 16)
97#define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18) 103#define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18)
98#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19) 104#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
105#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
99#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31) 106#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
100#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) 107#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
101#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82) 108#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
102#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83) 109#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
103#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84) 110#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
104#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94) 111#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
112#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105)
105#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113) 113#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113)
106#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114) 114#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114)
107 115
@@ -194,6 +202,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
194#define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e 202#define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e
195#define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c 203#define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c
196#define HostCmd_CMD_WMM_GET_STATUS 0x0071 204#define HostCmd_CMD_WMM_GET_STATUS 0x0071
205#define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075
197#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f 206#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
198#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083 207#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
199#define HostCmd_CMD_VERSION_EXT 0x0097 208#define HostCmd_CMD_VERSION_EXT 0x0097
@@ -228,6 +237,8 @@ enum ENH_PS_MODES {
228#define HostCmd_RET_BIT 0x8000 237#define HostCmd_RET_BIT 0x8000
229#define HostCmd_ACT_GEN_GET 0x0000 238#define HostCmd_ACT_GEN_GET 0x0000
230#define HostCmd_ACT_GEN_SET 0x0001 239#define HostCmd_ACT_GEN_SET 0x0001
240#define HostCmd_ACT_BITWISE_SET 0x0002
241#define HostCmd_ACT_BITWISE_CLR 0x0003
231#define HostCmd_RESULT_OK 0x0000 242#define HostCmd_RESULT_OK 0x0000
232 243
233#define HostCmd_ACT_MAC_RX_ON 0x0001 244#define HostCmd_ACT_MAC_RX_ON 0x0001
@@ -813,7 +824,7 @@ struct host_cmd_ds_txpwr_cfg {
813struct mwifiex_bcn_param { 824struct mwifiex_bcn_param {
814 u8 bssid[ETH_ALEN]; 825 u8 bssid[ETH_ALEN];
815 u8 rssi; 826 u8 rssi;
816 __le32 timestamp[2]; 827 __le64 timestamp;
817 __le16 beacon_period; 828 __le16 beacon_period;
818 __le16 cap_info_bitmap; 829 __le16 cap_info_bitmap;
819} __packed; 830} __packed;
@@ -982,8 +993,7 @@ struct mwifiex_ie_types_wmm_queue_status {
982struct ieee_types_vendor_header { 993struct ieee_types_vendor_header {
983 u8 element_id; 994 u8 element_id;
984 u8 len; 995 u8 len;
985 u8 oui[3]; 996 u8 oui[4]; /* 0~2: oui, 3: oui_type */
986 u8 oui_type;
987 u8 oui_subtype; 997 u8 oui_subtype;
988 u8 version; 998 u8 version;
989} __packed; 999} __packed;
@@ -1007,7 +1017,7 @@ struct ieee_types_wmm_parameter {
1007 struct ieee_types_vendor_header vend_hdr; 1017 struct ieee_types_vendor_header vend_hdr;
1008 u8 qos_info_bitmap; 1018 u8 qos_info_bitmap;
1009 u8 reserved; 1019 u8 reserved;
1010 struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_MAX_QUEUES]; 1020 struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
1011} __packed; 1021} __packed;
1012 1022
1013struct ieee_types_wmm_info { 1023struct ieee_types_wmm_info {
@@ -1028,7 +1038,7 @@ struct ieee_types_wmm_info {
1028 1038
1029struct host_cmd_ds_wmm_get_status { 1039struct host_cmd_ds_wmm_get_status {
1030 u8 queue_status_tlv[sizeof(struct mwifiex_ie_types_wmm_queue_status) * 1040 u8 queue_status_tlv[sizeof(struct mwifiex_ie_types_wmm_queue_status) *
1031 IEEE80211_MAX_QUEUES]; 1041 IEEE80211_NUM_ACS];
1032 u8 wmm_param_tlv[sizeof(struct ieee_types_wmm_parameter) + 2]; 1042 u8 wmm_param_tlv[sizeof(struct ieee_types_wmm_parameter) + 2];
1033} __packed; 1043} __packed;
1034 1044
@@ -1045,7 +1055,7 @@ struct mwifiex_ie_types_htcap {
1045 1055
1046struct mwifiex_ie_types_htinfo { 1056struct mwifiex_ie_types_htinfo {
1047 struct mwifiex_ie_types_header header; 1057 struct mwifiex_ie_types_header header;
1048 struct ieee80211_ht_info ht_info; 1058 struct ieee80211_ht_operation ht_oper;
1049} __packed; 1059} __packed;
1050 1060
1051struct mwifiex_ie_types_2040bssco { 1061struct mwifiex_ie_types_2040bssco {
@@ -1146,6 +1156,17 @@ struct host_cmd_ds_pcie_details {
1146 u32 sleep_cookie_addr_hi; 1156 u32 sleep_cookie_addr_hi;
1147} __packed; 1157} __packed;
1148 1158
1159struct mwifiex_ie_types_rssi_threshold {
1160 struct mwifiex_ie_types_header header;
1161 u8 abs_value;
1162 u8 evt_freq;
1163} __packed;
1164
1165struct host_cmd_ds_802_11_subsc_evt {
1166 __le16 action;
1167 __le16 events;
1168} __packed;
1169
1149struct host_cmd_ds_command { 1170struct host_cmd_ds_command {
1150 __le16 command; 1171 __le16 command;
1151 __le16 size; 1172 __le16 size;
@@ -1195,6 +1216,7 @@ struct host_cmd_ds_command {
1195 struct host_cmd_ds_set_bss_mode bss_mode; 1216 struct host_cmd_ds_set_bss_mode bss_mode;
1196 struct host_cmd_ds_pcie_details pcie_host_spec; 1217 struct host_cmd_ds_pcie_details pcie_host_spec;
1197 struct host_cmd_ds_802_11_eeprom_access eeprom; 1218 struct host_cmd_ds_802_11_eeprom_access eeprom;
1219 struct host_cmd_ds_802_11_subsc_evt subsc_evt;
1198 } params; 1220 } params;
1199} __packed; 1221} __packed;
1200 1222
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 54bb4839b57c..d440c3eb640b 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -131,6 +131,8 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
131 priv->wmm_qosinfo = 0; 131 priv->wmm_qosinfo = 0;
132 priv->curr_bcn_buf = NULL; 132 priv->curr_bcn_buf = NULL;
133 priv->curr_bcn_size = 0; 133 priv->curr_bcn_size = 0;
134 priv->wps_ie = NULL;
135 priv->wps_ie_len = 0;
134 136
135 priv->scan_block = false; 137 priv->scan_block = false;
136 138
@@ -186,10 +188,10 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
186 188
187 adapter->cmd_sent = false; 189 adapter->cmd_sent = false;
188 190
189 if (adapter->iface_type == MWIFIEX_PCIE) 191 if (adapter->iface_type == MWIFIEX_SDIO)
190 adapter->data_sent = false;
191 else
192 adapter->data_sent = true; 192 adapter->data_sent = true;
193 else
194 adapter->data_sent = false;
193 195
194 adapter->cmd_resp_received = false; 196 adapter->cmd_resp_received = false;
195 adapter->event_received = false; 197 adapter->event_received = false;
@@ -377,7 +379,8 @@ mwifiex_free_adapter(struct mwifiex_adapter *adapter)
377 379
378 dev_dbg(adapter->dev, "info: free scan table\n"); 380 dev_dbg(adapter->dev, "info: free scan table\n");
379 381
380 adapter->if_ops.cleanup_if(adapter); 382 if (adapter->if_ops.cleanup_if)
383 adapter->if_ops.cleanup_if(adapter);
381 384
382 if (adapter->sleep_cfm) 385 if (adapter->sleep_cfm)
383 dev_kfree_skb_any(adapter->sleep_cfm); 386 dev_kfree_skb_any(adapter->sleep_cfm);
@@ -417,6 +420,8 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
417 spin_lock_init(&adapter->cmd_pending_q_lock); 420 spin_lock_init(&adapter->cmd_pending_q_lock);
418 spin_lock_init(&adapter->scan_pending_q_lock); 421 spin_lock_init(&adapter->scan_pending_q_lock);
419 422
423 skb_queue_head_init(&adapter->usb_rx_data_q);
424
420 for (i = 0; i < adapter->priv_num; ++i) { 425 for (i = 0; i < adapter->priv_num; ++i) {
421 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head); 426 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
422 adapter->bss_prio_tbl[i].bss_prio_cur = NULL; 427 adapter->bss_prio_tbl[i].bss_prio_cur = NULL;
@@ -572,6 +577,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
572 struct mwifiex_private *priv; 577 struct mwifiex_private *priv;
573 s32 i; 578 s32 i;
574 unsigned long flags; 579 unsigned long flags;
580 struct sk_buff *skb;
575 581
576 /* mwifiex already shutdown */ 582 /* mwifiex already shutdown */
577 if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY) 583 if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)
@@ -599,6 +605,18 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
599 605
600 spin_lock_irqsave(&adapter->mwifiex_lock, flags); 606 spin_lock_irqsave(&adapter->mwifiex_lock, flags);
601 607
608 if (adapter->if_ops.data_complete) {
609 while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) {
610 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
611
612 priv = adapter->priv[rx_info->bss_num];
613 if (priv)
614 priv->stats.rx_dropped++;
615
616 adapter->if_ops.data_complete(adapter, skb);
617 }
618 }
619
602 /* Free adapter structure */ 620 /* Free adapter structure */
603 mwifiex_free_adapter(adapter); 621 mwifiex_free_adapter(adapter);
604 622
@@ -628,24 +646,28 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
628 int ret; 646 int ret;
629 u32 poll_num = 1; 647 u32 poll_num = 1;
630 648
631 adapter->winner = 0; 649 if (adapter->if_ops.check_fw_status) {
650 adapter->winner = 0;
632 651
633 /* Check if firmware is already running */ 652 /* check if firmware is already running */
634 ret = adapter->if_ops.check_fw_status(adapter, poll_num); 653 ret = adapter->if_ops.check_fw_status(adapter, poll_num);
635 if (!ret) { 654 if (!ret) {
636 dev_notice(adapter->dev, 655 dev_notice(adapter->dev,
637 "WLAN FW already running! Skip FW download\n"); 656 "WLAN FW already running! Skip FW dnld\n");
638 goto done; 657 goto done;
639 } 658 }
640 poll_num = MAX_FIRMWARE_POLL_TRIES; 659
641 660 poll_num = MAX_FIRMWARE_POLL_TRIES;
642 /* Check if we are the winner for downloading FW */ 661
643 if (!adapter->winner) { 662 /* check if we are the winner for downloading FW */
644 dev_notice(adapter->dev, 663 if (!adapter->winner) {
645 "Other intf already running! Skip FW download\n"); 664 dev_notice(adapter->dev,
646 poll_num = MAX_MULTI_INTERFACE_POLL_TRIES; 665 "FW already running! Skip FW dnld\n");
647 goto poll_fw; 666 poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
667 goto poll_fw;
668 }
648 } 669 }
670
649 if (pmfw) { 671 if (pmfw) {
650 /* Download firmware with helper */ 672 /* Download firmware with helper */
651 ret = adapter->if_ops.prog_fw(adapter, pmfw); 673 ret = adapter->if_ops.prog_fw(adapter, pmfw);
@@ -664,6 +686,8 @@ poll_fw:
664 } 686 }
665done: 687done:
666 /* re-enable host interrupt for mwifiex after fw dnld is successful */ 688 /* re-enable host interrupt for mwifiex after fw dnld is successful */
667 adapter->if_ops.enable_int(adapter); 689 if (adapter->if_ops.enable_int)
690 adapter->if_ops.enable_int(adapter);
691
668 return ret; 692 return ret;
669} 693}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 7ca4e8234f3e..f0f95524e96b 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -85,34 +85,6 @@ struct mwifiex_ds_get_stats {
85 u32 wep_icv_error[4]; 85 u32 wep_icv_error[4];
86}; 86};
87 87
88#define BCN_RSSI_AVG_MASK 0x00000002
89#define BCN_NF_AVG_MASK 0x00000200
90#define ALL_RSSI_INFO_MASK 0x00000fff
91
92struct mwifiex_ds_get_signal {
93 /*
94 * Bit0: Last Beacon RSSI, Bit1: Average Beacon RSSI,
95 * Bit2: Last Data RSSI, Bit3: Average Data RSSI,
96 * Bit4: Last Beacon SNR, Bit5: Average Beacon SNR,
97 * Bit6: Last Data SNR, Bit7: Average Data SNR,
98 * Bit8: Last Beacon NF, Bit9: Average Beacon NF,
99 * Bit10: Last Data NF, Bit11: Average Data NF
100 */
101 u16 selector;
102 s16 bcn_rssi_last;
103 s16 bcn_rssi_avg;
104 s16 data_rssi_last;
105 s16 data_rssi_avg;
106 s16 bcn_snr_last;
107 s16 bcn_snr_avg;
108 s16 data_snr_last;
109 s16 data_snr_avg;
110 s16 bcn_nf_last;
111 s16 bcn_nf_avg;
112 s16 data_nf_last;
113 s16 data_nf_avg;
114};
115
116#define MWIFIEX_MAX_VER_STR_LEN 128 88#define MWIFIEX_MAX_VER_STR_LEN 128
117 89
118struct mwifiex_ver_ext { 90struct mwifiex_ver_ext {
@@ -124,7 +96,7 @@ struct mwifiex_bss_info {
124 u32 bss_mode; 96 u32 bss_mode;
125 struct cfg80211_ssid ssid; 97 struct cfg80211_ssid ssid;
126 u32 bss_chan; 98 u32 bss_chan;
127 u32 region_code; 99 u8 country_code[3];
128 u32 media_connected; 100 u32 media_connected;
129 u32 max_power_level; 101 u32 max_power_level;
130 u32 min_power_level; 102 u32 min_power_level;
@@ -308,8 +280,30 @@ struct mwifiex_ds_misc_cmd {
308 u8 cmd[MWIFIEX_SIZE_OF_CMD_BUFFER]; 280 u8 cmd[MWIFIEX_SIZE_OF_CMD_BUFFER];
309}; 281};
310 282
283#define BITMASK_BCN_RSSI_LOW BIT(0)
284#define BITMASK_BCN_RSSI_HIGH BIT(4)
285
286enum subsc_evt_rssi_state {
287 EVENT_HANDLED,
288 RSSI_LOW_RECVD,
289 RSSI_HIGH_RECVD
290};
291
292struct subsc_evt_cfg {
293 u8 abs_value;
294 u8 evt_freq;
295};
296
297struct mwifiex_ds_misc_subsc_evt {
298 u16 action;
299 u16 events;
300 struct subsc_evt_cfg bcn_l_rssi_cfg;
301 struct subsc_evt_cfg bcn_h_rssi_cfg;
302};
303
311#define MWIFIEX_MAX_VSIE_LEN (256) 304#define MWIFIEX_MAX_VSIE_LEN (256)
312#define MWIFIEX_MAX_VSIE_NUM (8) 305#define MWIFIEX_MAX_VSIE_NUM (8)
306#define MWIFIEX_VSIE_MASK_CLEAR 0x00
313#define MWIFIEX_VSIE_MASK_SCAN 0x01 307#define MWIFIEX_VSIE_MASK_SCAN 0x01
314#define MWIFIEX_VSIE_MASK_ASSOC 0x02 308#define MWIFIEX_VSIE_MASK_ASSOC 0x02
315#define MWIFIEX_VSIE_MASK_ADHOC 0x04 309#define MWIFIEX_VSIE_MASK_ADHOC 0x04
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 8f9382b9c3ca..8a390982463e 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -118,15 +118,15 @@ mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer,
118 *buffer += sizeof(tsf_tlv.header); 118 *buffer += sizeof(tsf_tlv.header);
119 119
120 /* TSF at the time when beacon/probe_response was received */ 120 /* TSF at the time when beacon/probe_response was received */
121 tsf_val = cpu_to_le64(bss_desc->network_tsf); 121 tsf_val = cpu_to_le64(bss_desc->fw_tsf);
122 memcpy(*buffer, &tsf_val, sizeof(tsf_val)); 122 memcpy(*buffer, &tsf_val, sizeof(tsf_val));
123 *buffer += sizeof(tsf_val); 123 *buffer += sizeof(tsf_val);
124 124
125 memcpy(&tsf_val, bss_desc->time_stamp, sizeof(tsf_val)); 125 tsf_val = cpu_to_le64(bss_desc->timestamp);
126 126
127 dev_dbg(priv->adapter->dev, 127 dev_dbg(priv->adapter->dev,
128 "info: %s: TSF offset calc: %016llx - %016llx\n", 128 "info: %s: TSF offset calc: %016llx - %016llx\n",
129 __func__, tsf_val, bss_desc->network_tsf); 129 __func__, bss_desc->timestamp, bss_desc->fw_tsf);
130 130
131 memcpy(*buffer, &tsf_val, sizeof(tsf_val)); 131 memcpy(*buffer, &tsf_val, sizeof(tsf_val));
132 *buffer += sizeof(tsf_val); 132 *buffer += sizeof(tsf_val);
@@ -225,6 +225,48 @@ mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv,
225} 225}
226 226
227/* 227/*
228 * This function appends a WPS IE. It is called from the network join command
229 * preparation routine.
230 *
231 * If the IE buffer has been setup by the application, this routine appends
232 * the buffer as a WPS TLV type to the request.
233 */
234static int
235mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer)
236{
237 int retLen = 0;
238 struct mwifiex_ie_types_header ie_header;
239
240 if (!buffer || !*buffer)
241 return 0;
242
243 /*
244 * If there is a wps ie buffer setup, append it to the return
245 * parameter buffer pointer.
246 */
247 if (priv->wps_ie_len) {
248 dev_dbg(priv->adapter->dev, "cmd: append wps ie %d to %p\n",
249 priv->wps_ie_len, *buffer);
250
251 /* Wrap the generic IE buffer with a pass through TLV type */
252 ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE);
253 ie_header.len = cpu_to_le16(priv->wps_ie_len);
254 memcpy(*buffer, &ie_header, sizeof(ie_header));
255 *buffer += sizeof(ie_header);
256 retLen += sizeof(ie_header);
257
258 memcpy(*buffer, priv->wps_ie, priv->wps_ie_len);
259 *buffer += priv->wps_ie_len;
260 retLen += priv->wps_ie_len;
261
262 }
263
264 kfree(priv->wps_ie);
265 priv->wps_ie_len = 0;
266 return retLen;
267}
268
269/*
228 * This function appends a WAPI IE. 270 * This function appends a WAPI IE.
229 * 271 *
230 * This function is called from the network join command preparation routine. 272 * This function is called from the network join command preparation routine.
@@ -480,6 +522,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
480 if (priv->sec_info.wapi_enabled && priv->wapi_ie_len) 522 if (priv->sec_info.wapi_enabled && priv->wapi_ie_len)
481 mwifiex_cmd_append_wapi_ie(priv, &pos); 523 mwifiex_cmd_append_wapi_ie(priv, &pos);
482 524
525 if (priv->wps.session_enable && priv->wps_ie_len)
526 mwifiex_cmd_append_wps_ie(priv, &pos);
483 527
484 mwifiex_cmd_append_generic_ie(priv, &pos); 528 mwifiex_cmd_append_generic_ie(priv, &pos);
485 529
@@ -932,20 +976,20 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
932 /* Fill HT INFORMATION */ 976 /* Fill HT INFORMATION */
933 ht_info = (struct mwifiex_ie_types_htinfo *) pos; 977 ht_info = (struct mwifiex_ie_types_htinfo *) pos;
934 memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo)); 978 memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
935 ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION); 979 ht_info->header.type = cpu_to_le16(WLAN_EID_HT_OPERATION);
936 ht_info->header.len = 980 ht_info->header.len =
937 cpu_to_le16(sizeof(struct ieee80211_ht_info)); 981 cpu_to_le16(sizeof(struct ieee80211_ht_operation));
938 982
939 ht_info->ht_info.control_chan = 983 ht_info->ht_oper.primary_chan =
940 (u8) priv->curr_bss_params.bss_descriptor.channel; 984 (u8) priv->curr_bss_params.bss_descriptor.channel;
941 if (adapter->sec_chan_offset) { 985 if (adapter->sec_chan_offset) {
942 ht_info->ht_info.ht_param = adapter->sec_chan_offset; 986 ht_info->ht_oper.ht_param = adapter->sec_chan_offset;
943 ht_info->ht_info.ht_param |= 987 ht_info->ht_oper.ht_param |=
944 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; 988 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
945 } 989 }
946 ht_info->ht_info.operation_mode = 990 ht_info->ht_oper.operation_mode =
947 cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); 991 cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
948 ht_info->ht_info.basic_set[0] = 0xff; 992 ht_info->ht_oper.basic_set[0] = 0xff;
949 pos += sizeof(struct mwifiex_ie_types_htinfo); 993 pos += sizeof(struct mwifiex_ie_types_htinfo);
950 cmd_append_size += 994 cmd_append_size +=
951 sizeof(struct mwifiex_ie_types_htinfo); 995 sizeof(struct mwifiex_ie_types_htinfo);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9d1b3ca6334b..be0f0e583f75 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -58,8 +58,9 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
58 memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops)); 58 memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops));
59 59
60 /* card specific initialization has been deferred until now .. */ 60 /* card specific initialization has been deferred until now .. */
61 if (adapter->if_ops.init_if(adapter)) 61 if (adapter->if_ops.init_if)
62 goto error; 62 if (adapter->if_ops.init_if(adapter))
63 goto error;
63 64
64 adapter->priv_num = 0; 65 adapter->priv_num = 0;
65 66
@@ -140,6 +141,7 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
140{ 141{
141 int ret = 0; 142 int ret = 0;
142 unsigned long flags; 143 unsigned long flags;
144 struct sk_buff *skb;
143 145
144 spin_lock_irqsave(&adapter->main_proc_lock, flags); 146 spin_lock_irqsave(&adapter->main_proc_lock, flags);
145 147
@@ -161,7 +163,8 @@ process_start:
161 if (adapter->int_status) { 163 if (adapter->int_status) {
162 if (adapter->hs_activated) 164 if (adapter->hs_activated)
163 mwifiex_process_hs_config(adapter); 165 mwifiex_process_hs_config(adapter);
164 adapter->if_ops.process_int_status(adapter); 166 if (adapter->if_ops.process_int_status)
167 adapter->if_ops.process_int_status(adapter);
165 } 168 }
166 169
167 /* Need to wake up the card ? */ 170 /* Need to wake up the card ? */
@@ -174,6 +177,7 @@ process_start:
174 adapter->if_ops.wakeup(adapter); 177 adapter->if_ops.wakeup(adapter);
175 continue; 178 continue;
176 } 179 }
180
177 if (IS_CARD_RX_RCVD(adapter)) { 181 if (IS_CARD_RX_RCVD(adapter)) {
178 adapter->pm_wakeup_fw_try = false; 182 adapter->pm_wakeup_fw_try = false;
179 if (adapter->ps_state == PS_STATE_SLEEP) 183 if (adapter->ps_state == PS_STATE_SLEEP)
@@ -194,6 +198,11 @@ process_start:
194 } 198 }
195 } 199 }
196 200
201 /* Check Rx data for USB */
202 if (adapter->iface_type == MWIFIEX_USB)
203 while ((skb = skb_dequeue(&adapter->usb_rx_data_q)))
204 mwifiex_handle_rx_packet(adapter, skb);
205
197 /* Check for Cmd Resp */ 206 /* Check for Cmd Resp */
198 if (adapter->cmd_resp_received) { 207 if (adapter->cmd_resp_received) {
199 adapter->cmd_resp_received = false; 208 adapter->cmd_resp_received = false;
@@ -292,33 +301,35 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
292} 301}
293 302
294/* 303/*
295 * This function initializes the hardware and firmware. 304 * This function gets firmware and initializes it.
296 * 305 *
297 * The main initialization steps followed are - 306 * The main initialization steps followed are -
298 * - Download the correct firmware to card 307 * - Download the correct firmware to card
299 * - Allocate and initialize the adapter structure
300 * - Initialize the private structures
301 * - Issue the init commands to firmware 308 * - Issue the init commands to firmware
302 */ 309 */
303static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter) 310static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
304{ 311{
305 int ret, err; 312 int ret;
313 char fmt[64];
314 struct mwifiex_private *priv;
315 struct mwifiex_adapter *adapter = context;
306 struct mwifiex_fw_image fw; 316 struct mwifiex_fw_image fw;
307 317
308 memset(&fw, 0, sizeof(struct mwifiex_fw_image)); 318 if (!firmware) {
309 319 dev_err(adapter->dev,
310 err = request_firmware(&adapter->firmware, adapter->fw_name, 320 "Failed to get firmware %s\n", adapter->fw_name);
311 adapter->dev);
312 if (err < 0) {
313 dev_err(adapter->dev, "request_firmware() returned"
314 " error code %#x\n", err);
315 ret = -1;
316 goto done; 321 goto done;
317 } 322 }
323
324 memset(&fw, 0, sizeof(struct mwifiex_fw_image));
325 adapter->firmware = firmware;
318 fw.fw_buf = (u8 *) adapter->firmware->data; 326 fw.fw_buf = (u8 *) adapter->firmware->data;
319 fw.fw_len = adapter->firmware->size; 327 fw.fw_len = adapter->firmware->size;
320 328
321 ret = mwifiex_dnld_fw(adapter, &fw); 329 if (adapter->if_ops.dnld_fw)
330 ret = adapter->if_ops.dnld_fw(adapter, &fw);
331 else
332 ret = mwifiex_dnld_fw(adapter, &fw);
322 if (ret == -1) 333 if (ret == -1)
323 goto done; 334 goto done;
324 335
@@ -335,17 +346,54 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
335 /* Wait for mwifiex_init to complete */ 346 /* Wait for mwifiex_init to complete */
336 wait_event_interruptible(adapter->init_wait_q, 347 wait_event_interruptible(adapter->init_wait_q,
337 adapter->init_wait_q_woken); 348 adapter->init_wait_q_woken);
338 if (adapter->hw_status != MWIFIEX_HW_STATUS_READY) { 349 if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
339 ret = -1;
340 goto done; 350 goto done;
351
352 priv = adapter->priv[0];
353 if (mwifiex_register_cfg80211(priv) != 0) {
354 dev_err(adapter->dev, "cannot register with cfg80211\n");
355 goto err_init_fw;
341 } 356 }
342 ret = 0;
343 357
358 rtnl_lock();
359 /* Create station interface by default */
360 if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d",
361 NL80211_IFTYPE_STATION, NULL, NULL)) {
362 dev_err(adapter->dev, "cannot create default STA interface\n");
363 goto err_add_intf;
364 }
365 rtnl_unlock();
366
367 mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
368 dev_notice(adapter->dev, "driver_version = %s\n", fmt);
369 goto done;
370
371err_add_intf:
372 mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
373 rtnl_unlock();
374err_init_fw:
375 pr_debug("info: %s: unregister device\n", __func__);
376 adapter->if_ops.unregister_dev(adapter);
344done: 377done:
345 if (adapter->firmware) 378 release_firmware(adapter->firmware);
346 release_firmware(adapter->firmware); 379 complete(&adapter->fw_load);
347 if (ret) 380 return;
348 ret = -1; 381}
382
383/*
384 * This function initializes the hardware and gets firmware.
385 */
386static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
387{
388 int ret;
389
390 init_completion(&adapter->fw_load);
391 ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
392 adapter->dev, GFP_KERNEL, adapter,
393 mwifiex_fw_dpc);
394 if (ret < 0)
395 dev_err(adapter->dev,
396 "request_firmware_nowait() returned error %d\n", ret);
349 return ret; 397 return ret;
350} 398}
351 399
@@ -650,8 +698,6 @@ mwifiex_add_card(void *card, struct semaphore *sem,
650 struct mwifiex_if_ops *if_ops, u8 iface_type) 698 struct mwifiex_if_ops *if_ops, u8 iface_type)
651{ 699{
652 struct mwifiex_adapter *adapter; 700 struct mwifiex_adapter *adapter;
653 char fmt[64];
654 struct mwifiex_private *priv;
655 701
656 if (down_interruptible(sem)) 702 if (down_interruptible(sem))
657 goto exit_sem_err; 703 goto exit_sem_err;
@@ -692,40 +738,13 @@ mwifiex_add_card(void *card, struct semaphore *sem,
692 goto err_init_fw; 738 goto err_init_fw;
693 } 739 }
694 740
695 priv = adapter->priv[0];
696
697 if (mwifiex_register_cfg80211(priv) != 0) {
698 dev_err(adapter->dev, "cannot register netdevice"
699 " with cfg80211\n");
700 goto err_init_fw;
701 }
702
703 rtnl_lock();
704 /* Create station interface by default */
705 if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d",
706 NL80211_IFTYPE_STATION, NULL, NULL)) {
707 rtnl_unlock();
708 dev_err(adapter->dev, "cannot create default station"
709 " interface\n");
710 goto err_add_intf;
711 }
712
713 rtnl_unlock();
714
715 up(sem); 741 up(sem);
716
717 mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
718 dev_notice(adapter->dev, "driver_version = %s\n", fmt);
719
720 return 0; 742 return 0;
721 743
722err_add_intf:
723 rtnl_lock();
724 mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
725 rtnl_unlock();
726err_init_fw: 744err_init_fw:
727 pr_debug("info: %s: unregister device\n", __func__); 745 pr_debug("info: %s: unregister device\n", __func__);
728 adapter->if_ops.unregister_dev(adapter); 746 if (adapter->if_ops.unregister_dev)
747 adapter->if_ops.unregister_dev(adapter);
729err_registerdev: 748err_registerdev:
730 adapter->surprise_removed = true; 749 adapter->surprise_removed = true;
731 mwifiex_terminate_workqueue(adapter); 750 mwifiex_terminate_workqueue(adapter);
@@ -830,7 +849,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
830 849
831 /* Unregister device */ 850 /* Unregister device */
832 dev_dbg(adapter->dev, "info: unregister device\n"); 851 dev_dbg(adapter->dev, "info: unregister device\n");
833 adapter->if_ops.unregister_dev(adapter); 852 if (adapter->if_ops.unregister_dev)
853 adapter->if_ops.unregister_dev(adapter);
834 /* Free adapter structure */ 854 /* Free adapter structure */
835 dev_dbg(adapter->dev, "info: free adapter\n"); 855 dev_dbg(adapter->dev, "info: free adapter\n");
836 mwifiex_free_adapter(adapter); 856 mwifiex_free_adapter(adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 35225e9b1080..324ad390cacd 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -92,9 +92,16 @@ enum {
92#define MWIFIEX_OUI_NOT_PRESENT 0 92#define MWIFIEX_OUI_NOT_PRESENT 0
93#define MWIFIEX_OUI_PRESENT 1 93#define MWIFIEX_OUI_PRESENT 1
94 94
95/*
96 * Do not check for data_received for USB, as data_received
97 * is handled in mwifiex_usb_recv for USB
98 */
95#define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \ 99#define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \
96 adapter->event_received || \ 100 adapter->event_received || \
97 adapter->data_received) 101 ((adapter->iface_type != MWIFIEX_USB) && \
102 adapter->data_received) || \
103 ((adapter->iface_type == MWIFIEX_USB) && \
104 !skb_queue_empty(&adapter->usb_rx_data_q)))
98 105
99#define MWIFIEX_TYPE_CMD 1 106#define MWIFIEX_TYPE_CMD 1
100#define MWIFIEX_TYPE_DATA 0 107#define MWIFIEX_TYPE_DATA 0
@@ -110,6 +117,11 @@ enum {
110 117
111#define MWIFIEX_EVENT_HEADER_LEN 4 118#define MWIFIEX_EVENT_HEADER_LEN 4
112 119
120#define MWIFIEX_TYPE_LEN 4
121#define MWIFIEX_USB_TYPE_CMD 0xF00DFACE
122#define MWIFIEX_USB_TYPE_DATA 0xBEADC0DE
123#define MWIFIEX_USB_TYPE_EVENT 0xBEEFFACE
124
113struct mwifiex_dbg { 125struct mwifiex_dbg {
114 u32 num_cmd_host_to_card_failure; 126 u32 num_cmd_host_to_card_failure;
115 u32 num_cmd_sleep_cfm_host_to_card_failure; 127 u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -162,6 +174,7 @@ enum MWIFIEX_PS_STATE {
162enum mwifiex_iface_type { 174enum mwifiex_iface_type {
163 MWIFIEX_SDIO, 175 MWIFIEX_SDIO,
164 MWIFIEX_PCIE, 176 MWIFIEX_PCIE,
177 MWIFIEX_USB
165}; 178};
166 179
167struct mwifiex_add_ba_param { 180struct mwifiex_add_ba_param {
@@ -201,10 +214,10 @@ struct mwifiex_wmm_desc {
201 u32 packets_out[MAX_NUM_TID]; 214 u32 packets_out[MAX_NUM_TID];
202 /* spin lock to protect ra_list */ 215 /* spin lock to protect ra_list */
203 spinlock_t ra_list_spinlock; 216 spinlock_t ra_list_spinlock;
204 struct mwifiex_wmm_ac_status ac_status[IEEE80211_MAX_QUEUES]; 217 struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS];
205 enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_MAX_QUEUES]; 218 enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_NUM_ACS];
206 u32 drv_pkt_delay_max; 219 u32 drv_pkt_delay_max;
207 u8 queue_priority[IEEE80211_MAX_QUEUES]; 220 u8 queue_priority[IEEE80211_NUM_ACS];
208 u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */ 221 u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */
209 /* Number of transmit packets queued */ 222 /* Number of transmit packets queued */
210 atomic_t tx_pkts_queued; 223 atomic_t tx_pkts_queued;
@@ -260,8 +273,8 @@ struct mwifiex_bssdescriptor {
260 * BAND_A(0X04): 'a' band 273 * BAND_A(0X04): 'a' band
261 */ 274 */
262 u16 bss_band; 275 u16 bss_band;
263 u64 network_tsf; 276 u64 fw_tsf;
264 u8 time_stamp[8]; 277 u64 timestamp;
265 union ieee_types_phy_param_set phy_param_set; 278 union ieee_types_phy_param_set phy_param_set;
266 union ieee_types_ss_param_set ss_param_set; 279 union ieee_types_ss_param_set ss_param_set;
267 u16 cap_info_bitmap; 280 u16 cap_info_bitmap;
@@ -269,7 +282,7 @@ struct mwifiex_bssdescriptor {
269 u8 disable_11n; 282 u8 disable_11n;
270 struct ieee80211_ht_cap *bcn_ht_cap; 283 struct ieee80211_ht_cap *bcn_ht_cap;
271 u16 ht_cap_offset; 284 u16 ht_cap_offset;
272 struct ieee80211_ht_info *bcn_ht_info; 285 struct ieee80211_ht_operation *bcn_ht_oper;
273 u16 ht_info_offset; 286 u16 ht_info_offset;
274 u8 *bcn_bss_co_2040; 287 u8 *bcn_bss_co_2040;
275 u16 bss_co_2040_offset; 288 u16 bss_co_2040_offset;
@@ -407,6 +420,8 @@ struct mwifiex_private {
407 struct host_cmd_ds_802_11_key_material aes_key; 420 struct host_cmd_ds_802_11_key_material aes_key;
408 u8 wapi_ie[256]; 421 u8 wapi_ie[256];
409 u8 wapi_ie_len; 422 u8 wapi_ie_len;
423 u8 *wps_ie;
424 u8 wps_ie_len;
410 u8 wmm_required; 425 u8 wmm_required;
411 u8 wmm_enabled; 426 u8 wmm_enabled;
412 u8 wmm_qosinfo; 427 u8 wmm_qosinfo;
@@ -448,7 +463,6 @@ struct mwifiex_private {
448 struct dentry *dfs_dev_dir; 463 struct dentry *dfs_dev_dir;
449#endif 464#endif
450 u8 nick_name[16]; 465 u8 nick_name[16];
451 u8 qual_level, qual_noise;
452 u16 current_key_index; 466 u16 current_key_index;
453 struct semaphore async_sem; 467 struct semaphore async_sem;
454 u8 scan_pending_on_block; 468 u8 scan_pending_on_block;
@@ -459,6 +473,9 @@ struct mwifiex_private {
459 u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 473 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
460 struct wps wps; 474 struct wps wps;
461 u8 scan_block; 475 u8 scan_block;
476 s32 cqm_rssi_thold;
477 u32 cqm_rssi_hyst;
478 u8 subsc_evt_rssi_state;
462}; 479};
463 480
464enum mwifiex_ba_status { 481enum mwifiex_ba_status {
@@ -518,6 +535,11 @@ struct cmd_ctrl_node {
518 u8 cmd_wait_q_woken; 535 u8 cmd_wait_q_woken;
519}; 536};
520 537
538struct mwifiex_bss_priv {
539 u8 band;
540 u64 fw_tsf;
541};
542
521struct mwifiex_if_ops { 543struct mwifiex_if_ops {
522 int (*init_if) (struct mwifiex_adapter *); 544 int (*init_if) (struct mwifiex_adapter *);
523 void (*cleanup_if) (struct mwifiex_adapter *); 545 void (*cleanup_if) (struct mwifiex_adapter *);
@@ -537,6 +559,8 @@ struct mwifiex_if_ops {
537 void (*cleanup_mpa_buf) (struct mwifiex_adapter *); 559 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
538 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *); 560 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
539 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); 561 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
562 int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
563 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
540}; 564};
541 565
542struct mwifiex_adapter { 566struct mwifiex_adapter {
@@ -599,6 +623,7 @@ struct mwifiex_adapter {
599 struct list_head scan_pending_q; 623 struct list_head scan_pending_q;
600 /* spin lock for scan_pending_q */ 624 /* spin lock for scan_pending_q */
601 spinlock_t scan_pending_q_lock; 625 spinlock_t scan_pending_q_lock;
626 struct sk_buff_head usb_rx_data_q;
602 u32 scan_processing; 627 u32 scan_processing;
603 u16 region_code; 628 u16 region_code;
604 struct mwifiex_802_11d_domain_reg domain_reg; 629 struct mwifiex_802_11d_domain_reg domain_reg;
@@ -651,6 +676,7 @@ struct mwifiex_adapter {
651 u8 scan_wait_q_woken; 676 u8 scan_wait_q_woken;
652 struct cmd_ctrl_node *cmd_queued; 677 struct cmd_ctrl_node *cmd_queued;
653 spinlock_t queue_lock; /* lock for tx queues */ 678 spinlock_t queue_lock; /* lock for tx queues */
679 struct completion fw_load;
654}; 680};
655 681
656int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); 682int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -896,8 +922,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
896int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type); 922int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
897int mwifiex_enable_hs(struct mwifiex_adapter *adapter); 923int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
898int mwifiex_disable_auto_ds(struct mwifiex_private *priv); 924int mwifiex_disable_auto_ds(struct mwifiex_private *priv);
899int mwifiex_get_signal_info(struct mwifiex_private *priv,
900 struct mwifiex_ds_get_signal *signal);
901int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, 925int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
902 struct mwifiex_rate_cfg *rate); 926 struct mwifiex_rate_cfg *rate);
903int mwifiex_request_scan(struct mwifiex_private *priv, 927int mwifiex_request_scan(struct mwifiex_private *priv,
@@ -950,13 +974,10 @@ int mwifiex_bss_set_channel(struct mwifiex_private *,
950int mwifiex_get_bss_info(struct mwifiex_private *, 974int mwifiex_get_bss_info(struct mwifiex_private *,
951 struct mwifiex_bss_info *); 975 struct mwifiex_bss_info *);
952int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, 976int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
953 u8 *bssid, s32 rssi, u8 *ie_buf, 977 struct cfg80211_bss *bss,
954 size_t ie_len, u16 beacon_period,
955 u16 cap_info_bitmap, u8 band,
956 struct mwifiex_bssdescriptor *bss_desc); 978 struct mwifiex_bssdescriptor *bss_desc);
957int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, 979int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
958 struct mwifiex_bssdescriptor *bss_entry, 980 struct mwifiex_bssdescriptor *bss_entry);
959 u8 *ie_buf, u32 ie_len);
960int mwifiex_check_network_compatibility(struct mwifiex_private *priv, 981int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
961 struct mwifiex_bssdescriptor *bss_desc); 982 struct mwifiex_bssdescriptor *bss_desc);
962 983
@@ -965,6 +986,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
965 u32 *flags, struct vif_params *params); 986 u32 *flags, struct vif_params *params);
966int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev); 987int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev);
967 988
989u8 *mwifiex_11d_code_2_region(u8 code);
968 990
969#ifdef CONFIG_DEBUG_FS 991#ifdef CONFIG_DEBUG_FS
970void mwifiex_debugfs_init(void); 992void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 5867facd415d..13fbc4eb1595 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -119,6 +119,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
119 if (!adapter || !adapter->priv_num) 119 if (!adapter || !adapter->priv_num)
120 return; 120 return;
121 121
122 /* In case driver is removed when asynchronous FW load is in progress */
123 wait_for_completion(&adapter->fw_load);
124
122 if (user_rmmod) { 125 if (user_rmmod) {
123#ifdef CONFIG_PM 126#ifdef CONFIG_PM
124 if (adapter->is_suspended) 127 if (adapter->is_suspended)
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 445ff21772e2..2f218f9a3fd3 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -48,15 +48,15 @@
48#define PCIE_HOST_INT_STATUS_MASK 0xC3C 48#define PCIE_HOST_INT_STATUS_MASK 0xC3C
49#define PCIE_SCRATCH_2_REG 0xC40 49#define PCIE_SCRATCH_2_REG 0xC40
50#define PCIE_SCRATCH_3_REG 0xC44 50#define PCIE_SCRATCH_3_REG 0xC44
51#define PCIE_SCRATCH_4_REG 0xCC0 51#define PCIE_SCRATCH_4_REG 0xCD0
52#define PCIE_SCRATCH_5_REG 0xCC4 52#define PCIE_SCRATCH_5_REG 0xCD4
53#define PCIE_SCRATCH_6_REG 0xCC8 53#define PCIE_SCRATCH_6_REG 0xCD8
54#define PCIE_SCRATCH_7_REG 0xCCC 54#define PCIE_SCRATCH_7_REG 0xCDC
55#define PCIE_SCRATCH_8_REG 0xCD0 55#define PCIE_SCRATCH_8_REG 0xCE0
56#define PCIE_SCRATCH_9_REG 0xCD4 56#define PCIE_SCRATCH_9_REG 0xCE4
57#define PCIE_SCRATCH_10_REG 0xCD8 57#define PCIE_SCRATCH_10_REG 0xCE8
58#define PCIE_SCRATCH_11_REG 0xCDC 58#define PCIE_SCRATCH_11_REG 0xCEC
59#define PCIE_SCRATCH_12_REG 0xCE0 59#define PCIE_SCRATCH_12_REG 0xCF0
60 60
61#define CPU_INTR_DNLD_RDY BIT(0) 61#define CPU_INTR_DNLD_RDY BIT(0)
62#define CPU_INTR_DOOR_BELL BIT(1) 62#define CPU_INTR_DOOR_BELL BIT(1)
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index aff9cd763f2b..74f045715723 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1048,10 +1048,8 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
1048 * This function parses provided beacon buffer and updates 1048 * This function parses provided beacon buffer and updates
1049 * respective fields in bss descriptor structure. 1049 * respective fields in bss descriptor structure.
1050 */ 1050 */
1051int 1051int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1052mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, 1052 struct mwifiex_bssdescriptor *bss_entry)
1053 struct mwifiex_bssdescriptor *bss_entry,
1054 u8 *ie_buf, u32 ie_len)
1055{ 1053{
1056 int ret = 0; 1054 int ret = 0;
1057 u8 element_id; 1055 u8 element_id;
@@ -1073,10 +1071,8 @@ mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1073 1071
1074 found_data_rate_ie = false; 1072 found_data_rate_ie = false;
1075 rate_size = 0; 1073 rate_size = 0;
1076 current_ptr = ie_buf; 1074 current_ptr = bss_entry->beacon_buf;
1077 bytes_left = ie_len; 1075 bytes_left = bss_entry->beacon_buf_size;
1078 bss_entry->beacon_buf = ie_buf;
1079 bss_entry->beacon_buf_size = ie_len;
1080 1076
1081 /* Process variable IE */ 1077 /* Process variable IE */
1082 while (bytes_left >= 2) { 1078 while (bytes_left >= 2) {
@@ -1221,9 +1217,9 @@ mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1221 sizeof(struct ieee_types_header) - 1217 sizeof(struct ieee_types_header) -
1222 bss_entry->beacon_buf); 1218 bss_entry->beacon_buf);
1223 break; 1219 break;
1224 case WLAN_EID_HT_INFORMATION: 1220 case WLAN_EID_HT_OPERATION:
1225 bss_entry->bcn_ht_info = (struct ieee80211_ht_info *) 1221 bss_entry->bcn_ht_oper =
1226 (current_ptr + 1222 (struct ieee80211_ht_operation *)(current_ptr +
1227 sizeof(struct ieee_types_header)); 1223 sizeof(struct ieee_types_header));
1228 bss_entry->ht_info_offset = (u16) (current_ptr + 1224 bss_entry->ht_info_offset = (u16) (current_ptr +
1229 sizeof(struct ieee_types_header) - 1225 sizeof(struct ieee_types_header) -
@@ -1447,15 +1443,12 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
1447 return ret; 1443 return ret;
1448} 1444}
1449 1445
1450static int 1446static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
1451mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid, 1447 struct cfg80211_bss *bss)
1452 s32 rssi, const u8 *ie_buf, size_t ie_len,
1453 u16 beacon_period, u16 cap_info_bitmap, u8 band)
1454{ 1448{
1455 struct mwifiex_bssdescriptor *bss_desc; 1449 struct mwifiex_bssdescriptor *bss_desc;
1456 int ret; 1450 int ret;
1457 unsigned long flags; 1451 unsigned long flags;
1458 u8 *beacon_ie;
1459 1452
1460 /* Allocate and fill new bss descriptor */ 1453 /* Allocate and fill new bss descriptor */
1461 bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), 1454 bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
@@ -1465,16 +1458,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1465 return -ENOMEM; 1458 return -ENOMEM;
1466 } 1459 }
1467 1460
1468 beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL); 1461 ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
1469 if (!beacon_ie) {
1470 kfree(bss_desc);
1471 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
1472 return -ENOMEM;
1473 }
1474
1475 ret = mwifiex_fill_new_bss_desc(priv, bssid, rssi, beacon_ie,
1476 ie_len, beacon_period,
1477 cap_info_bitmap, band, bss_desc);
1478 if (ret) 1462 if (ret)
1479 goto done; 1463 goto done;
1480 1464
@@ -1493,7 +1477,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1493 priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL; 1477 priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL;
1494 priv->curr_bss_params.bss_descriptor.ht_cap_offset = 1478 priv->curr_bss_params.bss_descriptor.ht_cap_offset =
1495 0; 1479 0;
1496 priv->curr_bss_params.bss_descriptor.bcn_ht_info = NULL; 1480 priv->curr_bss_params.bss_descriptor.bcn_ht_oper = NULL;
1497 priv->curr_bss_params.bss_descriptor.ht_info_offset = 1481 priv->curr_bss_params.bss_descriptor.ht_info_offset =
1498 0; 1482 0;
1499 priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 = 1483 priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 =
@@ -1514,7 +1498,6 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1514 1498
1515done: 1499done:
1516 kfree(bss_desc); 1500 kfree(bss_desc);
1517 kfree(beacon_ie);
1518 return 0; 1501 return 0;
1519} 1502}
1520 1503
@@ -1620,14 +1603,16 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1620 const u8 *ie_buf; 1603 const u8 *ie_buf;
1621 size_t ie_len; 1604 size_t ie_len;
1622 u16 channel = 0; 1605 u16 channel = 0;
1623 u64 network_tsf = 0; 1606 u64 fw_tsf = 0;
1624 u16 beacon_size = 0; 1607 u16 beacon_size = 0;
1625 u32 curr_bcn_bytes; 1608 u32 curr_bcn_bytes;
1626 u32 freq; 1609 u32 freq;
1627 u16 beacon_period; 1610 u16 beacon_period;
1628 u16 cap_info_bitmap; 1611 u16 cap_info_bitmap;
1629 u8 *current_ptr; 1612 u8 *current_ptr;
1613 u64 timestamp;
1630 struct mwifiex_bcn_param *bcn_param; 1614 struct mwifiex_bcn_param *bcn_param;
1615 struct mwifiex_bss_priv *bss_priv;
1631 1616
1632 if (bytes_left >= sizeof(beacon_size)) { 1617 if (bytes_left >= sizeof(beacon_size)) {
1633 /* Extract & convert beacon size from command buffer */ 1618 /* Extract & convert beacon size from command buffer */
@@ -1667,9 +1652,11 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1667 1652
1668 memcpy(bssid, bcn_param->bssid, ETH_ALEN); 1653 memcpy(bssid, bcn_param->bssid, ETH_ALEN);
1669 1654
1670 rssi = (s32) (bcn_param->rssi); 1655 rssi = (s32) bcn_param->rssi;
1671 dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n", rssi); 1656 rssi = (-rssi) * 100; /* Convert dBm to mBm */
1657 dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
1672 1658
1659 timestamp = le64_to_cpu(bcn_param->timestamp);
1673 beacon_period = le16_to_cpu(bcn_param->beacon_period); 1660 beacon_period = le16_to_cpu(bcn_param->beacon_period);
1674 1661
1675 cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap); 1662 cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
@@ -1709,14 +1696,13 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1709 1696
1710 /* 1697 /*
1711 * If the TSF TLV was appended to the scan results, save this 1698 * If the TSF TLV was appended to the scan results, save this
1712 * entry's TSF value in the networkTSF field.The networkTSF is 1699 * entry's TSF value in the fw_tsf field. It is the firmware's
1713 * the firmware's TSF value at the time the beacon or probe 1700 * TSF value at the time the beacon or probe response was
1714 * response was received. 1701 * received.
1715 */ 1702 */
1716 if (tsf_tlv) 1703 if (tsf_tlv)
1717 memcpy(&network_tsf, 1704 memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
1718 &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE], 1705 sizeof(fw_tsf));
1719 sizeof(network_tsf));
1720 1706
1721 if (channel) { 1707 if (channel) {
1722 struct ieee80211_channel *chan; 1708 struct ieee80211_channel *chan;
@@ -1739,21 +1725,19 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1739 1725
1740 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { 1726 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
1741 bss = cfg80211_inform_bss(priv->wdev->wiphy, 1727 bss = cfg80211_inform_bss(priv->wdev->wiphy,
1742 chan, bssid, network_tsf, 1728 chan, bssid, timestamp,
1743 cap_info_bitmap, beacon_period, 1729 cap_info_bitmap, beacon_period,
1744 ie_buf, ie_len, rssi, GFP_KERNEL); 1730 ie_buf, ie_len, rssi, GFP_KERNEL);
1745 *(u8 *)bss->priv = band; 1731 bss_priv = (struct mwifiex_bss_priv *)bss->priv;
1746 cfg80211_put_bss(bss); 1732 bss_priv->band = band;
1747 1733 bss_priv->fw_tsf = fw_tsf;
1748 if (priv->media_connected && 1734 if (priv->media_connected &&
1749 !memcmp(bssid, 1735 !memcmp(bssid,
1750 priv->curr_bss_params.bss_descriptor 1736 priv->curr_bss_params.bss_descriptor
1751 .mac_address, ETH_ALEN)) 1737 .mac_address, ETH_ALEN))
1752 mwifiex_update_curr_bss_params 1738 mwifiex_update_curr_bss_params(priv,
1753 (priv, bssid, rssi, 1739 bss);
1754 ie_buf, ie_len, 1740 cfg80211_put_bss(bss);
1755 beacon_period,
1756 cap_info_bitmap, band);
1757 } 1741 }
1758 } else { 1742 } else {
1759 dev_dbg(adapter->dev, "missing BSS channel IE\n"); 1743 dev_dbg(adapter->dev, "missing BSS channel IE\n");
@@ -2019,8 +2003,8 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
2019 (curr_bss->beacon_buf + 2003 (curr_bss->beacon_buf +
2020 curr_bss->ht_cap_offset); 2004 curr_bss->ht_cap_offset);
2021 2005
2022 if (curr_bss->bcn_ht_info) 2006 if (curr_bss->bcn_ht_oper)
2023 curr_bss->bcn_ht_info = (struct ieee80211_ht_info *) 2007 curr_bss->bcn_ht_oper = (struct ieee80211_ht_operation *)
2024 (curr_bss->beacon_buf + 2008 (curr_bss->beacon_buf +
2025 curr_bss->ht_info_offset); 2009 curr_bss->ht_info_offset);
2026 2010
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index f8012e2b7f7c..e0377473282f 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -123,6 +123,9 @@ mwifiex_sdio_remove(struct sdio_func *func)
123 if (!adapter || !adapter->priv_num) 123 if (!adapter || !adapter->priv_num)
124 return; 124 return;
125 125
126 /* In case driver is removed when asynchronous FW load is in progress */
127 wait_for_completion(&adapter->fw_load);
128
126 if (user_rmmod) { 129 if (user_rmmod) {
127 if (adapter->is_suspended) 130 if (adapter->is_suspended)
128 mwifiex_sdio_resume(adapter->dev); 131 mwifiex_sdio_resume(adapter->dev);
@@ -250,6 +253,8 @@ static int mwifiex_sdio_resume(struct device *dev)
250 return 0; 253 return 0;
251} 254}
252 255
256/* Device ID for SD8786 */
257#define SDIO_DEVICE_ID_MARVELL_8786 (0x9116)
253/* Device ID for SD8787 */ 258/* Device ID for SD8787 */
254#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119) 259#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
255/* Device ID for SD8797 */ 260/* Device ID for SD8797 */
@@ -257,6 +262,7 @@ static int mwifiex_sdio_resume(struct device *dev)
257 262
258/* WLAN IDs */ 263/* WLAN IDs */
259static const struct sdio_device_id mwifiex_ids[] = { 264static const struct sdio_device_id mwifiex_ids[] = {
265 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786)},
260 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)}, 266 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
261 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)}, 267 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
262 {}, 268 {},
@@ -1596,6 +1602,9 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
1596 adapter->dev = &func->dev; 1602 adapter->dev = &func->dev;
1597 1603
1598 switch (func->device) { 1604 switch (func->device) {
1605 case SDIO_DEVICE_ID_MARVELL_8786:
1606 strcpy(adapter->fw_name, SD8786_DEFAULT_FW_NAME);
1607 break;
1599 case SDIO_DEVICE_ID_MARVELL_8797: 1608 case SDIO_DEVICE_ID_MARVELL_8797:
1600 strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME); 1609 strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
1601 break; 1610 break;
@@ -1804,5 +1813,6 @@ MODULE_AUTHOR("Marvell International Ltd.");
1804MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION); 1813MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
1805MODULE_VERSION(SDIO_VERSION); 1814MODULE_VERSION(SDIO_VERSION);
1806MODULE_LICENSE("GPL v2"); 1815MODULE_LICENSE("GPL v2");
1816MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME);
1807MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); 1817MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
1808MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); 1818MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index a3fb322205b0..21033738ef0c 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -28,6 +28,7 @@
28 28
29#include "main.h" 29#include "main.h"
30 30
31#define SD8786_DEFAULT_FW_NAME "mrvl/sd8786_uapsta.bin"
31#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin" 32#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
32#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin" 33#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
33 34
@@ -193,7 +194,7 @@
193 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \ 194 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \
194 a->mp_end_port))); \ 195 a->mp_end_port))); \
195 a->mpa_tx.pkt_cnt++; \ 196 a->mpa_tx.pkt_cnt++; \
196} while (0); 197} while (0)
197 198
198/* SDIO Tx aggregation limit ? */ 199/* SDIO Tx aggregation limit ? */
199#define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \ 200#define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \
@@ -211,7 +212,7 @@
211 a->mpa_tx.buf_len = 0; \ 212 a->mpa_tx.buf_len = 0; \
212 a->mpa_tx.ports = 0; \ 213 a->mpa_tx.ports = 0; \
213 a->mpa_tx.start_port = 0; \ 214 a->mpa_tx.start_port = 0; \
214} while (0); 215} while (0)
215 216
216/* SDIO Rx aggregation limit ? */ 217/* SDIO Rx aggregation limit ? */
217#define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \ 218#define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \
@@ -242,7 +243,7 @@
242 a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \ 243 a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \
243 a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \ 244 a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \
244 a->mpa_rx.pkt_cnt++; \ 245 a->mpa_rx.pkt_cnt++; \
245} while (0); 246} while (0)
246 247
247/* Reset SDIO Rx aggregation buffer parameters */ 248/* Reset SDIO Rx aggregation buffer parameters */
248#define MP_RX_AGGR_BUF_RESET(a) do { \ 249#define MP_RX_AGGR_BUF_RESET(a) do { \
@@ -250,7 +251,7 @@
250 a->mpa_rx.buf_len = 0; \ 251 a->mpa_rx.buf_len = 0; \
251 a->mpa_rx.ports = 0; \ 252 a->mpa_rx.ports = 0; \
252 a->mpa_rx.start_port = 0; \ 253 a->mpa_rx.start_port = 0; \
253} while (0); 254} while (0)
254 255
255 256
256/* data structure for SDIO MPA TX */ 257/* data structure for SDIO MPA TX */
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 6c8e4594b48b..87ed2a1f6cd9 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -907,6 +907,101 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
907} 907}
908 908
909/* 909/*
910 * This function prepares command for event subscription, configuration
911 * and query. Events can be subscribed or unsubscribed. Current subscribed
912 * events can be queried. Also, current subscribed events are reported in
913 * every FW response.
914 */
915static int
916mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
917 struct host_cmd_ds_command *cmd,
918 struct mwifiex_ds_misc_subsc_evt *subsc_evt_cfg)
919{
920 struct host_cmd_ds_802_11_subsc_evt *subsc_evt = &cmd->params.subsc_evt;
921 struct mwifiex_ie_types_rssi_threshold *rssi_tlv;
922 u16 event_bitmap;
923 u8 *pos;
924
925 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SUBSCRIBE_EVENT);
926 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_subsc_evt) +
927 S_DS_GEN);
928
929 subsc_evt->action = cpu_to_le16(subsc_evt_cfg->action);
930 dev_dbg(priv->adapter->dev, "cmd: action: %d\n", subsc_evt_cfg->action);
931
932 /*For query requests, no configuration TLV structures are to be added.*/
933 if (subsc_evt_cfg->action == HostCmd_ACT_GEN_GET)
934 return 0;
935
936 subsc_evt->events = cpu_to_le16(subsc_evt_cfg->events);
937
938 event_bitmap = subsc_evt_cfg->events;
939 dev_dbg(priv->adapter->dev, "cmd: event bitmap : %16x\n",
940 event_bitmap);
941
942 if (((subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) ||
943 (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_SET)) &&
944 (event_bitmap == 0)) {
945 dev_dbg(priv->adapter->dev, "Error: No event specified "
946 "for bitwise action type\n");
947 return -EINVAL;
948 }
949
950 /*
951 * Append TLV structures for each of the specified events for
952 * subscribing or re-configuring. This is not required for
953 * bitwise unsubscribing request.
954 */
955 if (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR)
956 return 0;
957
958 pos = ((u8 *)subsc_evt) +
959 sizeof(struct host_cmd_ds_802_11_subsc_evt);
960
961 if (event_bitmap & BITMASK_BCN_RSSI_LOW) {
962 rssi_tlv = (struct mwifiex_ie_types_rssi_threshold *) pos;
963
964 rssi_tlv->header.type = cpu_to_le16(TLV_TYPE_RSSI_LOW);
965 rssi_tlv->header.len =
966 cpu_to_le16(sizeof(struct mwifiex_ie_types_rssi_threshold) -
967 sizeof(struct mwifiex_ie_types_header));
968 rssi_tlv->abs_value = subsc_evt_cfg->bcn_l_rssi_cfg.abs_value;
969 rssi_tlv->evt_freq = subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq;
970
971 dev_dbg(priv->adapter->dev, "Cfg Beacon Low Rssi event, "
972 "RSSI:-%d dBm, Freq:%d\n",
973 subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
974 subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
975
976 pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
977 le16_add_cpu(&cmd->size,
978 sizeof(struct mwifiex_ie_types_rssi_threshold));
979 }
980
981 if (event_bitmap & BITMASK_BCN_RSSI_HIGH) {
982 rssi_tlv = (struct mwifiex_ie_types_rssi_threshold *) pos;
983
984 rssi_tlv->header.type = cpu_to_le16(TLV_TYPE_RSSI_HIGH);
985 rssi_tlv->header.len =
986 cpu_to_le16(sizeof(struct mwifiex_ie_types_rssi_threshold) -
987 sizeof(struct mwifiex_ie_types_header));
988 rssi_tlv->abs_value = subsc_evt_cfg->bcn_h_rssi_cfg.abs_value;
989 rssi_tlv->evt_freq = subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq;
990
991 dev_dbg(priv->adapter->dev, "Cfg Beacon High Rssi event, "
992 "RSSI:-%d dBm, Freq:%d\n",
993 subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
994 subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
995
996 pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
997 le16_add_cpu(&cmd->size,
998 sizeof(struct mwifiex_ie_types_rssi_threshold));
999 }
1000
1001 return 0;
1002}
1003
1004/*
910 * This function prepares the commands before sending them to the firmware. 1005 * This function prepares the commands before sending them to the firmware.
911 * 1006 *
912 * This is a generic function which calls specific command preparation 1007 * This is a generic function which calls specific command preparation
@@ -1086,6 +1181,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1086 case HostCmd_CMD_PCIE_DESC_DETAILS: 1181 case HostCmd_CMD_PCIE_DESC_DETAILS:
1087 ret = mwifiex_cmd_pcie_host_spec(priv, cmd_ptr, cmd_action); 1182 ret = mwifiex_cmd_pcie_host_spec(priv, cmd_ptr, cmd_action);
1088 break; 1183 break;
1184 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
1185 ret = mwifiex_cmd_802_11_subsc_evt(priv, cmd_ptr, data_buf);
1186 break;
1089 default: 1187 default:
1090 dev_err(priv->adapter->dev, 1188 dev_err(priv->adapter->dev,
1091 "PREP_CMD: unknown cmd- %#x\n", cmd_no); 1189 "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1195,7 +1293,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1195 if (ret) 1293 if (ret)
1196 return -1; 1294 return -1;
1197 1295
1198 if (first_sta) { 1296 if (first_sta && (priv->adapter->iface_type != MWIFIEX_USB)) {
1199 /* Enable auto deep sleep */ 1297 /* Enable auto deep sleep */
1200 auto_ds.auto_ds = DEEP_SLEEP_ON; 1298 auto_ds.auto_ds = DEEP_SLEEP_ON;
1201 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME; 1299 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 4da19ed0f078..3aa54243dea9 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -119,11 +119,11 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
119 * calculated SNR values. 119 * calculated SNR values.
120 */ 120 */
121static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv, 121static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
122 struct host_cmd_ds_command *resp, 122 struct host_cmd_ds_command *resp)
123 struct mwifiex_ds_get_signal *signal)
124{ 123{
125 struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp = 124 struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
126 &resp->params.rssi_info_rsp; 125 &resp->params.rssi_info_rsp;
126 struct mwifiex_ds_misc_subsc_evt subsc_evt;
127 127
128 priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last); 128 priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
129 priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last); 129 priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
@@ -137,34 +137,29 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
137 priv->bcn_rssi_avg = le16_to_cpu(rssi_info_rsp->bcn_rssi_avg); 137 priv->bcn_rssi_avg = le16_to_cpu(rssi_info_rsp->bcn_rssi_avg);
138 priv->bcn_nf_avg = le16_to_cpu(rssi_info_rsp->bcn_nf_avg); 138 priv->bcn_nf_avg = le16_to_cpu(rssi_info_rsp->bcn_nf_avg);
139 139
140 /* Need to indicate IOCTL complete */ 140 if (priv->subsc_evt_rssi_state == EVENT_HANDLED)
141 if (signal) { 141 return 0;
142 memset(signal, 0, sizeof(*signal)); 142
143 143 /* Resubscribe low and high rssi events with new thresholds */
144 signal->selector = ALL_RSSI_INFO_MASK; 144 memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
145 145 subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
146 /* RSSI */ 146 subsc_evt.action = HostCmd_ACT_BITWISE_SET;
147 signal->bcn_rssi_last = priv->bcn_rssi_last; 147 if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) {
148 signal->bcn_rssi_avg = priv->bcn_rssi_avg; 148 subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
149 signal->data_rssi_last = priv->data_rssi_last; 149 priv->cqm_rssi_hyst);
150 signal->data_rssi_avg = priv->data_rssi_avg; 150 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
151 151 } else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) {
152 /* SNR */ 152 subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
153 signal->bcn_snr_last = 153 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
154 CAL_SNR(priv->bcn_rssi_last, priv->bcn_nf_last); 154 priv->cqm_rssi_hyst);
155 signal->bcn_snr_avg =
156 CAL_SNR(priv->bcn_rssi_avg, priv->bcn_nf_avg);
157 signal->data_snr_last =
158 CAL_SNR(priv->data_rssi_last, priv->data_nf_last);
159 signal->data_snr_avg =
160 CAL_SNR(priv->data_rssi_avg, priv->data_nf_avg);
161
162 /* NF */
163 signal->bcn_nf_last = priv->bcn_nf_last;
164 signal->bcn_nf_avg = priv->bcn_nf_avg;
165 signal->data_nf_last = priv->data_nf_last;
166 signal->data_nf_avg = priv->data_nf_avg;
167 } 155 }
156 subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
157 subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
158
159 priv->subsc_evt_rssi_state = EVENT_HANDLED;
160
161 mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
162 0, 0, &subsc_evt);
168 163
169 return 0; 164 return 0;
170} 165}
@@ -785,6 +780,28 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
785} 780}
786 781
787/* 782/*
783 * This function handles the command response for subscribe event command.
784 */
785static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
786 struct host_cmd_ds_command *resp,
787 struct mwifiex_ds_misc_subsc_evt *sub_event)
788{
789 struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
790 (struct host_cmd_ds_802_11_subsc_evt *)&resp->params.subsc_evt;
791
792 /* For every subscribe event command (Get/Set/Clear), FW reports the
793 * current set of subscribed events*/
794 dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
795 le16_to_cpu(cmd_sub_event->events));
796
797 /*Return the subscribed event info for a Get request*/
798 if (sub_event)
799 sub_event->events = le16_to_cpu(cmd_sub_event->events);
800
801 return 0;
802}
803
804/*
788 * This function handles the command responses. 805 * This function handles the command responses.
789 * 806 *
790 * This is a generic function, which calls command specific 807 * This is a generic function, which calls command specific
@@ -853,7 +870,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
853 ret = mwifiex_ret_get_log(priv, resp, data_buf); 870 ret = mwifiex_ret_get_log(priv, resp, data_buf);
854 break; 871 break;
855 case HostCmd_CMD_RSSI_INFO: 872 case HostCmd_CMD_RSSI_INFO:
856 ret = mwifiex_ret_802_11_rssi_info(priv, resp, data_buf); 873 ret = mwifiex_ret_802_11_rssi_info(priv, resp);
857 break; 874 break;
858 case HostCmd_CMD_802_11_SNMP_MIB: 875 case HostCmd_CMD_802_11_SNMP_MIB:
859 ret = mwifiex_ret_802_11_snmp_mib(priv, resp, data_buf); 876 ret = mwifiex_ret_802_11_snmp_mib(priv, resp, data_buf);
@@ -924,6 +941,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
924 break; 941 break;
925 case HostCmd_CMD_PCIE_DESC_DETAILS: 942 case HostCmd_CMD_PCIE_DESC_DETAILS:
926 break; 943 break;
944 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
945 ret = mwifiex_ret_subsc_evt(priv, resp, data_buf);
946 break;
927 default: 947 default:
928 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", 948 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
929 resp->command); 949 resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index cc531b536a56..f6bbb9307f86 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -128,9 +128,6 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
128 mwifiex_stop_net_dev_queue(priv->netdev, adapter); 128 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
129 if (netif_carrier_ok(priv->netdev)) 129 if (netif_carrier_ok(priv->netdev))
130 netif_carrier_off(priv->netdev); 130 netif_carrier_off(priv->netdev);
131 /* Reset wireless stats signal info */
132 priv->qual_level = 0;
133 priv->qual_noise = 0;
134} 131}
135 132
136/* 133/*
@@ -317,6 +314,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
317 break; 314 break;
318 315
319 case EVENT_RSSI_LOW: 316 case EVENT_RSSI_LOW:
317 cfg80211_cqm_rssi_notify(priv->netdev,
318 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
319 GFP_KERNEL);
320 mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
321 HostCmd_ACT_GEN_GET, 0, NULL);
322 priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
320 dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n"); 323 dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
321 break; 324 break;
322 case EVENT_SNR_LOW: 325 case EVENT_SNR_LOW:
@@ -326,6 +329,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
326 dev_dbg(adapter->dev, "event: MAX_FAIL\n"); 329 dev_dbg(adapter->dev, "event: MAX_FAIL\n");
327 break; 330 break;
328 case EVENT_RSSI_HIGH: 331 case EVENT_RSSI_HIGH:
332 cfg80211_cqm_rssi_notify(priv->netdev,
333 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
334 GFP_KERNEL);
335 mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
336 HostCmd_ACT_GEN_GET, 0, NULL);
337 priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
329 dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n"); 338 dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
330 break; 339 break;
331 case EVENT_SNR_HIGH: 340 case EVENT_SNR_HIGH:
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index d7b11defafe0..58970e0f7d13 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -155,20 +155,29 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
155 * information. 155 * information.
156 */ 156 */
157int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, 157int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
158 u8 *bssid, s32 rssi, u8 *ie_buf, 158 struct cfg80211_bss *bss,
159 size_t ie_len, u16 beacon_period,
160 u16 cap_info_bitmap, u8 band,
161 struct mwifiex_bssdescriptor *bss_desc) 159 struct mwifiex_bssdescriptor *bss_desc)
162{ 160{
163 int ret; 161 int ret;
162 u8 *beacon_ie;
163 struct mwifiex_bss_priv *bss_priv = (void *)bss->priv;
164 164
165 memcpy(bss_desc->mac_address, bssid, ETH_ALEN); 165 beacon_ie = kmemdup(bss->information_elements, bss->len_beacon_ies,
166 bss_desc->rssi = rssi; 166 GFP_KERNEL);
167 bss_desc->beacon_buf = ie_buf; 167 if (!beacon_ie) {
168 bss_desc->beacon_buf_size = ie_len; 168 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
169 bss_desc->beacon_period = beacon_period; 169 return -ENOMEM;
170 bss_desc->cap_info_bitmap = cap_info_bitmap; 170 }
171 bss_desc->bss_band = band; 171
172 memcpy(bss_desc->mac_address, bss->bssid, ETH_ALEN);
173 bss_desc->rssi = bss->signal;
174 bss_desc->beacon_buf = beacon_ie;
175 bss_desc->beacon_buf_size = bss->len_beacon_ies;
176 bss_desc->beacon_period = bss->beacon_interval;
177 bss_desc->cap_info_bitmap = bss->capability;
178 bss_desc->bss_band = bss_priv->band;
179 bss_desc->fw_tsf = bss_priv->fw_tsf;
180 bss_desc->timestamp = bss->tsf;
172 if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) { 181 if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
173 dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n"); 182 dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
174 bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP; 183 bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
@@ -180,9 +189,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
180 else 189 else
181 bss_desc->bss_mode = NL80211_IFTYPE_STATION; 190 bss_desc->bss_mode = NL80211_IFTYPE_STATION;
182 191
183 ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc, 192 ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
184 ie_buf, ie_len);
185 193
194 kfree(beacon_ie);
186 return ret; 195 return ret;
187} 196}
188 197
@@ -197,7 +206,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
197 int ret; 206 int ret;
198 struct mwifiex_adapter *adapter = priv->adapter; 207 struct mwifiex_adapter *adapter = priv->adapter;
199 struct mwifiex_bssdescriptor *bss_desc = NULL; 208 struct mwifiex_bssdescriptor *bss_desc = NULL;
200 u8 *beacon_ie = NULL;
201 209
202 priv->scan_block = false; 210 priv->scan_block = false;
203 211
@@ -210,19 +218,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
210 return -ENOMEM; 218 return -ENOMEM;
211 } 219 }
212 220
213 beacon_ie = kmemdup(bss->information_elements, 221 ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
214 bss->len_beacon_ies, GFP_KERNEL);
215 if (!beacon_ie) {
216 kfree(bss_desc);
217 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
218 return -ENOMEM;
219 }
220
221 ret = mwifiex_fill_new_bss_desc(priv, bss->bssid, bss->signal,
222 beacon_ie, bss->len_beacon_ies,
223 bss->beacon_interval,
224 bss->capability,
225 *(u8 *)bss->priv, bss_desc);
226 if (ret) 222 if (ret)
227 goto done; 223 goto done;
228 } 224 }
@@ -269,7 +265,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
269 (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. 265 (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
270 ssid, &bss_desc->ssid))) { 266 ssid, &bss_desc->ssid))) {
271 kfree(bss_desc); 267 kfree(bss_desc);
272 kfree(beacon_ie);
273 return 0; 268 return 0;
274 } 269 }
275 270
@@ -304,7 +299,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
304 299
305done: 300done:
306 kfree(bss_desc); 301 kfree(bss_desc);
307 kfree(beacon_ie);
308 return ret; 302 return ret;
309} 303}
310 304
@@ -468,7 +462,8 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
468 462
469 info->bss_chan = bss_desc->channel; 463 info->bss_chan = bss_desc->channel;
470 464
471 info->region_code = adapter->region_code; 465 memcpy(info->country_code, priv->country_code,
466 IEEE80211_COUNTRY_STRING_LEN);
472 467
473 info->media_connected = priv->media_connected; 468 info->media_connected = priv->media_connected;
474 469
@@ -996,6 +991,39 @@ static int mwifiex_set_wapi_ie(struct mwifiex_private *priv,
996} 991}
997 992
998/* 993/*
994 * IOCTL request handler to set/reset WPS IE.
995 *
996 * The supplied WPS IE is treated as a opaque buffer. Only the first field
997 * is checked to internally enable WPS. If buffer length is zero, the existing
998 * WPS IE is reset.
999 */
1000static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
1001 u8 *ie_data_ptr, u16 ie_len)
1002{
1003 if (ie_len) {
1004 priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
1005 if (!priv->wps_ie)
1006 return -ENOMEM;
1007 if (ie_len > sizeof(priv->wps_ie)) {
1008 dev_dbg(priv->adapter->dev,
1009 "info: failed to copy WPS IE, too big\n");
1010 kfree(priv->wps_ie);
1011 return -1;
1012 }
1013 memcpy(priv->wps_ie, ie_data_ptr, ie_len);
1014 priv->wps_ie_len = ie_len;
1015 dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
1016 priv->wps_ie_len, priv->wps_ie[0]);
1017 } else {
1018 kfree(priv->wps_ie);
1019 priv->wps_ie_len = ie_len;
1020 dev_dbg(priv->adapter->dev,
1021 "info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
1022 }
1023 return 0;
1024}
1025
1026/*
999 * IOCTL request handler to set WAPI key. 1027 * IOCTL request handler to set WAPI key.
1000 * 1028 *
1001 * This function prepares the correct firmware command and 1029 * This function prepares the correct firmware command and
@@ -1185,39 +1213,6 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
1185} 1213}
1186 1214
1187/* 1215/*
1188 * Sends IOCTL request to get signal information.
1189 *
1190 * This function allocates the IOCTL request buffer, fills it
1191 * with requisite parameters and calls the IOCTL handler.
1192 */
1193int mwifiex_get_signal_info(struct mwifiex_private *priv,
1194 struct mwifiex_ds_get_signal *signal)
1195{
1196 int status;
1197
1198 signal->selector = ALL_RSSI_INFO_MASK;
1199
1200 /* Signal info can be obtained only if connected */
1201 if (!priv->media_connected) {
1202 dev_dbg(priv->adapter->dev,
1203 "info: Can not get signal in disconnected state\n");
1204 return -1;
1205 }
1206
1207 status = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
1208 HostCmd_ACT_GEN_GET, 0, signal);
1209
1210 if (!status) {
1211 if (signal->selector & BCN_RSSI_AVG_MASK)
1212 priv->qual_level = signal->bcn_rssi_avg;
1213 if (signal->selector & BCN_NF_AVG_MASK)
1214 priv->qual_noise = signal->bcn_nf_avg;
1215 }
1216
1217 return status;
1218}
1219
1220/*
1221 * Sends IOCTL request to set encoding parameters. 1216 * Sends IOCTL request to set encoding parameters.
1222 * 1217 *
1223 * This function allocates the IOCTL request buffer, fills it 1218 * This function allocates the IOCTL request buffer, fills it
@@ -1441,6 +1436,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
1441 priv->wps.session_enable = true; 1436 priv->wps.session_enable = true;
1442 dev_dbg(priv->adapter->dev, 1437 dev_dbg(priv->adapter->dev,
1443 "info: WPS Session Enabled.\n"); 1438 "info: WPS Session Enabled.\n");
1439 ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len);
1444 } 1440 }
1445 1441
1446 /* Append the passed data to the end of the 1442 /* Append the passed data to the end of the
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 750b695aca12..02ce3b77d3e7 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -145,7 +145,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
145 " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len, 145 " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
146 local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length); 146 local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
147 priv->stats.rx_dropped++; 147 priv->stats.rx_dropped++;
148 dev_kfree_skb_any(skb); 148
149 if (adapter->if_ops.data_complete)
150 adapter->if_ops.data_complete(adapter, skb);
151 else
152 dev_kfree_skb_any(skb);
153
149 return ret; 154 return ret;
150 } 155 }
151 156
@@ -196,8 +201,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
196 (u8) local_rx_pd->rx_pkt_type, 201 (u8) local_rx_pd->rx_pkt_type,
197 skb); 202 skb);
198 203
199 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) 204 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
200 dev_kfree_skb_any(skb); 205 if (adapter->if_ops.data_complete)
206 adapter->if_ops.data_complete(adapter, skb);
207 else
208 dev_kfree_skb_any(skb);
209 }
201 210
202 if (ret) 211 if (ret)
203 priv->stats.rx_dropped++; 212 priv->stats.rx_dropped++;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 7af534feb420..0a046d3a0c16 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -149,10 +149,14 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
149 local_tx_pd->bss_num = priv->bss_num; 149 local_tx_pd->bss_num = priv->bss_num;
150 local_tx_pd->bss_type = priv->bss_type; 150 local_tx_pd->bss_type = priv->bss_type;
151 151
152 skb_push(skb, INTF_HEADER_LEN); 152 if (adapter->iface_type == MWIFIEX_USB) {
153 153 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
154 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, 154 skb, NULL);
155 skb, NULL); 155 } else {
156 skb_push(skb, INTF_HEADER_LEN);
157 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
158 skb, NULL);
159 }
156 switch (ret) { 160 switch (ret) {
157 case -EBUSY: 161 case -EBUSY:
158 adapter->data_sent = true; 162 adapter->data_sent = true;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index d2af8cb98541..e2faec4db108 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -77,12 +77,23 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
77 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) 77 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
78 local_tx_pd = 78 local_tx_pd =
79 (struct txpd *) (head_ptr + INTF_HEADER_LEN); 79 (struct txpd *) (head_ptr + INTF_HEADER_LEN);
80 80 if (adapter->iface_type == MWIFIEX_USB) {
81 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, 81 adapter->data_sent = true;
82 skb, tx_param); 82 skb_pull(skb, INTF_HEADER_LEN);
83 ret = adapter->if_ops.host_to_card(adapter,
84 MWIFIEX_USB_EP_DATA,
85 skb, NULL);
86 } else {
87 ret = adapter->if_ops.host_to_card(adapter,
88 MWIFIEX_TYPE_DATA,
89 skb, tx_param);
90 }
83 } 91 }
84 92
85 switch (ret) { 93 switch (ret) {
94 case -ENOSR:
95 dev_err(adapter->dev, "data: -ENOSR is returned\n");
96 break;
86 case -EBUSY: 97 case -EBUSY:
87 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && 98 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
88 (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) { 99 (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) {
@@ -135,6 +146,9 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
135 if (!priv) 146 if (!priv)
136 goto done; 147 goto done;
137 148
149 if (adapter->iface_type == MWIFIEX_USB)
150 adapter->data_sent = false;
151
138 mwifiex_set_trans_start(priv->netdev); 152 mwifiex_set_trans_start(priv->netdev);
139 if (!status) { 153 if (!status) {
140 priv->stats.tx_packets++; 154 priv->stats.tx_packets++;
@@ -162,4 +176,5 @@ done:
162 176
163 return 0; 177 return 0;
164} 178}
179EXPORT_SYMBOL_GPL(mwifiex_write_data_complete);
165 180
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
new file mode 100644
index 000000000000..49ebf20c56eb
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -0,0 +1,1052 @@
1/*
2 * Marvell Wireless LAN device driver: USB specific handling
3 *
4 * Copyright (C) 2012, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "main.h"
21#include "usb.h"
22
23#define USB_VERSION "1.0"
24
25static const char usbdriver_name[] = "usb8797";
26
27static u8 user_rmmod;
28static struct mwifiex_if_ops usb_ops;
29static struct semaphore add_remove_card_sem;
30
31static struct usb_device_id mwifiex_usb_table[] = {
32 {USB_DEVICE(USB8797_VID, USB8797_PID_1)},
33 {USB_DEVICE_AND_INTERFACE_INFO(USB8797_VID, USB8797_PID_2,
34 USB_CLASS_VENDOR_SPEC,
35 USB_SUBCLASS_VENDOR_SPEC, 0xff)},
36 { } /* Terminating entry */
37};
38
39MODULE_DEVICE_TABLE(usb, mwifiex_usb_table);
40
41static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size);
42
43/* This function handles received packet. Necessary action is taken based on
44 * cmd/event/data.
45 */
46static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
47 struct sk_buff *skb, u8 ep)
48{
49 struct device *dev = adapter->dev;
50 u32 recv_type;
51 __le32 tmp;
52
53 if (adapter->hs_activated)
54 mwifiex_process_hs_config(adapter);
55
56 if (skb->len < INTF_HEADER_LEN) {
57 dev_err(dev, "%s: invalid skb->len\n", __func__);
58 return -1;
59 }
60
61 switch (ep) {
62 case MWIFIEX_USB_EP_CMD_EVENT:
63 dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__);
64 skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
65 recv_type = le32_to_cpu(tmp);
66 skb_pull(skb, INTF_HEADER_LEN);
67
68 switch (recv_type) {
69 case MWIFIEX_USB_TYPE_CMD:
70 if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
71 dev_err(dev, "CMD: skb->len too large\n");
72 return -1;
73 } else if (!adapter->curr_cmd) {
74 dev_dbg(dev, "CMD: no curr_cmd\n");
75 if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
76 mwifiex_process_sleep_confirm_resp(
77 adapter, skb->data,
78 skb->len);
79 return 0;
80 }
81 return -1;
82 }
83
84 adapter->curr_cmd->resp_skb = skb;
85 adapter->cmd_resp_received = true;
86 break;
87 case MWIFIEX_USB_TYPE_EVENT:
88 if (skb->len < sizeof(u32)) {
89 dev_err(dev, "EVENT: skb->len too small\n");
90 return -1;
91 }
92 skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
93 adapter->event_cause = le32_to_cpu(tmp);
94 skb_pull(skb, sizeof(u32));
95 dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
96
97 if (skb->len > MAX_EVENT_SIZE) {
98 dev_err(dev, "EVENT: event body too large\n");
99 return -1;
100 }
101
102 skb_copy_from_linear_data(skb, adapter->event_body,
103 skb->len);
104 adapter->event_received = true;
105 adapter->event_skb = skb;
106 break;
107 default:
108 dev_err(dev, "unknown recv_type %#x\n", recv_type);
109 return -1;
110 }
111 break;
112 case MWIFIEX_USB_EP_DATA:
113 dev_dbg(dev, "%s: EP_DATA\n", __func__);
114 if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
115 dev_err(dev, "DATA: skb->len too large\n");
116 return -1;
117 }
118 skb_queue_tail(&adapter->usb_rx_data_q, skb);
119 adapter->data_received = true;
120 break;
121 default:
122 dev_err(dev, "%s: unknown endport %#x\n", __func__, ep);
123 return -1;
124 }
125
126 return -EINPROGRESS;
127}
128
129static void mwifiex_usb_rx_complete(struct urb *urb)
130{
131 struct urb_context *context = (struct urb_context *)urb->context;
132 struct mwifiex_adapter *adapter = context->adapter;
133 struct sk_buff *skb = context->skb;
134 struct usb_card_rec *card;
135 int recv_length = urb->actual_length;
136 int size, status;
137
138 if (!adapter || !adapter->card) {
139 pr_err("mwifiex adapter or card structure is not valid\n");
140 return;
141 }
142
143 card = (struct usb_card_rec *)adapter->card;
144 if (card->rx_cmd_ep == context->ep)
145 atomic_dec(&card->rx_cmd_urb_pending);
146 else
147 atomic_dec(&card->rx_data_urb_pending);
148
149 if (recv_length) {
150 if (urb->status || (adapter->surprise_removed)) {
151 dev_err(adapter->dev,
152 "URB status is failed: %d\n", urb->status);
153 /* Do not free skb in case of command ep */
154 if (card->rx_cmd_ep != context->ep)
155 dev_kfree_skb_any(skb);
156 goto setup_for_next;
157 }
158 if (skb->len > recv_length)
159 skb_trim(skb, recv_length);
160 else
161 skb_put(skb, recv_length - skb->len);
162
163 atomic_inc(&adapter->rx_pending);
164 status = mwifiex_usb_recv(adapter, skb, context->ep);
165
166 dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
167 recv_length, status);
168 if (status == -EINPROGRESS) {
169 queue_work(adapter->workqueue, &adapter->main_work);
170
171 /* urb for data_ep is re-submitted now;
172 * urb for cmd_ep will be re-submitted in callback
173 * mwifiex_usb_recv_complete
174 */
175 if (card->rx_cmd_ep == context->ep)
176 return;
177 } else {
178 atomic_dec(&adapter->rx_pending);
179 if (status == -1)
180 dev_err(adapter->dev,
181 "received data processing failed!\n");
182
183 /* Do not free skb in case of command ep */
184 if (card->rx_cmd_ep != context->ep)
185 dev_kfree_skb_any(skb);
186 }
187 } else if (urb->status) {
188 if (!adapter->is_suspended) {
189 dev_warn(adapter->dev,
190 "Card is removed: %d\n", urb->status);
191 adapter->surprise_removed = true;
192 }
193 dev_kfree_skb_any(skb);
194 return;
195 } else {
196 /* Do not free skb in case of command ep */
197 if (card->rx_cmd_ep != context->ep)
198 dev_kfree_skb_any(skb);
199
200 /* fall through setup_for_next */
201 }
202
203setup_for_next:
204 if (card->rx_cmd_ep == context->ep)
205 size = MWIFIEX_RX_CMD_BUF_SIZE;
206 else
207 size = MWIFIEX_RX_DATA_BUF_SIZE;
208
209 mwifiex_usb_submit_rx_urb(context, size);
210
211 return;
212}
213
214static void mwifiex_usb_tx_complete(struct urb *urb)
215{
216 struct urb_context *context = (struct urb_context *)(urb->context);
217 struct mwifiex_adapter *adapter = context->adapter;
218 struct usb_card_rec *card = adapter->card;
219
220 dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status);
221
222 if (context->ep == card->tx_cmd_ep) {
223 dev_dbg(adapter->dev, "%s: CMD\n", __func__);
224 atomic_dec(&card->tx_cmd_urb_pending);
225 adapter->cmd_sent = false;
226 } else {
227 dev_dbg(adapter->dev, "%s: DATA\n", __func__);
228 atomic_dec(&card->tx_data_urb_pending);
229 mwifiex_write_data_complete(adapter, context->skb,
230 urb->status ? -1 : 0);
231 }
232
233 queue_work(adapter->workqueue, &adapter->main_work);
234
235 return;
236}
237
238static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
239{
240 struct mwifiex_adapter *adapter = ctx->adapter;
241 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
242
243 if (card->rx_cmd_ep != ctx->ep) {
244 ctx->skb = dev_alloc_skb(size);
245 if (!ctx->skb) {
246 dev_err(adapter->dev,
247 "%s: dev_alloc_skb failed\n", __func__);
248 return -ENOMEM;
249 }
250 }
251
252 usb_fill_bulk_urb(ctx->urb, card->udev,
253 usb_rcvbulkpipe(card->udev, ctx->ep), ctx->skb->data,
254 size, mwifiex_usb_rx_complete, (void *)ctx);
255
256 if (card->rx_cmd_ep == ctx->ep)
257 atomic_inc(&card->rx_cmd_urb_pending);
258 else
259 atomic_inc(&card->rx_data_urb_pending);
260
261 if (usb_submit_urb(ctx->urb, GFP_ATOMIC)) {
262 dev_err(adapter->dev, "usb_submit_urb failed\n");
263 dev_kfree_skb_any(ctx->skb);
264 ctx->skb = NULL;
265
266 if (card->rx_cmd_ep == ctx->ep)
267 atomic_dec(&card->rx_cmd_urb_pending);
268 else
269 atomic_dec(&card->rx_data_urb_pending);
270
271 return -1;
272 }
273
274 return 0;
275}
276
277static void mwifiex_usb_free(struct usb_card_rec *card)
278{
279 int i;
280
281 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
282 usb_kill_urb(card->rx_cmd.urb);
283
284 usb_free_urb(card->rx_cmd.urb);
285 card->rx_cmd.urb = NULL;
286
287 if (atomic_read(&card->rx_data_urb_pending))
288 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
289 if (card->rx_data_list[i].urb)
290 usb_kill_urb(card->rx_data_list[i].urb);
291
292 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) {
293 usb_free_urb(card->rx_data_list[i].urb);
294 card->rx_data_list[i].urb = NULL;
295 }
296
297 for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) {
298 usb_free_urb(card->tx_data_list[i].urb);
299 card->tx_data_list[i].urb = NULL;
300 }
301
302 usb_free_urb(card->tx_cmd.urb);
303 card->tx_cmd.urb = NULL;
304
305 return;
306}
307
308/* This function probes an mwifiex device and registers it. It allocates
309 * the card structure, initiates the device registration and initialization
310 * procedure by adding a logical interface.
311 */
312static int mwifiex_usb_probe(struct usb_interface *intf,
313 const struct usb_device_id *id)
314{
315 struct usb_device *udev = interface_to_usbdev(intf);
316 struct usb_host_interface *iface_desc = intf->cur_altsetting;
317 struct usb_endpoint_descriptor *epd;
318 int ret, i;
319 struct usb_card_rec *card;
320 u16 id_vendor, id_product, bcd_device, bcd_usb;
321
322 card = kzalloc(sizeof(struct usb_card_rec), GFP_KERNEL);
323 if (!card)
324 return -ENOMEM;
325
326 id_vendor = le16_to_cpu(udev->descriptor.idVendor);
327 id_product = le16_to_cpu(udev->descriptor.idProduct);
328 bcd_device = le16_to_cpu(udev->descriptor.bcdDevice);
329 bcd_usb = le16_to_cpu(udev->descriptor.bcdUSB);
330 pr_debug("info: VID/PID = %X/%X, Boot2 version = %X\n",
331 id_vendor, id_product, bcd_device);
332
333 /* PID_1 is used for firmware downloading only */
334 if (id_product == USB8797_PID_1)
335 card->usb_boot_state = USB8797_FW_DNLD;
336 else
337 card->usb_boot_state = USB8797_FW_READY;
338
339 card->udev = udev;
340 card->intf = intf;
341
342 pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocl=%#x\n",
343 udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass,
344 udev->descriptor.bDeviceSubClass,
345 udev->descriptor.bDeviceProtocol);
346
347 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
348 epd = &iface_desc->endpoint[i].desc;
349 if (usb_endpoint_dir_in(epd) &&
350 usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
351 usb_endpoint_xfer_bulk(epd)) {
352 pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n",
353 le16_to_cpu(epd->wMaxPacketSize),
354 epd->bEndpointAddress);
355 card->rx_cmd_ep = usb_endpoint_num(epd);
356 atomic_set(&card->rx_cmd_urb_pending, 0);
357 }
358 if (usb_endpoint_dir_in(epd) &&
359 usb_endpoint_num(epd) == MWIFIEX_USB_EP_DATA &&
360 usb_endpoint_xfer_bulk(epd)) {
361 pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n",
362 le16_to_cpu(epd->wMaxPacketSize),
363 epd->bEndpointAddress);
364 card->rx_data_ep = usb_endpoint_num(epd);
365 atomic_set(&card->rx_data_urb_pending, 0);
366 }
367 if (usb_endpoint_dir_out(epd) &&
368 usb_endpoint_num(epd) == MWIFIEX_USB_EP_DATA &&
369 usb_endpoint_xfer_bulk(epd)) {
370 pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n",
371 le16_to_cpu(epd->wMaxPacketSize),
372 epd->bEndpointAddress);
373 card->tx_data_ep = usb_endpoint_num(epd);
374 atomic_set(&card->tx_data_urb_pending, 0);
375 }
376 if (usb_endpoint_dir_out(epd) &&
377 usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
378 usb_endpoint_xfer_bulk(epd)) {
379 pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n",
380 le16_to_cpu(epd->wMaxPacketSize),
381 epd->bEndpointAddress);
382 card->tx_cmd_ep = usb_endpoint_num(epd);
383 atomic_set(&card->tx_cmd_urb_pending, 0);
384 card->bulk_out_maxpktsize =
385 le16_to_cpu(epd->wMaxPacketSize);
386 }
387 }
388
389 usb_set_intfdata(intf, card);
390
391 ret = mwifiex_add_card(card, &add_remove_card_sem, &usb_ops,
392 MWIFIEX_USB);
393 if (ret) {
394 pr_err("%s: mwifiex_add_card failed: %d\n", __func__, ret);
395 usb_reset_device(udev);
396 kfree(card);
397 return ret;
398 }
399
400 usb_get_dev(udev);
401
402 return 0;
403}
404
405/* Kernel needs to suspend all functions separately. Therefore all
406 * registered functions must have drivers with suspend and resume
407 * methods. Failing that the kernel simply removes the whole card.
408 *
409 * If already not suspended, this function allocates and sends a
410 * 'host sleep activate' request to the firmware and turns off the traffic.
411 */
412static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
413{
414 struct usb_card_rec *card = usb_get_intfdata(intf);
415 struct mwifiex_adapter *adapter;
416 int i;
417
418 if (!card || !card->adapter) {
419 pr_err("%s: card or card->adapter is NULL\n", __func__);
420 return 0;
421 }
422 adapter = card->adapter;
423
424 if (unlikely(adapter->is_suspended))
425 dev_warn(adapter->dev, "Device already suspended\n");
426
427 mwifiex_enable_hs(adapter);
428
429 /* 'is_suspended' flag indicates device is suspended.
430 * It must be set here before the usb_kill_urb() calls. Reason
431 * is in the complete handlers, urb->status(= -ENOENT) and
432 * this flag is used in combination to distinguish between a
433 * 'suspended' state and a 'disconnect' one.
434 */
435 adapter->is_suspended = true;
436
437 for (i = 0; i < adapter->priv_num; i++)
438 netif_carrier_off(adapter->priv[i]->netdev);
439
440 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
441 usb_kill_urb(card->rx_cmd.urb);
442
443 if (atomic_read(&card->rx_data_urb_pending))
444 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
445 if (card->rx_data_list[i].urb)
446 usb_kill_urb(card->rx_data_list[i].urb);
447
448 for (i = 0; i < MWIFIEX_TX_DATA_URB; i++)
449 if (card->tx_data_list[i].urb)
450 usb_kill_urb(card->tx_data_list[i].urb);
451
452 if (card->tx_cmd.urb)
453 usb_kill_urb(card->tx_cmd.urb);
454
455 return 0;
456}
457
458/* Kernel needs to suspend all functions separately. Therefore all
459 * registered functions must have drivers with suspend and resume
460 * methods. Failing that the kernel simply removes the whole card.
461 *
462 * If already not resumed, this function turns on the traffic and
463 * sends a 'host sleep cancel' request to the firmware.
464 */
465static int mwifiex_usb_resume(struct usb_interface *intf)
466{
467 struct usb_card_rec *card = usb_get_intfdata(intf);
468 struct mwifiex_adapter *adapter;
469 int i;
470
471 if (!card || !card->adapter) {
472 pr_err("%s: card or card->adapter is NULL\n", __func__);
473 return 0;
474 }
475 adapter = card->adapter;
476
477 if (unlikely(!adapter->is_suspended)) {
478 dev_warn(adapter->dev, "Device already resumed\n");
479 return 0;
480 }
481
482 /* Indicate device resumed. The netdev queue will be resumed only
483 * after the urbs have been re-submitted
484 */
485 adapter->is_suspended = false;
486
487 if (!atomic_read(&card->rx_data_urb_pending))
488 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
489 mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
490 MWIFIEX_RX_DATA_BUF_SIZE);
491
492 if (!atomic_read(&card->rx_cmd_urb_pending)) {
493 card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
494 if (card->rx_cmd.skb)
495 mwifiex_usb_submit_rx_urb(&card->rx_cmd,
496 MWIFIEX_RX_CMD_BUF_SIZE);
497 }
498
499 for (i = 0; i < adapter->priv_num; i++)
500 if (adapter->priv[i]->media_connected)
501 netif_carrier_on(adapter->priv[i]->netdev);
502
503 /* Disable Host Sleep */
504 if (adapter->hs_activated)
505 mwifiex_cancel_hs(mwifiex_get_priv(adapter,
506 MWIFIEX_BSS_ROLE_ANY),
507 MWIFIEX_ASYNC_CMD);
508
509#ifdef CONFIG_PM
510 /* Resume handler may be called due to remote wakeup,
511 * force to exit suspend anyway
512 */
513 usb_disable_autosuspend(card->udev);
514#endif /* CONFIG_PM */
515
516 return 0;
517}
518
519static void mwifiex_usb_disconnect(struct usb_interface *intf)
520{
521 struct usb_card_rec *card = usb_get_intfdata(intf);
522 struct mwifiex_adapter *adapter;
523 int i;
524
525 if (!card || !card->adapter) {
526 pr_err("%s: card or card->adapter is NULL\n", __func__);
527 return;
528 }
529
530 adapter = card->adapter;
531 if (!adapter->priv_num)
532 return;
533
534 /* In case driver is removed when asynchronous FW downloading is
535 * in progress
536 */
537 wait_for_completion(&adapter->fw_load);
538
539 if (user_rmmod) {
540#ifdef CONFIG_PM
541 if (adapter->is_suspended)
542 mwifiex_usb_resume(intf);
543#endif
544 for (i = 0; i < adapter->priv_num; i++)
545 if ((GET_BSS_ROLE(adapter->priv[i]) ==
546 MWIFIEX_BSS_ROLE_STA) &&
547 adapter->priv[i]->media_connected)
548 mwifiex_deauthenticate(adapter->priv[i], NULL);
549
550 mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
551 MWIFIEX_BSS_ROLE_ANY),
552 MWIFIEX_FUNC_SHUTDOWN);
553 }
554
555 mwifiex_usb_free(card);
556
557 dev_dbg(adapter->dev, "%s: removing card\n", __func__);
558 mwifiex_remove_card(adapter, &add_remove_card_sem);
559
560 usb_set_intfdata(intf, NULL);
561 usb_put_dev(interface_to_usbdev(intf));
562 kfree(card);
563
564 return;
565}
566
567static struct usb_driver mwifiex_usb_driver = {
568 .name = usbdriver_name,
569 .probe = mwifiex_usb_probe,
570 .disconnect = mwifiex_usb_disconnect,
571 .id_table = mwifiex_usb_table,
572 .suspend = mwifiex_usb_suspend,
573 .resume = mwifiex_usb_resume,
574 .supports_autosuspend = 1,
575};
576
577static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
578{
579 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
580 int i;
581
582 card->tx_cmd.adapter = adapter;
583 card->tx_cmd.ep = card->tx_cmd_ep;
584
585 card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
586 if (!card->tx_cmd.urb) {
587 dev_err(adapter->dev, "tx_cmd.urb allocation failed\n");
588 return -ENOMEM;
589 }
590
591 card->tx_data_ix = 0;
592
593 for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) {
594 card->tx_data_list[i].adapter = adapter;
595 card->tx_data_list[i].ep = card->tx_data_ep;
596
597 card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
598 if (!card->tx_data_list[i].urb) {
599 dev_err(adapter->dev,
600 "tx_data_list[] urb allocation failed\n");
601 return -ENOMEM;
602 }
603 }
604
605 return 0;
606}
607
608static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
609{
610 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
611 int i;
612
613 card->rx_cmd.adapter = adapter;
614 card->rx_cmd.ep = card->rx_cmd_ep;
615
616 card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
617 if (!card->rx_cmd.urb) {
618 dev_err(adapter->dev, "rx_cmd.urb allocation failed\n");
619 return -ENOMEM;
620 }
621
622 card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
623 if (!card->rx_cmd.skb) {
624 dev_err(adapter->dev, "rx_cmd.skb allocation failed\n");
625 return -ENOMEM;
626 }
627
628 if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE))
629 return -1;
630
631 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) {
632 card->rx_data_list[i].adapter = adapter;
633 card->rx_data_list[i].ep = card->rx_data_ep;
634
635 card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
636 if (!card->rx_data_list[i].urb) {
637 dev_err(adapter->dev,
638 "rx_data_list[] urb allocation failed\n");
639 return -1;
640 }
641 if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
642 MWIFIEX_RX_DATA_BUF_SIZE))
643 return -1;
644 }
645
646 return 0;
647}
648
649static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
650 u32 *len, u8 ep, u32 timeout)
651{
652 struct usb_card_rec *card = adapter->card;
653 int actual_length, ret;
654
655 if (!(*len % card->bulk_out_maxpktsize))
656 (*len)++;
657
658 /* Send the data block */
659 ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf,
660 *len, &actual_length, timeout);
661 if (ret) {
662 dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret);
663 ret = -1;
664 }
665
666 *len = actual_length;
667
668 return ret;
669}
670
671static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
672 u32 *len, u8 ep, u32 timeout)
673{
674 struct usb_card_rec *card = adapter->card;
675 int actual_length, ret;
676
677 /* Receive the data response */
678 ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf,
679 *len, &actual_length, timeout);
680 if (ret) {
681 dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret);
682 ret = -1;
683 }
684
685 *len = actual_length;
686
687 return ret;
688}
689
690/* This function write a command/data packet to card. */
691static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
692 struct sk_buff *skb,
693 struct mwifiex_tx_param *tx_param)
694{
695 struct usb_card_rec *card = adapter->card;
696 struct urb_context *context;
697 u8 *data = (u8 *)skb->data;
698 struct urb *tx_urb;
699
700 if (adapter->is_suspended) {
701 dev_err(adapter->dev,
702 "%s: not allowed while suspended\n", __func__);
703 return -1;
704 }
705
706 if (adapter->surprise_removed) {
707 dev_err(adapter->dev, "%s: device removed\n", __func__);
708 return -1;
709 }
710
711 if (ep == card->tx_data_ep &&
712 atomic_read(&card->tx_data_urb_pending) >= MWIFIEX_TX_DATA_URB) {
713 return -EBUSY;
714 }
715
716 dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep);
717
718 if (ep == card->tx_cmd_ep) {
719 context = &card->tx_cmd;
720 } else {
721 if (card->tx_data_ix >= MWIFIEX_TX_DATA_URB)
722 card->tx_data_ix = 0;
723 context = &card->tx_data_list[card->tx_data_ix++];
724 }
725
726 context->adapter = adapter;
727 context->ep = ep;
728 context->skb = skb;
729 tx_urb = context->urb;
730
731 usb_fill_bulk_urb(tx_urb, card->udev, usb_sndbulkpipe(card->udev, ep),
732 data, skb->len, mwifiex_usb_tx_complete,
733 (void *)context);
734
735 tx_urb->transfer_flags |= URB_ZERO_PACKET;
736
737 if (ep == card->tx_cmd_ep)
738 atomic_inc(&card->tx_cmd_urb_pending);
739 else
740 atomic_inc(&card->tx_data_urb_pending);
741
742 if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
743 dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__);
744 if (ep == card->tx_cmd_ep) {
745 atomic_dec(&card->tx_cmd_urb_pending);
746 } else {
747 atomic_dec(&card->tx_data_urb_pending);
748 if (card->tx_data_ix)
749 card->tx_data_ix--;
750 else
751 card->tx_data_ix = MWIFIEX_TX_DATA_URB;
752 }
753
754 return -1;
755 } else {
756 if (ep == card->tx_data_ep &&
757 atomic_read(&card->tx_data_urb_pending) ==
758 MWIFIEX_TX_DATA_URB)
759 return -ENOSR;
760 }
761
762 return -EINPROGRESS;
763}
764
765/* This function register usb device and initialize parameter. */
766static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
767{
768 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
769
770 card->adapter = adapter;
771 adapter->dev = &card->udev->dev;
772 strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
773
774 return 0;
775}
776
777/* This function reads one block of firmware data. */
778static int mwifiex_get_fw_data(struct mwifiex_adapter *adapter,
779 u32 offset, u32 len, u8 *buf)
780{
781 if (!buf || !len)
782 return -1;
783
784 if (offset + len > adapter->firmware->size)
785 return -1;
786
787 memcpy(buf, adapter->firmware->data + offset, len);
788
789 return 0;
790}
791
792static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
793 struct mwifiex_fw_image *fw)
794{
795 int ret = 0;
796 u8 *firmware = fw->fw_buf, *recv_buff;
797 u32 retries = USB8797_FW_MAX_RETRY, dlen;
798 u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
799 struct fw_data *fwdata;
800 struct fw_sync_header sync_fw;
801 u8 check_winner = 1;
802
803 if (!firmware) {
804 dev_err(adapter->dev,
805 "No firmware image found! Terminating download\n");
806 ret = -1;
807 goto fw_exit;
808 }
809
810 /* Allocate memory for transmit */
811 fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL);
812 if (!fwdata)
813 goto fw_exit;
814
815 /* Allocate memory for receive */
816 recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
817 if (!recv_buff)
818 goto cleanup;
819
820 do {
821 /* Send pseudo data to check winner status first */
822 if (check_winner) {
823 memset(&fwdata->fw_hdr, 0, sizeof(struct fw_header));
824 dlen = 0;
825 } else {
826 /* copy the header of the fw_data to get the length */
827 if (firmware)
828 memcpy(&fwdata->fw_hdr, &firmware[tlen],
829 sizeof(struct fw_header));
830 else
831 mwifiex_get_fw_data(adapter, tlen,
832 sizeof(struct fw_header),
833 (u8 *)&fwdata->fw_hdr);
834
835 dlen = le32_to_cpu(fwdata->fw_hdr.data_len);
836 dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd);
837 tlen += sizeof(struct fw_header);
838
839 if (firmware)
840 memcpy(fwdata->data, &firmware[tlen], dlen);
841 else
842 mwifiex_get_fw_data(adapter, tlen, dlen,
843 (u8 *)fwdata->data);
844
845 fwdata->seq_num = cpu_to_le32(fw_seqnum);
846 tlen += dlen;
847 }
848
849 /* If the send/receive fails or CRC occurs then retry */
850 while (retries--) {
851 u8 *buf = (u8 *)fwdata;
852 u32 len = FW_DATA_XMIT_SIZE;
853
854 /* send the firmware block */
855 ret = mwifiex_write_data_sync(adapter, buf, &len,
856 MWIFIEX_USB_EP_CMD_EVENT,
857 MWIFIEX_USB_TIMEOUT);
858 if (ret) {
859 dev_err(adapter->dev,
860 "write_data_sync: failed: %d\n", ret);
861 continue;
862 }
863
864 buf = recv_buff;
865 len = FW_DNLD_RX_BUF_SIZE;
866
867 /* Receive the firmware block response */
868 ret = mwifiex_read_data_sync(adapter, buf, &len,
869 MWIFIEX_USB_EP_CMD_EVENT,
870 MWIFIEX_USB_TIMEOUT);
871 if (ret) {
872 dev_err(adapter->dev,
873 "read_data_sync: failed: %d\n", ret);
874 continue;
875 }
876
877 memcpy(&sync_fw, recv_buff,
878 sizeof(struct fw_sync_header));
879
880 /* check 1st firmware block resp for highest bit set */
881 if (check_winner) {
882 if (le32_to_cpu(sync_fw.cmd) & 0x80000000) {
883 dev_warn(adapter->dev,
884 "USB is not the winner %#x\n",
885 sync_fw.cmd);
886
887 /* returning success */
888 ret = 0;
889 goto cleanup;
890 }
891
892 dev_dbg(adapter->dev,
893 "USB is the winner, start to download FW\n");
894
895 check_winner = 0;
896 break;
897 }
898
899 /* check the firmware block response for CRC errors */
900 if (sync_fw.cmd) {
901 dev_err(adapter->dev,
902 "FW received block with CRC %#x\n",
903 sync_fw.cmd);
904 ret = -1;
905 continue;
906 }
907
908 retries = USB8797_FW_MAX_RETRY;
909 break;
910 }
911 fw_seqnum++;
912 } while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries);
913
914cleanup:
915 dev_dbg(adapter->dev, "%s: %d bytes downloaded\n", __func__, tlen);
916
917 kfree(recv_buff);
918 kfree(fwdata);
919
920 if (retries)
921 ret = 0;
922fw_exit:
923 return ret;
924}
925
926static int mwifiex_usb_dnld_fw(struct mwifiex_adapter *adapter,
927 struct mwifiex_fw_image *fw)
928{
929 int ret;
930 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
931
932 if (card->usb_boot_state == USB8797_FW_DNLD) {
933 ret = mwifiex_prog_fw_w_helper(adapter, fw);
934 if (ret)
935 return -1;
936
937 /* Boot state changes after successful firmware download */
938 if (card->usb_boot_state == USB8797_FW_DNLD)
939 return -1;
940 }
941
942 ret = mwifiex_usb_rx_init(adapter);
943 if (!ret)
944 ret = mwifiex_usb_tx_init(adapter);
945
946 return ret;
947}
948
949static void mwifiex_submit_rx_urb(struct mwifiex_adapter *adapter, u8 ep)
950{
951 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
952
953 skb_push(card->rx_cmd.skb, INTF_HEADER_LEN);
954 if ((ep == card->rx_cmd_ep) &&
955 (!atomic_read(&card->rx_cmd_urb_pending)))
956 mwifiex_usb_submit_rx_urb(&card->rx_cmd,
957 MWIFIEX_RX_CMD_BUF_SIZE);
958
959 return;
960}
961
962static int mwifiex_usb_cmd_event_complete(struct mwifiex_adapter *adapter,
963 struct sk_buff *skb)
964{
965 atomic_dec(&adapter->rx_pending);
966 mwifiex_submit_rx_urb(adapter, MWIFIEX_USB_EP_CMD_EVENT);
967
968 return 0;
969}
970
971static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter,
972 struct sk_buff *skb)
973{
974 atomic_dec(&adapter->rx_pending);
975 dev_kfree_skb_any(skb);
976
977 return 0;
978}
979
980/* This function wakes up the card. */
981static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
982{
983 /* Simulation of HS_AWAKE event */
984 adapter->pm_wakeup_fw_try = false;
985 adapter->pm_wakeup_card_req = false;
986 adapter->ps_state = PS_STATE_AWAKE;
987
988 return 0;
989}
990
991static struct mwifiex_if_ops usb_ops = {
992 .register_dev = mwifiex_register_dev,
993 .wakeup = mwifiex_pm_wakeup_card,
994 .wakeup_complete = mwifiex_pm_wakeup_card_complete,
995
996 /* USB specific */
997 .dnld_fw = mwifiex_usb_dnld_fw,
998 .cmdrsp_complete = mwifiex_usb_cmd_event_complete,
999 .event_complete = mwifiex_usb_cmd_event_complete,
1000 .data_complete = mwifiex_usb_data_complete,
1001 .host_to_card = mwifiex_usb_host_to_card,
1002};
1003
1004/* This function initializes the USB driver module.
1005 *
1006 * This initiates the semaphore and registers the device with
1007 * USB bus.
1008 */
1009static int mwifiex_usb_init_module(void)
1010{
1011 int ret;
1012
1013 pr_debug("Marvell USB8797 Driver\n");
1014
1015 sema_init(&add_remove_card_sem, 1);
1016
1017 ret = usb_register(&mwifiex_usb_driver);
1018 if (ret)
1019 pr_err("Driver register failed!\n");
1020 else
1021 pr_debug("info: Driver registered successfully!\n");
1022
1023 return ret;
1024}
1025
1026/* This function cleans up the USB driver.
1027 *
1028 * The following major steps are followed in .disconnect for cleanup:
1029 * - Resume the device if its suspended
1030 * - Disconnect the device if connected
1031 * - Shutdown the firmware
1032 * - Unregister the device from USB bus.
1033 */
1034static void mwifiex_usb_cleanup_module(void)
1035{
1036 if (!down_interruptible(&add_remove_card_sem))
1037 up(&add_remove_card_sem);
1038
1039 /* set the flag as user is removing this module */
1040 user_rmmod = 1;
1041
1042 usb_deregister(&mwifiex_usb_driver);
1043}
1044
1045module_init(mwifiex_usb_init_module);
1046module_exit(mwifiex_usb_cleanup_module);
1047
1048MODULE_AUTHOR("Marvell International Ltd.");
1049MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION);
1050MODULE_VERSION(USB_VERSION);
1051MODULE_LICENSE("GPL v2");
1052MODULE_FIRMWARE("mrvl/usb8797_uapsta.bin");
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
new file mode 100644
index 000000000000..98c4316cd1a9
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -0,0 +1,99 @@
1/*
2 * This file contains definitions for mwifiex USB interface driver.
3 *
4 * Copyright (C) 2012, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_USB_H
21#define _MWIFIEX_USB_H
22
23#include <linux/usb.h>
24
25#define USB8797_VID 0x1286
26#define USB8797_PID_1 0x2043
27#define USB8797_PID_2 0x2044
28
29#define USB8797_FW_DNLD 1
30#define USB8797_FW_READY 2
31#define USB8797_FW_MAX_RETRY 3
32
33#define MWIFIEX_TX_DATA_URB 6
34#define MWIFIEX_RX_DATA_URB 6
35#define MWIFIEX_USB_TIMEOUT 100
36
37#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
38
39#define FW_DNLD_TX_BUF_SIZE 620
40#define FW_DNLD_RX_BUF_SIZE 2048
41#define FW_HAS_LAST_BLOCK 0x00000004
42
43#define FW_DATA_XMIT_SIZE \
44 (sizeof(struct fw_header) + dlen + sizeof(u32))
45
46struct urb_context {
47	struct mwifiex_adapter *adapter; /* owning adapter, for use in URB completion */
48	struct sk_buff *skb;             /* data buffer associated with the URB */
49	struct urb *urb;                 /* the USB request block itself */
50	u8 ep;                           /* endpoint number this context is bound to */
51};
52
53struct usb_card_rec {
54	struct mwifiex_adapter *adapter;
55	struct usb_device *udev;
56	struct usb_interface *intf;
57	u8 rx_cmd_ep;                    /* endpoint for command/event responses */
58	struct urb_context rx_cmd;       /* single URB context for command RX */
59	atomic_t rx_cmd_urb_pending;
60	struct urb_context rx_data_list[MWIFIEX_RX_DATA_URB];
61	u8 usb_boot_state;               /* USB8797_FW_DNLD or USB8797_FW_READY */
62	u8 rx_data_ep;
63	atomic_t rx_data_urb_pending;    /* count of in-flight data RX URBs */
64	u8 tx_data_ep;
65	u8 tx_cmd_ep;
66	atomic_t tx_data_urb_pending;
67	atomic_t tx_cmd_urb_pending;
68	int bulk_out_maxpktsize;         /* max packet size of the bulk-out endpoint */
69	struct urb_context tx_cmd;
70	int tx_data_ix;                  /* round-robin index into tx_data_list */
71	struct urb_context tx_data_list[MWIFIEX_TX_DATA_URB];
72};
73
74struct fw_header {
75 __le32 dnld_cmd;
76 __le32 base_addr;
77 __le32 data_len;
78 __le32 crc;
79};
80
81struct fw_sync_header {
82 __le32 cmd;
83 __le32 seq_num;
84};
85
86struct fw_data {
87 struct fw_header fw_hdr;
88 __le32 seq_num;
89 u8 data[1];
90};
91
92/* This function is called after the card has woken up. */
93static inline int
94mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
95{
96	return 0; /* no USB-specific wakeup-completion work; always succeeds */
97}
98
99#endif /*_MWIFIEX_USB_H */
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 6b399976d6c8..2864c74bdb6f 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -167,6 +167,28 @@ int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb)
167 skb->dev = priv->netdev; 167 skb->dev = priv->netdev;
168 skb->protocol = eth_type_trans(skb, priv->netdev); 168 skb->protocol = eth_type_trans(skb, priv->netdev);
169 skb->ip_summed = CHECKSUM_NONE; 169 skb->ip_summed = CHECKSUM_NONE;
170
171 /* This is required only in case of 11n and USB as we alloc
172 * a buffer of 4K only if its 11N (to be able to receive 4K
173 * AMSDU packets). In case of SD we allocate buffers based
174 * on the size of packet and hence this is not needed.
175 *
176 * Modifying the truesize here as our allocation for each
 177 * skb is 4K but we only receive 2K packets and this causes
178 * the kernel to start dropping packets in case where
179 * application has allocated buffer based on 2K size i.e.
 180 * if there is a 64K packet received (in IP fragments and
181 * application allocates 64K to receive this packet but
182 * this packet would almost double up because we allocate
183 * each 1.5K fragment in 4K and pass it up. As soon as the
184 * 64K limit hits kernel will start to drop rest of the
185 * fragments. Currently we fail the Filesndl-ht.scr script
186 * for UDP, hence this fix
187 */
188 if ((adapter->iface_type == MWIFIEX_USB) &&
189 (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
190 skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
191
170 priv->stats.rx_bytes += skb->len; 192 priv->stats.rx_bytes += skb->len;
171 priv->stats.rx_packets++; 193 priv->stats.rx_packets++;
172 if (in_interrupt()) 194 if (in_interrupt())
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 5a7316c6f125..429a1dee2d26 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1120,11 +1120,19 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
1120 tx_info = MWIFIEX_SKB_TXCB(skb); 1120 tx_info = MWIFIEX_SKB_TXCB(skb);
1121 1121
1122 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); 1122 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
1123 tx_param.next_pkt_len = 1123
1124 ((skb_next) ? skb_next->len + 1124 if (adapter->iface_type == MWIFIEX_USB) {
1125 sizeof(struct txpd) : 0); 1125 adapter->data_sent = true;
1126 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, skb, 1126 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
1127 &tx_param); 1127 skb, NULL);
1128 } else {
1129 tx_param.next_pkt_len =
1130 ((skb_next) ? skb_next->len +
1131 sizeof(struct txpd) : 0);
1132 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
1133 skb, &tx_param);
1134 }
1135
1128 switch (ret) { 1136 switch (ret) {
1129 case -EBUSY: 1137 case -EBUSY:
1130 dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); 1138 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index b48674b577e6..cf7bdc66f822 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1235,7 +1235,7 @@ mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh)
1235{ 1235{
1236 return priv->capture_beacon && 1236 return priv->capture_beacon &&
1237 ieee80211_is_beacon(wh->frame_control) && 1237 ieee80211_is_beacon(wh->frame_control) &&
1238 !compare_ether_addr(wh->addr3, priv->capture_bssid); 1238 ether_addr_equal(wh->addr3, priv->capture_bssid);
1239} 1239}
1240 1240
1241static inline void mwl8k_save_beacon(struct ieee80211_hw *hw, 1241static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
@@ -5893,18 +5893,7 @@ static struct pci_driver mwl8k_driver = {
5893 .shutdown = __devexit_p(mwl8k_shutdown), 5893 .shutdown = __devexit_p(mwl8k_shutdown),
5894}; 5894};
5895 5895
5896static int __init mwl8k_init(void) 5896module_pci_driver(mwl8k_driver);
5897{
5898 return pci_register_driver(&mwl8k_driver);
5899}
5900
5901static void __exit mwl8k_exit(void)
5902{
5903 pci_unregister_driver(&mwl8k_driver);
5904}
5905
5906module_init(mwl8k_init);
5907module_exit(mwl8k_exit);
5908 5897
5909MODULE_DESCRIPTION(MWL8K_DESC); 5898MODULE_DESCRIPTION(MWL8K_DESC);
5910MODULE_VERSION(MWL8K_VERSION); 5899MODULE_VERSION(MWL8K_VERSION);
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 4df8cf64b56c..400a35217644 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -379,11 +379,8 @@ void orinoco_cache_fw(struct orinoco_private *priv, int ap)
379 379
380void orinoco_uncache_fw(struct orinoco_private *priv) 380void orinoco_uncache_fw(struct orinoco_private *priv)
381{ 381{
382 if (priv->cached_pri_fw) 382 release_firmware(priv->cached_pri_fw);
383 release_firmware(priv->cached_pri_fw); 383 release_firmware(priv->cached_fw);
384 if (priv->cached_fw)
385 release_firmware(priv->cached_fw);
386
387 priv->cached_pri_fw = NULL; 384 priv->cached_pri_fw = NULL;
388 priv->cached_fw = NULL; 385 priv->cached_fw = NULL;
389} 386}
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index ee8af1f047c8..7cffea795ad2 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -796,11 +796,14 @@ int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
796 dev_err(pdev, "Cannot register device (%d).\n", err); 796 dev_err(pdev, "Cannot register device (%d).\n", err);
797 return err; 797 return err;
798 } 798 }
799 priv->registered = true;
799 800
800#ifdef CONFIG_P54_LEDS 801#ifdef CONFIG_P54_LEDS
801 err = p54_init_leds(priv); 802 err = p54_init_leds(priv);
802 if (err) 803 if (err) {
804 p54_unregister_common(dev);
803 return err; 805 return err;
806 }
804#endif /* CONFIG_P54_LEDS */ 807#endif /* CONFIG_P54_LEDS */
805 808
806 dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy)); 809 dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy));
@@ -840,7 +843,11 @@ void p54_unregister_common(struct ieee80211_hw *dev)
840 p54_unregister_leds(priv); 843 p54_unregister_leds(priv);
841#endif /* CONFIG_P54_LEDS */ 844#endif /* CONFIG_P54_LEDS */
842 845
843 ieee80211_unregister_hw(dev); 846 if (priv->registered) {
847 priv->registered = false;
848 ieee80211_unregister_hw(dev);
849 }
850
844 mutex_destroy(&priv->conf_mutex); 851 mutex_destroy(&priv->conf_mutex);
845 mutex_destroy(&priv->eeprom_mutex); 852 mutex_destroy(&priv->eeprom_mutex);
846} 853}
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 452fa3a64aa1..40b401ed6845 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -173,6 +173,7 @@ struct p54_common {
173 struct sk_buff_head tx_pending; 173 struct sk_buff_head tx_pending;
174 struct sk_buff_head tx_queue; 174 struct sk_buff_head tx_queue;
175 struct mutex conf_mutex; 175 struct mutex conf_mutex;
176 bool registered;
176 177
177 /* memory management (as seen by the firmware) */ 178 /* memory management (as seen by the firmware) */
178 u32 rx_start; 179 u32 rx_start;
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 45df728183fd..89318adc8c7f 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -667,15 +667,4 @@ static struct pci_driver p54p_driver = {
667 .driver.pm = P54P_PM_OPS, 667 .driver.pm = P54P_PM_OPS,
668}; 668};
669 669
670static int __init p54p_init(void) 670module_pci_driver(p54p_driver);
671{
672 return pci_register_driver(&p54p_driver);
673}
674
675static void __exit p54p_exit(void)
676{
677 pci_unregister_driver(&p54p_driver);
678}
679
680module_init(p54p_init);
681module_exit(p54p_exit);
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index f4d28c39aac7..e1eac830e2fc 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -117,21 +117,18 @@ static const struct {
117 u32 intf; 117 u32 intf;
118 enum p54u_hw_type type; 118 enum p54u_hw_type type;
119 const char *fw; 119 const char *fw;
120 const char *fw_legacy;
121 char hw[20]; 120 char hw[20];
122} p54u_fwlist[__NUM_P54U_HWTYPES] = { 121} p54u_fwlist[__NUM_P54U_HWTYPES] = {
123 { 122 {
124 .type = P54U_NET2280, 123 .type = P54U_NET2280,
125 .intf = FW_LM86, 124 .intf = FW_LM86,
126 .fw = "isl3886usb", 125 .fw = "isl3886usb",
127 .fw_legacy = "isl3890usb",
128 .hw = "ISL3886 + net2280", 126 .hw = "ISL3886 + net2280",
129 }, 127 },
130 { 128 {
131 .type = P54U_3887, 129 .type = P54U_3887,
132 .intf = FW_LM87, 130 .intf = FW_LM87,
133 .fw = "isl3887usb", 131 .fw = "isl3887usb",
134 .fw_legacy = "isl3887usb_bare",
135 .hw = "ISL3887", 132 .hw = "ISL3887",
136 }, 133 },
137}; 134};
@@ -208,6 +205,16 @@ static void p54u_free_urbs(struct ieee80211_hw *dev)
208 usb_kill_anchored_urbs(&priv->submitted); 205 usb_kill_anchored_urbs(&priv->submitted);
209} 206}
210 207
208static void p54u_stop(struct ieee80211_hw *dev)
209{
210 /*
211 * TODO: figure out how to reliably stop the 3887 and net2280 so
212 * the hardware is still usable next time we want to start it.
213 * until then, we just stop listening to the hardware..
214 */
215 p54u_free_urbs(dev);
216}
217
211static int p54u_init_urbs(struct ieee80211_hw *dev) 218static int p54u_init_urbs(struct ieee80211_hw *dev)
212{ 219{
213 struct p54u_priv *priv = dev->priv; 220 struct p54u_priv *priv = dev->priv;
@@ -257,6 +264,16 @@ static int p54u_init_urbs(struct ieee80211_hw *dev)
257 return ret; 264 return ret;
258} 265}
259 266
267static int p54u_open(struct ieee80211_hw *dev)
268{
269 /*
270 * TODO: Because we don't know how to reliably stop the 3887 and
271 * the isl3886+net2280, other than brutally cut off all
272 * communications. We have to reinitialize the urbs on every start.
273 */
274 return p54u_init_urbs(dev);
275}
276
260static __le32 p54u_lm87_chksum(const __le32 *data, size_t length) 277static __le32 p54u_lm87_chksum(const __le32 *data, size_t length)
261{ 278{
262 u32 chk = 0; 279 u32 chk = 0;
@@ -836,70 +853,137 @@ fail:
836 return err; 853 return err;
837} 854}
838 855
839static int p54u_load_firmware(struct ieee80211_hw *dev) 856static int p54_find_type(struct p54u_priv *priv)
840{ 857{
841 struct p54u_priv *priv = dev->priv; 858 int i;
842 int err, i;
843
844 BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES);
845 859
846 for (i = 0; i < __NUM_P54U_HWTYPES; i++) 860 for (i = 0; i < __NUM_P54U_HWTYPES; i++)
847 if (p54u_fwlist[i].type == priv->hw_type) 861 if (p54u_fwlist[i].type == priv->hw_type)
848 break; 862 break;
849
850 if (i == __NUM_P54U_HWTYPES) 863 if (i == __NUM_P54U_HWTYPES)
851 return -EOPNOTSUPP; 864 return -EOPNOTSUPP;
852 865
853 err = request_firmware(&priv->fw, p54u_fwlist[i].fw, &priv->udev->dev); 866 return i;
854 if (err) { 867}
855 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
856 "(%d)!\n", p54u_fwlist[i].fw, err);
857 868
858 err = request_firmware(&priv->fw, p54u_fwlist[i].fw_legacy, 869static int p54u_start_ops(struct p54u_priv *priv)
859 &priv->udev->dev); 870{
860 if (err) 871 struct ieee80211_hw *dev = priv->common.hw;
861 return err; 872 int ret;
862 }
863 873
864 err = p54_parse_firmware(dev, priv->fw); 874 ret = p54_parse_firmware(dev, priv->fw);
865 if (err) 875 if (ret)
866 goto out; 876 goto err_out;
877
878 ret = p54_find_type(priv);
879 if (ret < 0)
880 goto err_out;
867 881
868 if (priv->common.fw_interface != p54u_fwlist[i].intf) { 882 if (priv->common.fw_interface != p54u_fwlist[ret].intf) {
869 dev_err(&priv->udev->dev, "wrong firmware, please get " 883 dev_err(&priv->udev->dev, "wrong firmware, please get "
870 "a firmware for \"%s\" and try again.\n", 884 "a firmware for \"%s\" and try again.\n",
871 p54u_fwlist[i].hw); 885 p54u_fwlist[ret].hw);
872 err = -EINVAL; 886 ret = -ENODEV;
887 goto err_out;
873 } 888 }
874 889
875out: 890 ret = priv->upload_fw(dev);
876 if (err) 891 if (ret)
877 release_firmware(priv->fw); 892 goto err_out;
878 893
879 return err; 894 ret = p54u_open(dev);
895 if (ret)
896 goto err_out;
897
898 ret = p54_read_eeprom(dev);
899 if (ret)
900 goto err_stop;
901
902 p54u_stop(dev);
903
904 ret = p54_register_common(dev, &priv->udev->dev);
905 if (ret)
906 goto err_stop;
907
908 return 0;
909
910err_stop:
911 p54u_stop(dev);
912
913err_out:
914 /*
915 * p54u_disconnect will do the rest of the
916 * cleanup
917 */
918 return ret;
880} 919}
881 920
882static int p54u_open(struct ieee80211_hw *dev) 921static void p54u_load_firmware_cb(const struct firmware *firmware,
922 void *context)
883{ 923{
884 struct p54u_priv *priv = dev->priv; 924 struct p54u_priv *priv = context;
925 struct usb_device *udev = priv->udev;
885 int err; 926 int err;
886 927
887 err = p54u_init_urbs(dev); 928 complete(&priv->fw_wait_load);
888 if (err) { 929 if (firmware) {
889 return err; 930 priv->fw = firmware;
931 err = p54u_start_ops(priv);
932 } else {
933 err = -ENOENT;
934 dev_err(&udev->dev, "Firmware not found.\n");
890 } 935 }
891 936
892 priv->common.open = p54u_init_urbs; 937 if (err) {
938 struct device *parent = priv->udev->dev.parent;
893 939
894 return 0; 940 dev_err(&udev->dev, "failed to initialize device (%d)\n", err);
941
942 if (parent)
943 device_lock(parent);
944
945 device_release_driver(&udev->dev);
946 /*
947 * At this point p54u_disconnect has already freed
948 * the "priv" context. Do not use it anymore!
949 */
950 priv = NULL;
951
952 if (parent)
953 device_unlock(parent);
954 }
955
956 usb_put_dev(udev);
895} 957}
896 958
897static void p54u_stop(struct ieee80211_hw *dev) 959static int p54u_load_firmware(struct ieee80211_hw *dev,
960 struct usb_interface *intf)
898{ 961{
899 /* TODO: figure out how to reliably stop the 3887 and net2280 so 962 struct usb_device *udev = interface_to_usbdev(intf);
900 the hardware is still usable next time we want to start it. 963 struct p54u_priv *priv = dev->priv;
901 until then, we just stop listening to the hardware.. */ 964 struct device *device = &udev->dev;
902 p54u_free_urbs(dev); 965 int err, i;
966
967 BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES);
968
969 init_completion(&priv->fw_wait_load);
970 i = p54_find_type(priv);
971 if (i < 0)
972 return i;
973
974 dev_info(&priv->udev->dev, "Loading firmware file %s\n",
975 p54u_fwlist[i].fw);
976
977 usb_get_dev(udev);
978 err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw,
979 device, GFP_KERNEL, priv,
980 p54u_load_firmware_cb);
981 if (err) {
982 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
983 "(%d)!\n", p54u_fwlist[i].fw, err);
984 }
985
986 return err;
903} 987}
904 988
905static int __devinit p54u_probe(struct usb_interface *intf, 989static int __devinit p54u_probe(struct usb_interface *intf,
@@ -969,33 +1053,7 @@ static int __devinit p54u_probe(struct usb_interface *intf,
969 priv->common.tx = p54u_tx_net2280; 1053 priv->common.tx = p54u_tx_net2280;
970 priv->upload_fw = p54u_upload_firmware_net2280; 1054 priv->upload_fw = p54u_upload_firmware_net2280;
971 } 1055 }
972 err = p54u_load_firmware(dev); 1056 err = p54u_load_firmware(dev, intf);
973 if (err)
974 goto err_free_dev;
975
976 err = priv->upload_fw(dev);
977 if (err)
978 goto err_free_fw;
979
980 p54u_open(dev);
981 err = p54_read_eeprom(dev);
982 p54u_stop(dev);
983 if (err)
984 goto err_free_fw;
985
986 err = p54_register_common(dev, &udev->dev);
987 if (err)
988 goto err_free_fw;
989
990 return 0;
991
992err_free_fw:
993 release_firmware(priv->fw);
994
995err_free_dev:
996 p54_free_common(dev);
997 usb_set_intfdata(intf, NULL);
998 usb_put_dev(udev);
999 return err; 1057 return err;
1000} 1058}
1001 1059
@@ -1007,9 +1065,10 @@ static void __devexit p54u_disconnect(struct usb_interface *intf)
1007 if (!dev) 1065 if (!dev)
1008 return; 1066 return;
1009 1067
1068 priv = dev->priv;
1069 wait_for_completion(&priv->fw_wait_load);
1010 p54_unregister_common(dev); 1070 p54_unregister_common(dev);
1011 1071
1012 priv = dev->priv;
1013 usb_put_dev(interface_to_usbdev(intf)); 1072 usb_put_dev(interface_to_usbdev(intf));
1014 release_firmware(priv->fw); 1073 release_firmware(priv->fw);
1015 p54_free_common(dev); 1074 p54_free_common(dev);
@@ -1072,7 +1131,7 @@ static struct usb_driver p54u_driver = {
1072 .name = "p54usb", 1131 .name = "p54usb",
1073 .id_table = p54u_table, 1132 .id_table = p54u_table,
1074 .probe = p54u_probe, 1133 .probe = p54u_probe,
1075 .disconnect = p54u_disconnect, 1134 .disconnect = __devexit_p(p54u_disconnect),
1076 .pre_reset = p54u_pre_reset, 1135 .pre_reset = p54u_pre_reset,
1077 .post_reset = p54u_post_reset, 1136 .post_reset = p54u_post_reset,
1078#ifdef CONFIG_PM 1137#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index ed4034ade59a..d273be7272b9 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -143,6 +143,9 @@ struct p54u_priv {
143 struct sk_buff_head rx_queue; 143 struct sk_buff_head rx_queue;
144 struct usb_anchor submitted; 144 struct usb_anchor submitted;
145 const struct firmware *fw; 145 const struct firmware *fw;
146
147 /* asynchronous firmware callback */
148 struct completion fw_wait_load;
146}; 149};
147 150
148#endif /* P54USB_H */ 151#endif /* P54USB_H */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index a08a6f0e4dd1..82a1cac920bd 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -308,7 +308,7 @@ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb)
308 return; 308 return;
309 309
310 /* only consider beacons from the associated BSSID */ 310 /* only consider beacons from the associated BSSID */
311 if (compare_ether_addr(hdr->addr3, priv->bssid)) 311 if (!ether_addr_equal(hdr->addr3, priv->bssid))
312 return; 312 return;
313 313
314 tim = p54_find_ie(skb, WLAN_EID_TIM); 314 tim = p54_find_ie(skb, WLAN_EID_TIM);
@@ -914,8 +914,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
914 txhdr->hw_queue = queue; 914 txhdr->hw_queue = queue;
915 txhdr->backlog = priv->tx_stats[queue].len - 1; 915 txhdr->backlog = priv->tx_stats[queue].len - 1;
916 memset(txhdr->durations, 0, sizeof(txhdr->durations)); 916 memset(txhdr->durations, 0, sizeof(txhdr->durations));
917 txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ? 917 txhdr->tx_antenna = 2 & priv->tx_diversity_mask;
918 2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask;
919 if (priv->rxhw == 5) { 918 if (priv->rxhw == 5) {
920 txhdr->longbow.cts_rate = cts_rate; 919 txhdr->longbow.cts_rate = cts_rate;
921 txhdr->longbow.output_power = cpu_to_le16(priv->output_power); 920 txhdr->longbow.output_power = cpu_to_le16(priv->output_power);
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index 9b796cae4afe..a01606b36e03 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -693,8 +693,6 @@ mgt_update_addr(islpci_private *priv)
693 return ret; 693 return ret;
694} 694}
695 695
696#define VEC_SIZE(a) ARRAY_SIZE(a)
697
698int 696int
699mgt_commit(islpci_private *priv) 697mgt_commit(islpci_private *priv)
700{ 698{
@@ -704,10 +702,10 @@ mgt_commit(islpci_private *priv)
704 if (islpci_get_state(priv) < PRV_STATE_INIT) 702 if (islpci_get_state(priv) < PRV_STATE_INIT)
705 return 0; 703 return 0;
706 704
707 rvalue = mgt_commit_list(priv, commit_part1, VEC_SIZE(commit_part1)); 705 rvalue = mgt_commit_list(priv, commit_part1, ARRAY_SIZE(commit_part1));
708 706
709 if (priv->iw_mode != IW_MODE_MONITOR) 707 if (priv->iw_mode != IW_MODE_MONITOR)
710 rvalue |= mgt_commit_list(priv, commit_part2, VEC_SIZE(commit_part2)); 708 rvalue |= mgt_commit_list(priv, commit_part2, ARRAY_SIZE(commit_part2));
711 709
712 u = OID_INL_MODE; 710 u = OID_INL_MODE;
713 rvalue |= mgt_commit_list(priv, &u, 1); 711 rvalue |= mgt_commit_list(priv, &u, 1);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index d66e2980bc27..b91d1bb30b41 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -88,49 +88,6 @@ module_param_named(workaround_interval, modparam_workaround_interval,
88MODULE_PARM_DESC(workaround_interval, 88MODULE_PARM_DESC(workaround_interval,
89 "set stall workaround interval in msecs (0=disabled) (default: 0)"); 89 "set stall workaround interval in msecs (0=disabled) (default: 0)");
90 90
91
92/* various RNDIS OID defs */
93#define OID_GEN_LINK_SPEED cpu_to_le32(0x00010107)
94#define OID_GEN_RNDIS_CONFIG_PARAMETER cpu_to_le32(0x0001021b)
95
96#define OID_GEN_XMIT_OK cpu_to_le32(0x00020101)
97#define OID_GEN_RCV_OK cpu_to_le32(0x00020102)
98#define OID_GEN_XMIT_ERROR cpu_to_le32(0x00020103)
99#define OID_GEN_RCV_ERROR cpu_to_le32(0x00020104)
100#define OID_GEN_RCV_NO_BUFFER cpu_to_le32(0x00020105)
101
102#define OID_802_3_CURRENT_ADDRESS cpu_to_le32(0x01010102)
103#define OID_802_3_MULTICAST_LIST cpu_to_le32(0x01010103)
104#define OID_802_3_MAXIMUM_LIST_SIZE cpu_to_le32(0x01010104)
105
106#define OID_802_11_BSSID cpu_to_le32(0x0d010101)
107#define OID_802_11_SSID cpu_to_le32(0x0d010102)
108#define OID_802_11_INFRASTRUCTURE_MODE cpu_to_le32(0x0d010108)
109#define OID_802_11_ADD_WEP cpu_to_le32(0x0d010113)
110#define OID_802_11_REMOVE_WEP cpu_to_le32(0x0d010114)
111#define OID_802_11_DISASSOCIATE cpu_to_le32(0x0d010115)
112#define OID_802_11_AUTHENTICATION_MODE cpu_to_le32(0x0d010118)
113#define OID_802_11_PRIVACY_FILTER cpu_to_le32(0x0d010119)
114#define OID_802_11_BSSID_LIST_SCAN cpu_to_le32(0x0d01011a)
115#define OID_802_11_ENCRYPTION_STATUS cpu_to_le32(0x0d01011b)
116#define OID_802_11_ADD_KEY cpu_to_le32(0x0d01011d)
117#define OID_802_11_REMOVE_KEY cpu_to_le32(0x0d01011e)
118#define OID_802_11_ASSOCIATION_INFORMATION cpu_to_le32(0x0d01011f)
119#define OID_802_11_CAPABILITY cpu_to_le32(0x0d010122)
120#define OID_802_11_PMKID cpu_to_le32(0x0d010123)
121#define OID_802_11_NETWORK_TYPES_SUPPORTED cpu_to_le32(0x0d010203)
122#define OID_802_11_NETWORK_TYPE_IN_USE cpu_to_le32(0x0d010204)
123#define OID_802_11_TX_POWER_LEVEL cpu_to_le32(0x0d010205)
124#define OID_802_11_RSSI cpu_to_le32(0x0d010206)
125#define OID_802_11_RSSI_TRIGGER cpu_to_le32(0x0d010207)
126#define OID_802_11_FRAGMENTATION_THRESHOLD cpu_to_le32(0x0d010209)
127#define OID_802_11_RTS_THRESHOLD cpu_to_le32(0x0d01020a)
128#define OID_802_11_SUPPORTED_RATES cpu_to_le32(0x0d01020e)
129#define OID_802_11_CONFIGURATION cpu_to_le32(0x0d010211)
130#define OID_802_11_POWER_MODE cpu_to_le32(0x0d010216)
131#define OID_802_11_BSSID_LIST cpu_to_le32(0x0d010217)
132
133
134/* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */ 91/* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */
135#define WL_NOISE -96 /* typical noise level in dBm */ 92#define WL_NOISE -96 /* typical noise level in dBm */
136#define WL_SIGMAX -32 /* typical maximum signal level in dBm */ 93#define WL_SIGMAX -32 /* typical maximum signal level in dBm */
@@ -149,12 +106,6 @@ MODULE_PARM_DESC(workaround_interval,
149#define BCM4320_DEFAULT_TXPOWER_DBM_50 10 106#define BCM4320_DEFAULT_TXPOWER_DBM_50 10
150#define BCM4320_DEFAULT_TXPOWER_DBM_25 7 107#define BCM4320_DEFAULT_TXPOWER_DBM_25 7
151 108
152
153/* codes for "status" field of completion messages */
154#define RNDIS_STATUS_ADAPTER_NOT_READY cpu_to_le32(0xc0010011)
155#define RNDIS_STATUS_ADAPTER_NOT_OPEN cpu_to_le32(0xc0010012)
156
157
158/* Known device types */ 109/* Known device types */
159#define RNDIS_UNKNOWN 0 110#define RNDIS_UNKNOWN 0
160#define RNDIS_BCM4320A 1 111#define RNDIS_BCM4320A 1
@@ -515,7 +466,7 @@ struct rndis_wlan_private {
515 int infra_mode; 466 int infra_mode;
516 bool connected; 467 bool connected;
517 u8 bssid[ETH_ALEN]; 468 u8 bssid[ETH_ALEN];
518 __le32 current_command_oid; 469 u32 current_command_oid;
519 470
520 /* encryption stuff */ 471 /* encryption stuff */
521 u8 encr_tx_key_index; 472 u8 encr_tx_key_index;
@@ -670,63 +621,63 @@ static int rndis_akm_suite_to_key_mgmt(u32 akm_suite)
670} 621}
671 622
672#ifdef DEBUG 623#ifdef DEBUG
673static const char *oid_to_string(__le32 oid) 624static const char *oid_to_string(u32 oid)
674{ 625{
675 switch (oid) { 626 switch (oid) {
676#define OID_STR(oid) case oid: return(#oid) 627#define OID_STR(oid) case oid: return(#oid)
677 /* from rndis_host.h */ 628 /* from rndis_host.h */
678 OID_STR(OID_802_3_PERMANENT_ADDRESS); 629 OID_STR(RNDIS_OID_802_3_PERMANENT_ADDRESS);
679 OID_STR(OID_GEN_MAXIMUM_FRAME_SIZE); 630 OID_STR(RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE);
680 OID_STR(OID_GEN_CURRENT_PACKET_FILTER); 631 OID_STR(RNDIS_OID_GEN_CURRENT_PACKET_FILTER);
681 OID_STR(OID_GEN_PHYSICAL_MEDIUM); 632 OID_STR(RNDIS_OID_GEN_PHYSICAL_MEDIUM);
682 633
683 /* from rndis_wlan.c */ 634 /* from rndis_wlan.c */
684 OID_STR(OID_GEN_LINK_SPEED); 635 OID_STR(RNDIS_OID_GEN_LINK_SPEED);
685 OID_STR(OID_GEN_RNDIS_CONFIG_PARAMETER); 636 OID_STR(RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER);
686 637
687 OID_STR(OID_GEN_XMIT_OK); 638 OID_STR(RNDIS_OID_GEN_XMIT_OK);
688 OID_STR(OID_GEN_RCV_OK); 639 OID_STR(RNDIS_OID_GEN_RCV_OK);
689 OID_STR(OID_GEN_XMIT_ERROR); 640 OID_STR(RNDIS_OID_GEN_XMIT_ERROR);
690 OID_STR(OID_GEN_RCV_ERROR); 641 OID_STR(RNDIS_OID_GEN_RCV_ERROR);
691 OID_STR(OID_GEN_RCV_NO_BUFFER); 642 OID_STR(RNDIS_OID_GEN_RCV_NO_BUFFER);
692 643
693 OID_STR(OID_802_3_CURRENT_ADDRESS); 644 OID_STR(RNDIS_OID_802_3_CURRENT_ADDRESS);
694 OID_STR(OID_802_3_MULTICAST_LIST); 645 OID_STR(RNDIS_OID_802_3_MULTICAST_LIST);
695 OID_STR(OID_802_3_MAXIMUM_LIST_SIZE); 646 OID_STR(RNDIS_OID_802_3_MAXIMUM_LIST_SIZE);
696 647
697 OID_STR(OID_802_11_BSSID); 648 OID_STR(RNDIS_OID_802_11_BSSID);
698 OID_STR(OID_802_11_SSID); 649 OID_STR(RNDIS_OID_802_11_SSID);
699 OID_STR(OID_802_11_INFRASTRUCTURE_MODE); 650 OID_STR(RNDIS_OID_802_11_INFRASTRUCTURE_MODE);
700 OID_STR(OID_802_11_ADD_WEP); 651 OID_STR(RNDIS_OID_802_11_ADD_WEP);
701 OID_STR(OID_802_11_REMOVE_WEP); 652 OID_STR(RNDIS_OID_802_11_REMOVE_WEP);
702 OID_STR(OID_802_11_DISASSOCIATE); 653 OID_STR(RNDIS_OID_802_11_DISASSOCIATE);
703 OID_STR(OID_802_11_AUTHENTICATION_MODE); 654 OID_STR(RNDIS_OID_802_11_AUTHENTICATION_MODE);
704 OID_STR(OID_802_11_PRIVACY_FILTER); 655 OID_STR(RNDIS_OID_802_11_PRIVACY_FILTER);
705 OID_STR(OID_802_11_BSSID_LIST_SCAN); 656 OID_STR(RNDIS_OID_802_11_BSSID_LIST_SCAN);
706 OID_STR(OID_802_11_ENCRYPTION_STATUS); 657 OID_STR(RNDIS_OID_802_11_ENCRYPTION_STATUS);
707 OID_STR(OID_802_11_ADD_KEY); 658 OID_STR(RNDIS_OID_802_11_ADD_KEY);
708 OID_STR(OID_802_11_REMOVE_KEY); 659 OID_STR(RNDIS_OID_802_11_REMOVE_KEY);
709 OID_STR(OID_802_11_ASSOCIATION_INFORMATION); 660 OID_STR(RNDIS_OID_802_11_ASSOCIATION_INFORMATION);
710 OID_STR(OID_802_11_CAPABILITY); 661 OID_STR(RNDIS_OID_802_11_CAPABILITY);
711 OID_STR(OID_802_11_PMKID); 662 OID_STR(RNDIS_OID_802_11_PMKID);
712 OID_STR(OID_802_11_NETWORK_TYPES_SUPPORTED); 663 OID_STR(RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED);
713 OID_STR(OID_802_11_NETWORK_TYPE_IN_USE); 664 OID_STR(RNDIS_OID_802_11_NETWORK_TYPE_IN_USE);
714 OID_STR(OID_802_11_TX_POWER_LEVEL); 665 OID_STR(RNDIS_OID_802_11_TX_POWER_LEVEL);
715 OID_STR(OID_802_11_RSSI); 666 OID_STR(RNDIS_OID_802_11_RSSI);
716 OID_STR(OID_802_11_RSSI_TRIGGER); 667 OID_STR(RNDIS_OID_802_11_RSSI_TRIGGER);
717 OID_STR(OID_802_11_FRAGMENTATION_THRESHOLD); 668 OID_STR(RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD);
718 OID_STR(OID_802_11_RTS_THRESHOLD); 669 OID_STR(RNDIS_OID_802_11_RTS_THRESHOLD);
719 OID_STR(OID_802_11_SUPPORTED_RATES); 670 OID_STR(RNDIS_OID_802_11_SUPPORTED_RATES);
720 OID_STR(OID_802_11_CONFIGURATION); 671 OID_STR(RNDIS_OID_802_11_CONFIGURATION);
721 OID_STR(OID_802_11_POWER_MODE); 672 OID_STR(RNDIS_OID_802_11_POWER_MODE);
722 OID_STR(OID_802_11_BSSID_LIST); 673 OID_STR(RNDIS_OID_802_11_BSSID_LIST);
723#undef OID_STR 674#undef OID_STR
724 } 675 }
725 676
726 return "?"; 677 return "?";
727} 678}
728#else 679#else
729static const char *oid_to_string(__le32 oid) 680static const char *oid_to_string(u32 oid)
730{ 681{
731 return "?"; 682 return "?";
732} 683}
@@ -736,7 +687,7 @@ static const char *oid_to_string(__le32 oid)
736static int rndis_error_status(__le32 rndis_status) 687static int rndis_error_status(__le32 rndis_status)
737{ 688{
738 int ret = -EINVAL; 689 int ret = -EINVAL;
739 switch (rndis_status) { 690 switch (le32_to_cpu(rndis_status)) {
740 case RNDIS_STATUS_SUCCESS: 691 case RNDIS_STATUS_SUCCESS:
741 ret = 0; 692 ret = 0;
742 break; 693 break;
@@ -755,7 +706,7 @@ static int rndis_error_status(__le32 rndis_status)
755 return ret; 706 return ret;
756} 707}
757 708
758static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len) 709static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
759{ 710{
760 struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev); 711 struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
761 union { 712 union {
@@ -782,9 +733,9 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
782 mutex_lock(&priv->command_lock); 733 mutex_lock(&priv->command_lock);
783 734
784 memset(u.get, 0, sizeof *u.get); 735 memset(u.get, 0, sizeof *u.get);
785 u.get->msg_type = RNDIS_MSG_QUERY; 736 u.get->msg_type = cpu_to_le32(RNDIS_MSG_QUERY);
786 u.get->msg_len = cpu_to_le32(sizeof *u.get); 737 u.get->msg_len = cpu_to_le32(sizeof *u.get);
787 u.get->oid = oid; 738 u.get->oid = cpu_to_le32(oid);
788 739
789 priv->current_command_oid = oid; 740 priv->current_command_oid = oid;
790 ret = rndis_command(dev, u.header, buflen); 741 ret = rndis_command(dev, u.header, buflen);
@@ -839,7 +790,7 @@ exit_unlock:
839 return ret; 790 return ret;
840} 791}
841 792
842static int rndis_set_oid(struct usbnet *dev, __le32 oid, const void *data, 793static int rndis_set_oid(struct usbnet *dev, u32 oid, const void *data,
843 int len) 794 int len)
844{ 795{
845 struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev); 796 struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
@@ -866,9 +817,9 @@ static int rndis_set_oid(struct usbnet *dev, __le32 oid, const void *data,
866 mutex_lock(&priv->command_lock); 817 mutex_lock(&priv->command_lock);
867 818
868 memset(u.set, 0, sizeof *u.set); 819 memset(u.set, 0, sizeof *u.set);
869 u.set->msg_type = RNDIS_MSG_SET; 820 u.set->msg_type = cpu_to_le32(RNDIS_MSG_SET);
870 u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len); 821 u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len);
871 u.set->oid = oid; 822 u.set->oid = cpu_to_le32(oid);
872 u.set->len = cpu_to_le32(len); 823 u.set->len = cpu_to_le32(len);
873 u.set->offset = cpu_to_le32(sizeof(*u.set) - 8); 824 u.set->offset = cpu_to_le32(sizeof(*u.set) - 8);
874 u.set->handle = cpu_to_le32(0); 825 u.set->handle = cpu_to_le32(0);
@@ -908,7 +859,7 @@ static int rndis_reset(struct usbnet *usbdev)
908 859
909 reset = (void *)priv->command_buffer; 860 reset = (void *)priv->command_buffer;
910 memset(reset, 0, sizeof(*reset)); 861 memset(reset, 0, sizeof(*reset));
911 reset->msg_type = RNDIS_MSG_RESET; 862 reset->msg_type = cpu_to_le32(RNDIS_MSG_RESET);
912 reset->msg_len = cpu_to_le32(sizeof(*reset)); 863 reset->msg_len = cpu_to_le32(sizeof(*reset));
913 priv->current_command_oid = 0; 864 priv->current_command_oid = 0;
914 ret = rndis_command(usbdev, (void *)reset, CONTROL_BUFFER_SIZE); 865 ret = rndis_command(usbdev, (void *)reset, CONTROL_BUFFER_SIZE);
@@ -994,7 +945,7 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
994 } 945 }
995#endif 946#endif
996 947
997 ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER, 948 ret = rndis_set_oid(dev, RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER,
998 infobuf, info_len); 949 infobuf, info_len);
999 if (ret != 0) 950 if (ret != 0)
1000 netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n", 951 netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n",
@@ -1031,9 +982,9 @@ static int rndis_start_bssid_list_scan(struct usbnet *usbdev)
1031{ 982{
1032 __le32 tmp; 983 __le32 tmp;
1033 984
1034 /* Note: OID_802_11_BSSID_LIST_SCAN clears internal BSS list. */ 985 /* Note: RNDIS_OID_802_11_BSSID_LIST_SCAN clears internal BSS list. */
1035 tmp = cpu_to_le32(1); 986 tmp = cpu_to_le32(1);
1036 return rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, 987 return rndis_set_oid(usbdev, RNDIS_OID_802_11_BSSID_LIST_SCAN, &tmp,
1037 sizeof(tmp)); 988 sizeof(tmp));
1038} 989}
1039 990
@@ -1042,7 +993,8 @@ static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
1042 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 993 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1043 int ret; 994 int ret;
1044 995
1045 ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid)); 996 ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_SSID,
997 ssid, sizeof(*ssid));
1046 if (ret < 0) { 998 if (ret < 0) {
1047 netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret); 999 netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret);
1048 return ret; 1000 return ret;
@@ -1059,7 +1011,8 @@ static int set_bssid(struct usbnet *usbdev, const u8 *bssid)
1059{ 1011{
1060 int ret; 1012 int ret;
1061 1013
1062 ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN); 1014 ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_BSSID,
1015 bssid, ETH_ALEN);
1063 if (ret < 0) { 1016 if (ret < 0) {
1064 netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n", 1017 netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n",
1065 bssid, ret); 1018 bssid, ret);
@@ -1083,7 +1036,8 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
1083 int ret, len; 1036 int ret, len;
1084 1037
1085 len = ETH_ALEN; 1038 len = ETH_ALEN;
1086 ret = rndis_query_oid(usbdev, OID_802_11_BSSID, bssid, &len); 1039 ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_BSSID,
1040 bssid, &len);
1087 1041
1088 if (ret != 0) 1042 if (ret != 0)
1089 memset(bssid, 0, ETH_ALEN); 1043 memset(bssid, 0, ETH_ALEN);
@@ -1094,8 +1048,9 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
1094static int get_association_info(struct usbnet *usbdev, 1048static int get_association_info(struct usbnet *usbdev,
1095 struct ndis_80211_assoc_info *info, int len) 1049 struct ndis_80211_assoc_info *info, int len)
1096{ 1050{
1097 return rndis_query_oid(usbdev, OID_802_11_ASSOCIATION_INFORMATION, 1051 return rndis_query_oid(usbdev,
1098 info, &len); 1052 RNDIS_OID_802_11_ASSOCIATION_INFORMATION,
1053 info, &len);
1099} 1054}
1100 1055
1101static bool is_associated(struct usbnet *usbdev) 1056static bool is_associated(struct usbnet *usbdev)
@@ -1119,7 +1074,9 @@ static int disassociate(struct usbnet *usbdev, bool reset_ssid)
1119 int i, ret = 0; 1074 int i, ret = 0;
1120 1075
1121 if (priv->radio_on) { 1076 if (priv->radio_on) {
1122 ret = rndis_set_oid(usbdev, OID_802_11_DISASSOCIATE, NULL, 0); 1077 ret = rndis_set_oid(usbdev,
1078 RNDIS_OID_802_11_DISASSOCIATE,
1079 NULL, 0);
1123 if (ret == 0) { 1080 if (ret == 0) {
1124 priv->radio_on = false; 1081 priv->radio_on = false;
1125 netdev_dbg(usbdev->net, "%s(): radio_on = false\n", 1082 netdev_dbg(usbdev->net, "%s(): radio_on = false\n",
@@ -1181,8 +1138,9 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
1181 return -ENOTSUPP; 1138 return -ENOTSUPP;
1182 1139
1183 tmp = cpu_to_le32(auth_mode); 1140 tmp = cpu_to_le32(auth_mode);
1184 ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp, 1141 ret = rndis_set_oid(usbdev,
1185 sizeof(tmp)); 1142 RNDIS_OID_802_11_AUTHENTICATION_MODE,
1143 &tmp, sizeof(tmp));
1186 if (ret != 0) { 1144 if (ret != 0) {
1187 netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n", 1145 netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n",
1188 ret); 1146 ret);
@@ -1208,8 +1166,9 @@ static int set_priv_filter(struct usbnet *usbdev)
1208 else 1166 else
1209 tmp = cpu_to_le32(NDIS_80211_PRIV_ACCEPT_ALL); 1167 tmp = cpu_to_le32(NDIS_80211_PRIV_ACCEPT_ALL);
1210 1168
1211 return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp, 1169 return rndis_set_oid(usbdev,
1212 sizeof(tmp)); 1170 RNDIS_OID_802_11_PRIVACY_FILTER, &tmp,
1171 sizeof(tmp));
1213} 1172}
1214 1173
1215static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise) 1174static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
@@ -1234,8 +1193,9 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
1234 encr_mode = NDIS_80211_ENCR_DISABLED; 1193 encr_mode = NDIS_80211_ENCR_DISABLED;
1235 1194
1236 tmp = cpu_to_le32(encr_mode); 1195 tmp = cpu_to_le32(encr_mode);
1237 ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp, 1196 ret = rndis_set_oid(usbdev,
1238 sizeof(tmp)); 1197 RNDIS_OID_802_11_ENCRYPTION_STATUS, &tmp,
1198 sizeof(tmp));
1239 if (ret != 0) { 1199 if (ret != 0) {
1240 netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n", 1200 netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n",
1241 ret); 1201 ret);
@@ -1255,8 +1215,9 @@ static int set_infra_mode(struct usbnet *usbdev, int mode)
1255 __func__, priv->infra_mode); 1215 __func__, priv->infra_mode);
1256 1216
1257 tmp = cpu_to_le32(mode); 1217 tmp = cpu_to_le32(mode);
1258 ret = rndis_set_oid(usbdev, OID_802_11_INFRASTRUCTURE_MODE, &tmp, 1218 ret = rndis_set_oid(usbdev,
1259 sizeof(tmp)); 1219 RNDIS_OID_802_11_INFRASTRUCTURE_MODE,
1220 &tmp, sizeof(tmp));
1260 if (ret != 0) { 1221 if (ret != 0) {
1261 netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n", 1222 netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n",
1262 ret); 1223 ret);
@@ -1282,8 +1243,9 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
1282 rts_threshold = 2347; 1243 rts_threshold = 2347;
1283 1244
1284 tmp = cpu_to_le32(rts_threshold); 1245 tmp = cpu_to_le32(rts_threshold);
1285 return rndis_set_oid(usbdev, OID_802_11_RTS_THRESHOLD, &tmp, 1246 return rndis_set_oid(usbdev,
1286 sizeof(tmp)); 1247 RNDIS_OID_802_11_RTS_THRESHOLD,
1248 &tmp, sizeof(tmp));
1287} 1249}
1288 1250
1289static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold) 1251static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold)
@@ -1296,8 +1258,9 @@ static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold)
1296 frag_threshold = 2346; 1258 frag_threshold = 2346;
1297 1259
1298 tmp = cpu_to_le32(frag_threshold); 1260 tmp = cpu_to_le32(frag_threshold);
1299 return rndis_set_oid(usbdev, OID_802_11_FRAGMENTATION_THRESHOLD, &tmp, 1261 return rndis_set_oid(usbdev,
1300 sizeof(tmp)); 1262 RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD,
1263 &tmp, sizeof(tmp));
1301} 1264}
1302 1265
1303static void set_default_iw_params(struct usbnet *usbdev) 1266static void set_default_iw_params(struct usbnet *usbdev)
@@ -1333,7 +1296,9 @@ static int set_channel(struct usbnet *usbdev, int channel)
1333 dsconfig = ieee80211_dsss_chan_to_freq(channel) * 1000; 1296 dsconfig = ieee80211_dsss_chan_to_freq(channel) * 1000;
1334 1297
1335 len = sizeof(config); 1298 len = sizeof(config);
1336 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); 1299 ret = rndis_query_oid(usbdev,
1300 RNDIS_OID_802_11_CONFIGURATION,
1301 &config, &len);
1337 if (ret < 0) { 1302 if (ret < 0) {
1338 netdev_dbg(usbdev->net, "%s(): querying configuration failed\n", 1303 netdev_dbg(usbdev->net, "%s(): querying configuration failed\n",
1339 __func__); 1304 __func__);
@@ -1341,8 +1306,9 @@ static int set_channel(struct usbnet *usbdev, int channel)
1341 } 1306 }
1342 1307
1343 config.ds_config = cpu_to_le32(dsconfig); 1308 config.ds_config = cpu_to_le32(dsconfig);
1344 ret = rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config, 1309 ret = rndis_set_oid(usbdev,
1345 sizeof(config)); 1310 RNDIS_OID_802_11_CONFIGURATION,
1311 &config, sizeof(config));
1346 1312
1347 netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret); 1313 netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret);
1348 1314
@@ -1359,8 +1325,10 @@ static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
1359 1325
1360 /* Get channel and beacon interval */ 1326 /* Get channel and beacon interval */
1361 len = sizeof(config); 1327 len = sizeof(config);
1362 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); 1328 ret = rndis_query_oid(usbdev,
1363 netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n", 1329 RNDIS_OID_802_11_CONFIGURATION,
1330 &config, &len);
1331 netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_CONFIGURATION -> %d\n",
1364 __func__, ret); 1332 __func__, ret);
1365 if (ret < 0) 1333 if (ret < 0)
1366 return NULL; 1334 return NULL;
@@ -1413,8 +1381,9 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
1413 ret); 1381 ret);
1414 } 1382 }
1415 1383
1416 ret = rndis_set_oid(usbdev, OID_802_11_ADD_WEP, &ndis_key, 1384 ret = rndis_set_oid(usbdev,
1417 sizeof(ndis_key)); 1385 RNDIS_OID_802_11_ADD_WEP, &ndis_key,
1386 sizeof(ndis_key));
1418 if (ret != 0) { 1387 if (ret != 0) {
1419 netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n", 1388 netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n",
1420 index + 1, ret); 1389 index + 1, ret);
@@ -1504,9 +1473,10 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
1504 get_bssid(usbdev, ndis_key.bssid); 1473 get_bssid(usbdev, ndis_key.bssid);
1505 } 1474 }
1506 1475
1507 ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key, 1476 ret = rndis_set_oid(usbdev,
1508 le32_to_cpu(ndis_key.size)); 1477 RNDIS_OID_802_11_ADD_KEY, &ndis_key,
1509 netdev_dbg(usbdev->net, "%s(): OID_802_11_ADD_KEY -> %08X\n", 1478 le32_to_cpu(ndis_key.size));
1479 netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_ADD_KEY -> %08X\n",
1510 __func__, ret); 1480 __func__, ret);
1511 if (ret != 0) 1481 if (ret != 0)
1512 return ret; 1482 return ret;
@@ -1594,14 +1564,16 @@ static int remove_key(struct usbnet *usbdev, u8 index, const u8 *bssid)
1594 memset(remove_key.bssid, 0xff, 1564 memset(remove_key.bssid, 0xff,
1595 sizeof(remove_key.bssid)); 1565 sizeof(remove_key.bssid));
1596 1566
1597 ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_KEY, &remove_key, 1567 ret = rndis_set_oid(usbdev,
1598 sizeof(remove_key)); 1568 RNDIS_OID_802_11_REMOVE_KEY,
1569 &remove_key, sizeof(remove_key));
1599 if (ret != 0) 1570 if (ret != 0)
1600 return ret; 1571 return ret;
1601 } else { 1572 } else {
1602 keyindex = cpu_to_le32(index); 1573 keyindex = cpu_to_le32(index);
1603 ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_WEP, &keyindex, 1574 ret = rndis_set_oid(usbdev,
1604 sizeof(keyindex)); 1575 RNDIS_OID_802_11_REMOVE_WEP,
1576 &keyindex, sizeof(keyindex));
1605 if (ret != 0) { 1577 if (ret != 0) {
1606 netdev_warn(usbdev->net, 1578 netdev_warn(usbdev->net,
1607 "removing encryption key %d failed (%08X)\n", 1579 "removing encryption key %d failed (%08X)\n",
@@ -1626,14 +1598,14 @@ static void set_multicast_list(struct usbnet *usbdev)
1626 char *mc_addrs = NULL; 1598 char *mc_addrs = NULL;
1627 int mc_count; 1599 int mc_count;
1628 1600
1629 basefilter = filter = RNDIS_PACKET_TYPE_DIRECTED | 1601 basefilter = filter = cpu_to_le32(RNDIS_PACKET_TYPE_DIRECTED |
1630 RNDIS_PACKET_TYPE_BROADCAST; 1602 RNDIS_PACKET_TYPE_BROADCAST);
1631 1603
1632 if (usbdev->net->flags & IFF_PROMISC) { 1604 if (usbdev->net->flags & IFF_PROMISC) {
1633 filter |= RNDIS_PACKET_TYPE_PROMISCUOUS | 1605 filter |= cpu_to_le32(RNDIS_PACKET_TYPE_PROMISCUOUS |
1634 RNDIS_PACKET_TYPE_ALL_LOCAL; 1606 RNDIS_PACKET_TYPE_ALL_LOCAL);
1635 } else if (usbdev->net->flags & IFF_ALLMULTI) { 1607 } else if (usbdev->net->flags & IFF_ALLMULTI) {
1636 filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; 1608 filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
1637 } 1609 }
1638 1610
1639 if (filter != basefilter) 1611 if (filter != basefilter)
@@ -1646,7 +1618,7 @@ static void set_multicast_list(struct usbnet *usbdev)
1646 netif_addr_lock_bh(usbdev->net); 1618 netif_addr_lock_bh(usbdev->net);
1647 mc_count = netdev_mc_count(usbdev->net); 1619 mc_count = netdev_mc_count(usbdev->net);
1648 if (mc_count > priv->multicast_size) { 1620 if (mc_count > priv->multicast_size) {
1649 filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; 1621 filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
1650 } else if (mc_count) { 1622 } else if (mc_count) {
1651 int i = 0; 1623 int i = 0;
1652 1624
@@ -1669,27 +1641,28 @@ static void set_multicast_list(struct usbnet *usbdev)
1669 goto set_filter; 1641 goto set_filter;
1670 1642
1671 if (mc_count) { 1643 if (mc_count) {
1672 ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, mc_addrs, 1644 ret = rndis_set_oid(usbdev,
1673 mc_count * ETH_ALEN); 1645 RNDIS_OID_802_3_MULTICAST_LIST,
1646 mc_addrs, mc_count * ETH_ALEN);
1674 kfree(mc_addrs); 1647 kfree(mc_addrs);
1675 if (ret == 0) 1648 if (ret == 0)
1676 filter |= RNDIS_PACKET_TYPE_MULTICAST; 1649 filter |= cpu_to_le32(RNDIS_PACKET_TYPE_MULTICAST);
1677 else 1650 else
1678 filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; 1651 filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
1679 1652
1680 netdev_dbg(usbdev->net, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n", 1653 netdev_dbg(usbdev->net, "RNDIS_OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n",
1681 mc_count, priv->multicast_size, ret); 1654 mc_count, priv->multicast_size, ret);
1682 } 1655 }
1683 1656
1684set_filter: 1657set_filter:
1685 ret = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter, 1658 ret = rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, &filter,
1686 sizeof(filter)); 1659 sizeof(filter));
1687 if (ret < 0) { 1660 if (ret < 0) {
1688 netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n", 1661 netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n",
1689 le32_to_cpu(filter)); 1662 le32_to_cpu(filter));
1690 } 1663 }
1691 1664
1692 netdev_dbg(usbdev->net, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n", 1665 netdev_dbg(usbdev->net, "RNDIS_OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n",
1693 le32_to_cpu(filter), ret); 1666 le32_to_cpu(filter), ret);
1694} 1667}
1695 1668
@@ -1748,9 +1721,10 @@ static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev)
1748 pmkids->length = cpu_to_le32(len); 1721 pmkids->length = cpu_to_le32(len);
1749 pmkids->bssid_info_count = cpu_to_le32(max_pmkids); 1722 pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
1750 1723
1751 ret = rndis_query_oid(usbdev, OID_802_11_PMKID, pmkids, &len); 1724 ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_PMKID,
1725 pmkids, &len);
1752 if (ret < 0) { 1726 if (ret < 0) {
1753 netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d)" 1727 netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_PMKID(%d, %d)"
1754 " -> %d\n", __func__, len, max_pmkids, ret); 1728 " -> %d\n", __func__, len, max_pmkids, ret);
1755 1729
1756 kfree(pmkids); 1730 kfree(pmkids);
@@ -1776,10 +1750,10 @@ static int set_device_pmkids(struct usbnet *usbdev,
1776 1750
1777 debug_print_pmkids(usbdev, pmkids, __func__); 1751 debug_print_pmkids(usbdev, pmkids, __func__);
1778 1752
1779 ret = rndis_set_oid(usbdev, OID_802_11_PMKID, pmkids, 1753 ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_PMKID, pmkids,
1780 le32_to_cpu(pmkids->length)); 1754 le32_to_cpu(pmkids->length));
1781 if (ret < 0) { 1755 if (ret < 0) {
1782 netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d) -> %d" 1756 netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_PMKID(%d, %d) -> %d"
1783 "\n", __func__, len, num_pmkids, ret); 1757 "\n", __func__, len, num_pmkids, ret);
1784 } 1758 }
1785 1759
@@ -1801,8 +1775,8 @@ static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
1801 count = max_pmkids; 1775 count = max_pmkids;
1802 1776
1803 for (i = 0; i < count; i++) 1777 for (i = 0; i < count; i++)
1804 if (!compare_ether_addr(pmkids->bssid_info[i].bssid, 1778 if (ether_addr_equal(pmkids->bssid_info[i].bssid,
1805 pmksa->bssid)) 1779 pmksa->bssid))
1806 break; 1780 break;
1807 1781
1808 /* pmkid not found */ 1782 /* pmkid not found */
@@ -1843,8 +1817,8 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
1843 1817
1844 /* update with new pmkid */ 1818 /* update with new pmkid */
1845 for (i = 0; i < count; i++) { 1819 for (i = 0; i < count; i++) {
1846 if (compare_ether_addr(pmkids->bssid_info[i].bssid, 1820 if (!ether_addr_equal(pmkids->bssid_info[i].bssid,
1847 pmksa->bssid)) 1821 pmksa->bssid))
1848 continue; 1822 continue;
1849 1823
1850 memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid, 1824 memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid,
@@ -2113,7 +2087,8 @@ resize_buf:
2113 * resizing until it won't get any bigger. 2087 * resizing until it won't get any bigger.
2114 */ 2088 */
2115 new_len = len; 2089 new_len = len;
2116 ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &new_len); 2090 ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_BSSID_LIST,
2091 buf, &new_len);
2117 if (ret != 0 || new_len < sizeof(struct ndis_80211_bssid_list_ex)) 2092 if (ret != 0 || new_len < sizeof(struct ndis_80211_bssid_list_ex))
2118 goto out; 2093 goto out;
2119 2094
@@ -2139,7 +2114,7 @@ resize_buf:
2139 while (check_bssid_list_item(bssid, bssid_len, buf, len)) { 2114 while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
2140 if (rndis_bss_info_update(usbdev, bssid) && match_bssid && 2115 if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
2141 matched) { 2116 matched) {
2142 if (compare_ether_addr(bssid->mac, match_bssid)) 2117 if (!ether_addr_equal(bssid->mac, match_bssid))
2143 *matched = true; 2118 *matched = true;
2144 } 2119 }
2145 2120
@@ -2511,14 +2486,15 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
2511 memset(sinfo, 0, sizeof(*sinfo)); 2486 memset(sinfo, 0, sizeof(*sinfo));
2512 2487
2513 len = sizeof(linkspeed); 2488 len = sizeof(linkspeed);
2514 ret = rndis_query_oid(usbdev, OID_GEN_LINK_SPEED, &linkspeed, &len); 2489 ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len);
2515 if (ret == 0) { 2490 if (ret == 0) {
2516 sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000; 2491 sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000;
2517 sinfo->filled |= STATION_INFO_TX_BITRATE; 2492 sinfo->filled |= STATION_INFO_TX_BITRATE;
2518 } 2493 }
2519 2494
2520 len = sizeof(rssi); 2495 len = sizeof(rssi);
2521 ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); 2496 ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
2497 &rssi, &len);
2522 if (ret == 0) { 2498 if (ret == 0) {
2523 sinfo->signal = level_to_qual(le32_to_cpu(rssi)); 2499 sinfo->signal = level_to_qual(le32_to_cpu(rssi));
2524 sinfo->filled |= STATION_INFO_SIGNAL; 2500 sinfo->filled |= STATION_INFO_SIGNAL;
@@ -2531,7 +2507,7 @@ static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
2531 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 2507 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2532 struct usbnet *usbdev = priv->usbdev; 2508 struct usbnet *usbdev = priv->usbdev;
2533 2509
2534 if (compare_ether_addr(priv->bssid, mac)) 2510 if (!ether_addr_equal(priv->bssid, mac))
2535 return -ENOENT; 2511 return -ENOENT;
2536 2512
2537 rndis_fill_station_info(usbdev, sinfo); 2513 rndis_fill_station_info(usbdev, sinfo);
@@ -2624,7 +2600,8 @@ static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
2624 pmkid.length = cpu_to_le32(sizeof(pmkid)); 2600 pmkid.length = cpu_to_le32(sizeof(pmkid));
2625 pmkid.bssid_info_count = cpu_to_le32(0); 2601 pmkid.bssid_info_count = cpu_to_le32(0);
2626 2602
2627 return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid)); 2603 return rndis_set_oid(usbdev, RNDIS_OID_802_11_PMKID,
2604 &pmkid, sizeof(pmkid));
2628} 2605}
2629 2606
2630static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, 2607static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
@@ -2654,9 +2631,10 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
2654 priv->power_mode = power_mode; 2631 priv->power_mode = power_mode;
2655 2632
2656 mode = cpu_to_le32(power_mode); 2633 mode = cpu_to_le32(power_mode);
2657 ret = rndis_set_oid(usbdev, OID_802_11_POWER_MODE, &mode, sizeof(mode)); 2634 ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_POWER_MODE,
2635 &mode, sizeof(mode));
2658 2636
2659 netdev_dbg(usbdev->net, "%s(): OID_802_11_POWER_MODE -> %d\n", 2637 netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_POWER_MODE -> %d\n",
2660 __func__, ret); 2638 __func__, ret);
2661 2639
2662 return ret; 2640 return ret;
@@ -2693,10 +2671,11 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
2693 /* Get signal quality, in case of error use rssi=0 and ignore error. */ 2671 /* Get signal quality, in case of error use rssi=0 and ignore error. */
2694 len = sizeof(rssi); 2672 len = sizeof(rssi);
2695 rssi = 0; 2673 rssi = 0;
2696 ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); 2674 ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
2675 &rssi, &len);
2697 signal = level_to_qual(le32_to_cpu(rssi)); 2676 signal = level_to_qual(le32_to_cpu(rssi));
2698 2677
2699 netdev_dbg(usbdev->net, "%s(): OID_802_11_RSSI -> %d, " 2678 netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_RSSI -> %d, "
2700 "rssi:%d, qual: %d\n", __func__, ret, le32_to_cpu(rssi), 2679 "rssi:%d, qual: %d\n", __func__, ret, le32_to_cpu(rssi),
2701 level_to_qual(le32_to_cpu(rssi))); 2680 level_to_qual(le32_to_cpu(rssi)));
2702 2681
@@ -2720,8 +2699,9 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
2720 /* Get SSID, in case of error, use zero length SSID and ignore error. */ 2699 /* Get SSID, in case of error, use zero length SSID and ignore error. */
2721 len = sizeof(ssid); 2700 len = sizeof(ssid);
2722 memset(&ssid, 0, sizeof(ssid)); 2701 memset(&ssid, 0, sizeof(ssid));
2723 ret = rndis_query_oid(usbdev, OID_802_11_SSID, &ssid, &len); 2702 ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_SSID,
2724 netdev_dbg(usbdev->net, "%s(): OID_802_11_SSID -> %d, len: %d, ssid: " 2703 &ssid, &len);
2704 netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_SSID -> %d, len: %d, ssid: "
2725 "'%.32s'\n", __func__, ret, 2705 "'%.32s'\n", __func__, ret,
2726 le32_to_cpu(ssid.length), ssid.essid); 2706 le32_to_cpu(ssid.length), ssid.essid);
2727 2707
@@ -2843,7 +2823,7 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2843 * NDIS spec says: "If the device is associated, but the associated 2823 * NDIS spec says: "If the device is associated, but the associated
2844 * BSSID is not in its BSSID scan list, then the driver must add an 2824 * BSSID is not in its BSSID scan list, then the driver must add an
2845 * entry for the BSSID at the end of the data that it returns in 2825 * entry for the BSSID at the end of the data that it returns in
2846 * response to query of OID_802_11_BSSID_LIST." 2826 * response to query of RNDIS_OID_802_11_BSSID_LIST."
2847 * 2827 *
2848 * NOTE: Seems to be true for BCM4320b variant, but not BCM4320a. 2828 * NOTE: Seems to be true for BCM4320b variant, but not BCM4320a.
2849 */ 2829 */
@@ -3095,15 +3075,15 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
3095 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 3075 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
3096 struct rndis_indicate *msg = ind; 3076 struct rndis_indicate *msg = ind;
3097 3077
3098 switch (msg->status) { 3078 switch (le32_to_cpu(msg->status)) {
3099 case RNDIS_STATUS_MEDIA_CONNECT: 3079 case RNDIS_STATUS_MEDIA_CONNECT:
3100 if (priv->current_command_oid == OID_802_11_ADD_KEY) { 3080 if (priv->current_command_oid == RNDIS_OID_802_11_ADD_KEY) {
3101 /* OID_802_11_ADD_KEY causes sometimes extra 3081 /* RNDIS_OID_802_11_ADD_KEY causes sometimes extra
3102 * "media connect" indications which confuses driver 3082 * "media connect" indications which confuses driver
3103 * and userspace to think that device is 3083 * and userspace to think that device is
3104 * roaming/reassociating when it isn't. 3084 * roaming/reassociating when it isn't.
3105 */ 3085 */
3106 netdev_dbg(usbdev->net, "ignored OID_802_11_ADD_KEY triggered 'media connect'\n"); 3086 netdev_dbg(usbdev->net, "ignored RNDIS_OID_802_11_ADD_KEY triggered 'media connect'\n");
3107 return; 3087 return;
3108 } 3088 }
3109 3089
@@ -3148,8 +3128,9 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
3148 3128
3149 /* determine supported modes */ 3129 /* determine supported modes */
3150 len = sizeof(networks_supported); 3130 len = sizeof(networks_supported);
3151 retval = rndis_query_oid(usbdev, OID_802_11_NETWORK_TYPES_SUPPORTED, 3131 retval = rndis_query_oid(usbdev,
3152 &networks_supported, &len); 3132 RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED,
3133 &networks_supported, &len);
3153 if (retval >= 0) { 3134 if (retval >= 0) {
3154 n = le32_to_cpu(networks_supported.num_items); 3135 n = le32_to_cpu(networks_supported.num_items);
3155 if (n > 8) 3136 if (n > 8)
@@ -3173,9 +3154,11 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
3173 /* get device 802.11 capabilities, number of PMKIDs */ 3154 /* get device 802.11 capabilities, number of PMKIDs */
3174 caps = (struct ndis_80211_capability *)caps_buf; 3155 caps = (struct ndis_80211_capability *)caps_buf;
3175 len = sizeof(caps_buf); 3156 len = sizeof(caps_buf);
3176 retval = rndis_query_oid(usbdev, OID_802_11_CAPABILITY, caps, &len); 3157 retval = rndis_query_oid(usbdev,
3158 RNDIS_OID_802_11_CAPABILITY,
3159 caps, &len);
3177 if (retval >= 0) { 3160 if (retval >= 0) {
3178 netdev_dbg(usbdev->net, "OID_802_11_CAPABILITY -> len %d, " 3161 netdev_dbg(usbdev->net, "RNDIS_OID_802_11_CAPABILITY -> len %d, "
3179 "ver %d, pmkids %d, auth-encr-pairs %d\n", 3162 "ver %d, pmkids %d, auth-encr-pairs %d\n",
3180 le32_to_cpu(caps->length), 3163 le32_to_cpu(caps->length),
3181 le32_to_cpu(caps->version), 3164 le32_to_cpu(caps->version),
@@ -3247,13 +3230,14 @@ static void rndis_device_poller(struct work_struct *work)
3247 } 3230 }
3248 3231
3249 len = sizeof(rssi); 3232 len = sizeof(rssi);
3250 ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); 3233 ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
3234 &rssi, &len);
3251 if (ret == 0) { 3235 if (ret == 0) {
3252 priv->last_qual = level_to_qual(le32_to_cpu(rssi)); 3236 priv->last_qual = level_to_qual(le32_to_cpu(rssi));
3253 rndis_do_cqm(usbdev, le32_to_cpu(rssi)); 3237 rndis_do_cqm(usbdev, le32_to_cpu(rssi));
3254 } 3238 }
3255 3239
3256 netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n", 3240 netdev_dbg(usbdev->net, "dev-poller: RNDIS_OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
3257 ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi))); 3241 ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
3258 3242
3259 /* Workaround transfer stalls on poor quality links. 3243 /* Workaround transfer stalls on poor quality links.
@@ -3275,15 +3259,18 @@ static void rndis_device_poller(struct work_struct *work)
3275 * working. 3259 * working.
3276 */ 3260 */
3277 tmp = cpu_to_le32(1); 3261 tmp = cpu_to_le32(1);
3278 rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, 3262 rndis_set_oid(usbdev,
3279 sizeof(tmp)); 3263 RNDIS_OID_802_11_BSSID_LIST_SCAN,
3264 &tmp, sizeof(tmp));
3280 3265
3281 len = CONTROL_BUFFER_SIZE; 3266 len = CONTROL_BUFFER_SIZE;
3282 buf = kmalloc(len, GFP_KERNEL); 3267 buf = kmalloc(len, GFP_KERNEL);
3283 if (!buf) 3268 if (!buf)
3284 goto end; 3269 goto end;
3285 3270
3286 rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len); 3271 rndis_query_oid(usbdev,
3272 RNDIS_OID_802_11_BSSID_LIST,
3273 buf, &len);
3287 kfree(buf); 3274 kfree(buf);
3288 } 3275 }
3289 3276
@@ -3465,13 +3452,15 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
3465 */ 3452 */
3466 usbdev->net->netdev_ops = &rndis_wlan_netdev_ops; 3453 usbdev->net->netdev_ops = &rndis_wlan_netdev_ops;
3467 3454
3468 tmp = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST; 3455 tmp = cpu_to_le32(RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST);
3469 retval = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &tmp, 3456 retval = rndis_set_oid(usbdev,
3470 sizeof(tmp)); 3457 RNDIS_OID_GEN_CURRENT_PACKET_FILTER,
3458 &tmp, sizeof(tmp));
3471 3459
3472 len = sizeof(tmp); 3460 len = sizeof(tmp);
3473 retval = rndis_query_oid(usbdev, OID_802_3_MAXIMUM_LIST_SIZE, &tmp, 3461 retval = rndis_query_oid(usbdev,
3474 &len); 3462 RNDIS_OID_802_3_MAXIMUM_LIST_SIZE,
3463 &tmp, &len);
3475 priv->multicast_size = le32_to_cpu(tmp); 3464 priv->multicast_size = le32_to_cpu(tmp);
3476 if (retval < 0 || priv->multicast_size < 0) 3465 if (retval < 0 || priv->multicast_size < 0)
3477 priv->multicast_size = 0; 3466 priv->multicast_size = 0;
@@ -3601,7 +3590,7 @@ static int rndis_wlan_stop(struct usbnet *usbdev)
3601 /* Set current packet filter zero to block receiving data packets from 3590 /* Set current packet filter zero to block receiving data packets from
3602 device. */ 3591 device. */
3603 filter = 0; 3592 filter = 0;
3604 rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter, 3593 rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, &filter,
3605 sizeof(filter)); 3594 sizeof(filter));
3606 3595
3607 return retval; 3596 return retval;
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 3a6b40239bc1..5e6b50143165 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1828,15 +1828,4 @@ static struct pci_driver rt2400pci_driver = {
1828 .resume = rt2x00pci_resume, 1828 .resume = rt2x00pci_resume,
1829}; 1829};
1830 1830
1831static int __init rt2400pci_init(void) 1831module_pci_driver(rt2400pci_driver);
1832{
1833 return pci_register_driver(&rt2400pci_driver);
1834}
1835
1836static void __exit rt2400pci_exit(void)
1837{
1838 pci_unregister_driver(&rt2400pci_driver);
1839}
1840
1841module_init(rt2400pci_init);
1842module_exit(rt2400pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index dcc0e1fcca77..136b849f11b5 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -2119,15 +2119,4 @@ static struct pci_driver rt2500pci_driver = {
2119 .resume = rt2x00pci_resume, 2119 .resume = rt2x00pci_resume,
2120}; 2120};
2121 2121
2122static int __init rt2500pci_init(void) 2122module_pci_driver(rt2500pci_driver);
2123{
2124 return pci_register_driver(&rt2500pci_driver);
2125}
2126
2127static void __exit rt2500pci_exit(void)
2128{
2129 pci_unregister_driver(&rt2500pci_driver);
2130}
2131
2132module_init(rt2500pci_init);
2133module_exit(rt2500pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 1de9c752c88b..c88fd3e61090 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1912,7 +1912,7 @@ static struct usb_device_id rt2500usb_device_table[] = {
1912 { USB_DEVICE(0x0b05, 0x1706) }, 1912 { USB_DEVICE(0x0b05, 0x1706) },
1913 { USB_DEVICE(0x0b05, 0x1707) }, 1913 { USB_DEVICE(0x0b05, 0x1707) },
1914 /* Belkin */ 1914 /* Belkin */
1915 { USB_DEVICE(0x050d, 0x7050) }, 1915 { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050A ver. 2.x */
1916 { USB_DEVICE(0x050d, 0x7051) }, 1916 { USB_DEVICE(0x050d, 0x7051) },
1917 /* Cisco Systems */ 1917 /* Cisco Systems */
1918 { USB_DEVICE(0x13b1, 0x000d) }, 1918 { USB_DEVICE(0x13b1, 0x000d) },
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 063bfa8b91f4..9348521e0832 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -83,6 +83,7 @@
83#define REV_RT3090E 0x0211 83#define REV_RT3090E 0x0211
84#define REV_RT3390E 0x0211 84#define REV_RT3390E 0x0211
85#define REV_RT5390F 0x0502 85#define REV_RT5390F 0x0502
86#define REV_RT5390R 0x1502
86 87
87/* 88/*
88 * Signal information. 89 * Signal information.
@@ -98,9 +99,11 @@
98#define EEPROM_BASE 0x0000 99#define EEPROM_BASE 0x0000
99#define EEPROM_SIZE 0x0110 100#define EEPROM_SIZE 0x0110
100#define BBP_BASE 0x0000 101#define BBP_BASE 0x0000
101#define BBP_SIZE 0x0080 102#define BBP_SIZE 0x00ff
102#define RF_BASE 0x0004 103#define RF_BASE 0x0004
103#define RF_SIZE 0x0010 104#define RF_SIZE 0x0010
105#define RFCSR_BASE 0x0000
106#define RFCSR_SIZE 0x0040
104 107
105/* 108/*
106 * Number of TX queues. 109 * Number of TX queues.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 6c0a12ea6a15..dfc90d34be6d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -290,11 +290,25 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
290 msleep(10); 290 msleep(10);
291 } 291 }
292 292
293 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n"); 293 ERROR(rt2x00dev, "WPDMA TX/RX busy [0x%08x].\n", reg);
294 return -EACCES; 294 return -EACCES;
295} 295}
296EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready); 296EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
297 297
298void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev)
299{
300 u32 reg;
301
302 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
303 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
304 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
305 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
306 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
307 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
308 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
309}
310EXPORT_SYMBOL_GPL(rt2800_disable_wpdma);
311
298static bool rt2800_check_firmware_crc(const u8 *data, const size_t len) 312static bool rt2800_check_firmware_crc(const u8 *data, const size_t len)
299{ 313{
300 u16 fw_crc; 314 u16 fw_crc;
@@ -412,6 +426,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
412 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); 426 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
413 } 427 }
414 428
429 rt2800_disable_wpdma(rt2x00dev);
430
415 /* 431 /*
416 * Write firmware to the device. 432 * Write firmware to the device.
417 */ 433 */
@@ -436,10 +452,7 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
436 * Disable DMA, will be reenabled later when enabling 452 * Disable DMA, will be reenabled later when enabling
437 * the radio. 453 * the radio.
438 */ 454 */
439 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 455 rt2800_disable_wpdma(rt2x00dev);
440 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
441 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
442 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
443 456
444 /* 457 /*
445 * Initialize firmware. 458 * Initialize firmware.
@@ -823,6 +836,13 @@ const struct rt2x00debug rt2800_rt2x00debug = {
823 .word_size = sizeof(u32), 836 .word_size = sizeof(u32),
824 .word_count = RF_SIZE / sizeof(u32), 837 .word_count = RF_SIZE / sizeof(u32),
825 }, 838 },
839 .rfcsr = {
840 .read = rt2800_rfcsr_read,
841 .write = rt2800_rfcsr_write,
842 .word_base = RFCSR_BASE,
843 .word_size = sizeof(u8),
844 .word_count = RFCSR_SIZE / sizeof(u8),
845 },
826}; 846};
827EXPORT_SYMBOL_GPL(rt2800_rt2x00debug); 847EXPORT_SYMBOL_GPL(rt2800_rt2x00debug);
828#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 848#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
@@ -2717,13 +2737,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2717 unsigned int i; 2737 unsigned int i;
2718 int ret; 2738 int ret;
2719 2739
2720 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 2740 rt2800_disable_wpdma(rt2x00dev);
2721 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
2722 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
2723 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
2724 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
2725 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
2726 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
2727 2741
2728 ret = rt2800_drv_init_registers(rt2x00dev); 2742 ret = rt2800_drv_init_registers(rt2x00dev);
2729 if (ret) 2743 if (ret)
@@ -3349,6 +3363,13 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3349 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); 3363 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
3350 } 3364 }
3351 3365
3366 /* This chip has hardware antenna diversity*/
3367 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
3368 rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */
3369 rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */
3370 rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
3371 }
3372
3352 rt2800_bbp_read(rt2x00dev, 152, &value); 3373 rt2800_bbp_read(rt2x00dev, 152, &value);
3353 if (ant == 0) 3374 if (ant == 0)
3354 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); 3375 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
@@ -3997,10 +4018,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
3997{ 4018{
3998 u32 reg; 4019 u32 reg;
3999 4020
4000 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 4021 rt2800_disable_wpdma(rt2x00dev);
4001 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
4002 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
4003 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
4004 4022
4005 /* Wait for DMA, ignore error */ 4023 /* Wait for DMA, ignore error */
4006 rt2800_wait_wpdma_ready(rt2x00dev); 4024 rt2800_wait_wpdma_ready(rt2x00dev);
@@ -4287,6 +4305,11 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4287 rt2x00dev->default_ant.rx = ANTENNA_A; 4305 rt2x00dev->default_ant.rx = ANTENNA_A;
4288 } 4306 }
4289 4307
4308 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
4309 rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; /* Unused */
4310 rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* Unused */
4311 }
4312
4290 /* 4313 /*
4291 * Determine external LNA informations. 4314 * Determine external LNA informations.
4292 */ 4315 */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 419e36cb06be..18a0b67b4c68 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -208,5 +208,6 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
208 u8 buf_size); 208 u8 buf_size);
209int rt2800_get_survey(struct ieee80211_hw *hw, int idx, 209int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
210 struct survey_info *survey); 210 struct survey_info *survey);
211void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
211 212
212#endif /* RT2800LIB_H */ 213#endif /* RT2800LIB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 0397bbf0ce01..931331d95217 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -361,7 +361,6 @@ static void rt2800pci_clear_entry(struct queue_entry *entry)
361static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) 361static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
362{ 362{
363 struct queue_entry_priv_pci *entry_priv; 363 struct queue_entry_priv_pci *entry_priv;
364 u32 reg;
365 364
366 /* 365 /*
367 * Initialize registers. 366 * Initialize registers.
@@ -394,6 +393,16 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
394 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0); 393 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0);
395 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0); 394 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0);
396 395
396 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR4, 0);
397 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT4, 0);
398 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX4, 0);
399 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX4, 0);
400
401 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR5, 0);
402 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT5, 0);
403 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX5, 0);
404 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX5, 0);
405
397 entry_priv = rt2x00dev->rx->entries[0].priv_data; 406 entry_priv = rt2x00dev->rx->entries[0].priv_data;
398 rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma); 407 rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma);
399 rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT, 408 rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT,
@@ -402,14 +411,7 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
402 rt2x00dev->rx[0].limit - 1); 411 rt2x00dev->rx[0].limit - 1);
403 rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0); 412 rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0);
404 413
405 /* 414 rt2800_disable_wpdma(rt2x00dev);
406 * Enable global DMA configuration
407 */
408 rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
409 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
410 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
411 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
412 rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
413 415
414 rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0); 416 rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0);
415 417
@@ -504,8 +506,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
504{ 506{
505 int retval; 507 int retval;
506 508
507 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) || 509 /* Wait for DMA, ignore error until we initialize queues. */
508 rt2800pci_init_queues(rt2x00dev))) 510 rt2800_wait_wpdma_ready(rt2x00dev);
511
512 if (unlikely(rt2800pci_init_queues(rt2x00dev)))
509 return -EIO; 513 return -EIO;
510 514
511 retval = rt2800_enable_radio(rt2x00dev); 515 retval = rt2800_enable_radio(rt2x00dev);
@@ -1184,7 +1188,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1184 { PCI_DEVICE(0x1814, 0x3593) }, 1188 { PCI_DEVICE(0x1814, 0x3593) },
1185#endif 1189#endif
1186#ifdef CONFIG_RT2800PCI_RT53XX 1190#ifdef CONFIG_RT2800PCI_RT53XX
1191 { PCI_DEVICE(0x1814, 0x5362) },
1187 { PCI_DEVICE(0x1814, 0x5390) }, 1192 { PCI_DEVICE(0x1814, 0x5390) },
1193 { PCI_DEVICE(0x1814, 0x5392) },
1188 { PCI_DEVICE(0x1814, 0x539a) }, 1194 { PCI_DEVICE(0x1814, 0x539a) },
1189 { PCI_DEVICE(0x1814, 0x539f) }, 1195 { PCI_DEVICE(0x1814, 0x539f) },
1190#endif 1196#endif
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 001735f7a661..5601302d09ad 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -922,6 +922,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
922 { USB_DEVICE(0x1482, 0x3c09) }, 922 { USB_DEVICE(0x1482, 0x3c09) },
923 /* AirTies */ 923 /* AirTies */
924 { USB_DEVICE(0x1eda, 0x2012) }, 924 { USB_DEVICE(0x1eda, 0x2012) },
925 { USB_DEVICE(0x1eda, 0x2210) },
925 { USB_DEVICE(0x1eda, 0x2310) }, 926 { USB_DEVICE(0x1eda, 0x2310) },
926 /* Allwin */ 927 /* Allwin */
927 { USB_DEVICE(0x8516, 0x2070) }, 928 { USB_DEVICE(0x8516, 0x2070) },
@@ -991,6 +992,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
991 /* DVICO */ 992 /* DVICO */
992 { USB_DEVICE(0x0fe9, 0xb307) }, 993 { USB_DEVICE(0x0fe9, 0xb307) },
993 /* Edimax */ 994 /* Edimax */
995 { USB_DEVICE(0x7392, 0x4085) },
994 { USB_DEVICE(0x7392, 0x7711) }, 996 { USB_DEVICE(0x7392, 0x7711) },
995 { USB_DEVICE(0x7392, 0x7717) }, 997 { USB_DEVICE(0x7392, 0x7717) },
996 { USB_DEVICE(0x7392, 0x7718) }, 998 { USB_DEVICE(0x7392, 0x7718) },
@@ -1066,6 +1068,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1066 /* Philips */ 1068 /* Philips */
1067 { USB_DEVICE(0x0471, 0x200f) }, 1069 { USB_DEVICE(0x0471, 0x200f) },
1068 /* Planex */ 1070 /* Planex */
1071 { USB_DEVICE(0x2019, 0x5201) },
1069 { USB_DEVICE(0x2019, 0xab25) }, 1072 { USB_DEVICE(0x2019, 0xab25) },
1070 { USB_DEVICE(0x2019, 0xed06) }, 1073 { USB_DEVICE(0x2019, 0xed06) },
1071 /* Quanta */ 1074 /* Quanta */
@@ -1134,6 +1137,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
1134#ifdef CONFIG_RT2800USB_RT33XX 1137#ifdef CONFIG_RT2800USB_RT33XX
1135 /* Belkin */ 1138 /* Belkin */
1136 { USB_DEVICE(0x050d, 0x945b) }, 1139 { USB_DEVICE(0x050d, 0x945b) },
1140 /* Panasonic */
1141 { USB_DEVICE(0x083a, 0xb511) },
1142 /* Philips */
1143 { USB_DEVICE(0x0471, 0x20dd) },
1137 /* Ralink */ 1144 /* Ralink */
1138 { USB_DEVICE(0x148f, 0x3370) }, 1145 { USB_DEVICE(0x148f, 0x3370) },
1139 { USB_DEVICE(0x148f, 0x8070) }, 1146 { USB_DEVICE(0x148f, 0x8070) },
@@ -1145,6 +1152,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1145 { USB_DEVICE(0x8516, 0x3572) }, 1152 { USB_DEVICE(0x8516, 0x3572) },
1146 /* Askey */ 1153 /* Askey */
1147 { USB_DEVICE(0x1690, 0x0744) }, 1154 { USB_DEVICE(0x1690, 0x0744) },
1155 { USB_DEVICE(0x1690, 0x0761) },
1156 { USB_DEVICE(0x1690, 0x0764) },
1148 /* Cisco */ 1157 /* Cisco */
1149 { USB_DEVICE(0x167b, 0x4001) }, 1158 { USB_DEVICE(0x167b, 0x4001) },
1150 /* EnGenius */ 1159 /* EnGenius */
@@ -1159,20 +1168,25 @@ static struct usb_device_id rt2800usb_device_table[] = {
1159 /* Sitecom */ 1168 /* Sitecom */
1160 { USB_DEVICE(0x0df6, 0x0041) }, 1169 { USB_DEVICE(0x0df6, 0x0041) },
1161 { USB_DEVICE(0x0df6, 0x0062) }, 1170 { USB_DEVICE(0x0df6, 0x0062) },
1171 { USB_DEVICE(0x0df6, 0x0065) },
1172 { USB_DEVICE(0x0df6, 0x0066) },
1173 { USB_DEVICE(0x0df6, 0x0068) },
1162 /* Toshiba */ 1174 /* Toshiba */
1163 { USB_DEVICE(0x0930, 0x0a07) }, 1175 { USB_DEVICE(0x0930, 0x0a07) },
1164 /* Zinwell */ 1176 /* Zinwell */
1165 { USB_DEVICE(0x5a57, 0x0284) }, 1177 { USB_DEVICE(0x5a57, 0x0284) },
1166#endif 1178#endif
1167#ifdef CONFIG_RT2800USB_RT53XX 1179#ifdef CONFIG_RT2800USB_RT53XX
1168 /* Alpha */
1169 { USB_DEVICE(0x2001, 0x3c15) },
1170 { USB_DEVICE(0x2001, 0x3c19) },
1171 /* Arcadyan */ 1180 /* Arcadyan */
1172 { USB_DEVICE(0x043e, 0x7a12) }, 1181 { USB_DEVICE(0x043e, 0x7a12) },
1173 /* Azurewave */ 1182 /* Azurewave */
1174 { USB_DEVICE(0x13d3, 0x3329) }, 1183 { USB_DEVICE(0x13d3, 0x3329) },
1175 { USB_DEVICE(0x13d3, 0x3365) }, 1184 { USB_DEVICE(0x13d3, 0x3365) },
1185 /* D-Link */
1186 { USB_DEVICE(0x2001, 0x3c15) },
1187 { USB_DEVICE(0x2001, 0x3c19) },
1188 { USB_DEVICE(0x2001, 0x3c1c) },
1189 { USB_DEVICE(0x2001, 0x3c1d) },
1176 /* LG innotek */ 1190 /* LG innotek */
1177 { USB_DEVICE(0x043e, 0x7a22) }, 1191 { USB_DEVICE(0x043e, 0x7a22) },
1178 /* Panasonic */ 1192 /* Panasonic */
@@ -1224,12 +1238,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1224 { USB_DEVICE(0x07d1, 0x3c0b) }, 1238 { USB_DEVICE(0x07d1, 0x3c0b) },
1225 { USB_DEVICE(0x07d1, 0x3c17) }, 1239 { USB_DEVICE(0x07d1, 0x3c17) },
1226 { USB_DEVICE(0x2001, 0x3c17) }, 1240 { USB_DEVICE(0x2001, 0x3c17) },
1227 /* Edimax */
1228 { USB_DEVICE(0x7392, 0x4085) },
1229 /* Encore */ 1241 /* Encore */
1230 { USB_DEVICE(0x203d, 0x14a1) }, 1242 { USB_DEVICE(0x203d, 0x14a1) },
1231 /* Fujitsu Stylistic 550 */
1232 { USB_DEVICE(0x1690, 0x0761) },
1233 /* Gemtek */ 1243 /* Gemtek */
1234 { USB_DEVICE(0x15a9, 0x0010) }, 1244 { USB_DEVICE(0x15a9, 0x0010) },
1235 /* Gigabyte */ 1245 /* Gigabyte */
@@ -1250,7 +1260,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
1250 { USB_DEVICE(0x05a6, 0x0101) }, 1260 { USB_DEVICE(0x05a6, 0x0101) },
1251 { USB_DEVICE(0x1d4d, 0x0010) }, 1261 { USB_DEVICE(0x1d4d, 0x0010) },
1252 /* Planex */ 1262 /* Planex */
1253 { USB_DEVICE(0x2019, 0x5201) },
1254 { USB_DEVICE(0x2019, 0xab24) }, 1263 { USB_DEVICE(0x2019, 0xab24) },
1255 /* Qcom */ 1264 /* Qcom */
1256 { USB_DEVICE(0x18e8, 0x6259) }, 1265 { USB_DEVICE(0x18e8, 0x6259) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 471f87cab4ab..ca36cccaba31 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -692,6 +692,8 @@ enum rt2x00_state_flags {
692 */ 692 */
693 CONFIG_CHANNEL_HT40, 693 CONFIG_CHANNEL_HT40,
694 CONFIG_POWERSAVING, 694 CONFIG_POWERSAVING,
695 CONFIG_HT_DISABLED,
696 CONFIG_QOS_DISABLED,
695 697
696 /* 698 /*
697 * Mark we currently are sequentially reading TX_STA_FIFO register 699 * Mark we currently are sequentially reading TX_STA_FIFO register
@@ -1280,7 +1282,7 @@ void rt2x00lib_dmadone(struct queue_entry *entry);
1280void rt2x00lib_txdone(struct queue_entry *entry, 1282void rt2x00lib_txdone(struct queue_entry *entry,
1281 struct txdone_entry_desc *txdesc); 1283 struct txdone_entry_desc *txdesc);
1282void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status); 1284void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status);
1283void rt2x00lib_rxdone(struct queue_entry *entry); 1285void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
1284 1286
1285/* 1287/*
1286 * mac80211 handlers. 1288 * mac80211 handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 293676bfa571..e7361d913e8e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -217,6 +217,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
217 libconf.conf = conf; 217 libconf.conf = conf;
218 218
219 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) { 219 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) {
220 if (!conf_is_ht(conf))
221 set_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags);
222 else
223 clear_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags);
224
220 if (conf_is_ht40(conf)) { 225 if (conf_is_ht40(conf)) {
221 set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); 226 set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
222 hw_value = rt2x00ht_center_channel(rt2x00dev, conf); 227 hw_value = rt2x00ht_center_channel(rt2x00dev, conf);
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 78787fcc919e..3bb8cafbac59 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -70,6 +70,7 @@ struct rt2x00debug_intf {
70 * - eeprom offset/value files 70 * - eeprom offset/value files
71 * - bbp offset/value files 71 * - bbp offset/value files
72 * - rf offset/value files 72 * - rf offset/value files
73 * - rfcsr offset/value files
73 * - queue folder 74 * - queue folder
74 * - frame dump file 75 * - frame dump file
75 * - queue stats file 76 * - queue stats file
@@ -89,6 +90,8 @@ struct rt2x00debug_intf {
89 struct dentry *bbp_val_entry; 90 struct dentry *bbp_val_entry;
90 struct dentry *rf_off_entry; 91 struct dentry *rf_off_entry;
91 struct dentry *rf_val_entry; 92 struct dentry *rf_val_entry;
93 struct dentry *rfcsr_off_entry;
94 struct dentry *rfcsr_val_entry;
92 struct dentry *queue_folder; 95 struct dentry *queue_folder;
93 struct dentry *queue_frame_dump_entry; 96 struct dentry *queue_frame_dump_entry;
94 struct dentry *queue_stats_entry; 97 struct dentry *queue_stats_entry;
@@ -131,6 +134,7 @@ struct rt2x00debug_intf {
131 unsigned int offset_eeprom; 134 unsigned int offset_eeprom;
132 unsigned int offset_bbp; 135 unsigned int offset_bbp;
133 unsigned int offset_rf; 136 unsigned int offset_rf;
137 unsigned int offset_rfcsr;
134}; 138};
135 139
136void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, 140void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
@@ -525,6 +529,7 @@ RT2X00DEBUGFS_OPS(csr, "0x%.8x\n", u32);
525RT2X00DEBUGFS_OPS(eeprom, "0x%.4x\n", u16); 529RT2X00DEBUGFS_OPS(eeprom, "0x%.4x\n", u16);
526RT2X00DEBUGFS_OPS(bbp, "0x%.2x\n", u8); 530RT2X00DEBUGFS_OPS(bbp, "0x%.2x\n", u8);
527RT2X00DEBUGFS_OPS(rf, "0x%.8x\n", u32); 531RT2X00DEBUGFS_OPS(rf, "0x%.8x\n", u32);
532RT2X00DEBUGFS_OPS(rfcsr, "0x%.2x\n", u8);
528 533
529static ssize_t rt2x00debug_read_dev_flags(struct file *file, 534static ssize_t rt2x00debug_read_dev_flags(struct file *file,
530 char __user *buf, 535 char __user *buf,
@@ -614,7 +619,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
614 const struct rt2x00debug *debug = intf->debug; 619 const struct rt2x00debug *debug = intf->debug;
615 char *data; 620 char *data;
616 621
617 data = kzalloc(8 * MAX_LINE_LENGTH, GFP_KERNEL); 622 data = kzalloc(9 * MAX_LINE_LENGTH, GFP_KERNEL);
618 if (!data) 623 if (!data)
619 return NULL; 624 return NULL;
620 625
@@ -624,22 +629,22 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
624 data += sprintf(data, "revision:\t%04x\n", intf->rt2x00dev->chip.rev); 629 data += sprintf(data, "revision:\t%04x\n", intf->rt2x00dev->chip.rev);
625 data += sprintf(data, "\n"); 630 data += sprintf(data, "\n");
626 data += sprintf(data, "register\tbase\twords\twordsize\n"); 631 data += sprintf(data, "register\tbase\twords\twordsize\n");
627 data += sprintf(data, "csr\t%d\t%d\t%d\n", 632#define RT2X00DEBUGFS_SPRINTF_REGISTER(__name) \
628 debug->csr.word_base, 633{ \
629 debug->csr.word_count, 634 if(debug->__name.read) \
630 debug->csr.word_size); 635 data += sprintf(data, __stringify(__name) \
631 data += sprintf(data, "eeprom\t%d\t%d\t%d\n", 636 "\t%d\t%d\t%d\n", \
632 debug->eeprom.word_base, 637 debug->__name.word_base, \
633 debug->eeprom.word_count, 638 debug->__name.word_count, \
634 debug->eeprom.word_size); 639 debug->__name.word_size); \
635 data += sprintf(data, "bbp\t%d\t%d\t%d\n", 640}
636 debug->bbp.word_base, 641 RT2X00DEBUGFS_SPRINTF_REGISTER(csr);
637 debug->bbp.word_count, 642 RT2X00DEBUGFS_SPRINTF_REGISTER(eeprom);
638 debug->bbp.word_size); 643 RT2X00DEBUGFS_SPRINTF_REGISTER(bbp);
639 data += sprintf(data, "rf\t%d\t%d\t%d\n", 644 RT2X00DEBUGFS_SPRINTF_REGISTER(rf);
640 debug->rf.word_base, 645 RT2X00DEBUGFS_SPRINTF_REGISTER(rfcsr);
641 debug->rf.word_count, 646#undef RT2X00DEBUGFS_SPRINTF_REGISTER
642 debug->rf.word_size); 647
643 blob->size = strlen(blob->data); 648 blob->size = strlen(blob->data);
644 649
645 return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob); 650 return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob);
@@ -694,31 +699,34 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
694 if (IS_ERR(intf->register_folder) || !intf->register_folder) 699 if (IS_ERR(intf->register_folder) || !intf->register_folder)
695 goto exit; 700 goto exit;
696 701
697#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ 702#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \
698({ \ 703({ \
699 (__intf)->__name##_off_entry = \ 704 if(debug->__name.read) { \
700 debugfs_create_u32(__stringify(__name) "_offset", \ 705 (__intf)->__name##_off_entry = \
701 S_IRUSR | S_IWUSR, \ 706 debugfs_create_u32(__stringify(__name) "_offset", \
702 (__intf)->register_folder, \ 707 S_IRUSR | S_IWUSR, \
703 &(__intf)->offset_##__name); \ 708 (__intf)->register_folder, \
704 if (IS_ERR((__intf)->__name##_off_entry) \ 709 &(__intf)->offset_##__name); \
705 || !(__intf)->__name##_off_entry) \ 710 if (IS_ERR((__intf)->__name##_off_entry) \
706 goto exit; \ 711 || !(__intf)->__name##_off_entry) \
707 \ 712 goto exit; \
708 (__intf)->__name##_val_entry = \ 713 \
709 debugfs_create_file(__stringify(__name) "_value", \ 714 (__intf)->__name##_val_entry = \
710 S_IRUSR | S_IWUSR, \ 715 debugfs_create_file(__stringify(__name) "_value", \
711 (__intf)->register_folder, \ 716 S_IRUSR | S_IWUSR, \
712 (__intf), &rt2x00debug_fop_##__name);\ 717 (__intf)->register_folder, \
713 if (IS_ERR((__intf)->__name##_val_entry) \ 718 (__intf), &rt2x00debug_fop_##__name); \
714 || !(__intf)->__name##_val_entry) \ 719 if (IS_ERR((__intf)->__name##_val_entry) \
715 goto exit; \ 720 || !(__intf)->__name##_val_entry) \
721 goto exit; \
722 } \
716}) 723})
717 724
718 RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, csr); 725 RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, csr);
719 RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, eeprom); 726 RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, eeprom);
720 RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, bbp); 727 RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, bbp);
721 RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, rf); 728 RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, rf);
729 RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, rfcsr);
722 730
723#undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY 731#undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY
724 732
@@ -770,6 +778,8 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
770 debugfs_remove(intf->queue_stats_entry); 778 debugfs_remove(intf->queue_stats_entry);
771 debugfs_remove(intf->queue_frame_dump_entry); 779 debugfs_remove(intf->queue_frame_dump_entry);
772 debugfs_remove(intf->queue_folder); 780 debugfs_remove(intf->queue_folder);
781 debugfs_remove(intf->rfcsr_val_entry);
782 debugfs_remove(intf->rfcsr_off_entry);
773 debugfs_remove(intf->rf_val_entry); 783 debugfs_remove(intf->rf_val_entry);
774 debugfs_remove(intf->rf_off_entry); 784 debugfs_remove(intf->rf_off_entry);
775 debugfs_remove(intf->bbp_val_entry); 785 debugfs_remove(intf->bbp_val_entry);
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h
index fa11409cb5c6..e11d39bdfef7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.h
@@ -65,6 +65,7 @@ struct rt2x00debug {
65 RT2X00DEBUGFS_REGISTER_ENTRY(eeprom, u16); 65 RT2X00DEBUGFS_REGISTER_ENTRY(eeprom, u16);
66 RT2X00DEBUGFS_REGISTER_ENTRY(bbp, u8); 66 RT2X00DEBUGFS_REGISTER_ENTRY(bbp, u8);
67 RT2X00DEBUGFS_REGISTER_ENTRY(rf, u32); 67 RT2X00DEBUGFS_REGISTER_ENTRY(rf, u32);
68 RT2X00DEBUGFS_REGISTER_ENTRY(rfcsr, u8);
68}; 69};
69 70
70#endif /* RT2X00DEBUG_H */ 71#endif /* RT2X00DEBUG_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 90cc5e772650..e5404e576251 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -391,9 +391,10 @@ void rt2x00lib_txdone(struct queue_entry *entry,
391 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 391 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
392 tx_info->status.ampdu_len = 1; 392 tx_info->status.ampdu_len = 1;
393 tx_info->status.ampdu_ack_len = success ? 1 : 0; 393 tx_info->status.ampdu_ack_len = success ? 1 : 0;
394 394 /*
395 if (!success) 395 * TODO: Need to tear down BA session here
396 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 396 * if not successful.
397 */
397 } 398 }
398 399
399 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 400 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
@@ -587,7 +588,7 @@ static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
587 return 0; 588 return 0;
588} 589}
589 590
590void rt2x00lib_rxdone(struct queue_entry *entry) 591void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
591{ 592{
592 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 593 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
593 struct rxdone_entry_desc rxdesc; 594 struct rxdone_entry_desc rxdesc;
@@ -607,7 +608,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
607 * Allocate a new sk_buffer. If no new buffer available, drop the 608 * Allocate a new sk_buffer. If no new buffer available, drop the
608 * received frame and reuse the existing buffer. 609 * received frame and reuse the existing buffer.
609 */ 610 */
610 skb = rt2x00queue_alloc_rxskb(entry); 611 skb = rt2x00queue_alloc_rxskb(entry, gfp);
611 if (!skb) 612 if (!skb)
612 goto submit_entry; 613 goto submit_entry;
613 614
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index ca585e34d00e..8679d781a264 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -124,17 +124,15 @@ static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev,
124 124
125void rt2x00leds_register(struct rt2x00_dev *rt2x00dev) 125void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
126{ 126{
127 char dev_name[16]; 127 char name[36];
128 char name[32];
129 int retval; 128 int retval;
130 unsigned long on_period; 129 unsigned long on_period;
131 unsigned long off_period; 130 unsigned long off_period;
132 131 const char *phy_name = wiphy_name(rt2x00dev->hw->wiphy);
133 snprintf(dev_name, sizeof(dev_name), "%s-%s",
134 rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy));
135 132
136 if (rt2x00dev->led_radio.flags & LED_INITIALIZED) { 133 if (rt2x00dev->led_radio.flags & LED_INITIALIZED) {
137 snprintf(name, sizeof(name), "%s::radio", dev_name); 134 snprintf(name, sizeof(name), "%s-%s::radio",
135 rt2x00dev->ops->name, phy_name);
138 136
139 retval = rt2x00leds_register_led(rt2x00dev, 137 retval = rt2x00leds_register_led(rt2x00dev,
140 &rt2x00dev->led_radio, 138 &rt2x00dev->led_radio,
@@ -144,7 +142,8 @@ void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
144 } 142 }
145 143
146 if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) { 144 if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) {
147 snprintf(name, sizeof(name), "%s::assoc", dev_name); 145 snprintf(name, sizeof(name), "%s-%s::assoc",
146 rt2x00dev->ops->name, phy_name);
148 147
149 retval = rt2x00leds_register_led(rt2x00dev, 148 retval = rt2x00leds_register_led(rt2x00dev,
150 &rt2x00dev->led_assoc, 149 &rt2x00dev->led_assoc,
@@ -154,7 +153,8 @@ void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
154 } 153 }
155 154
156 if (rt2x00dev->led_qual.flags & LED_INITIALIZED) { 155 if (rt2x00dev->led_qual.flags & LED_INITIALIZED) {
157 snprintf(name, sizeof(name), "%s::quality", dev_name); 156 snprintf(name, sizeof(name), "%s-%s::quality",
157 rt2x00dev->ops->name, phy_name);
158 158
159 retval = rt2x00leds_register_led(rt2x00dev, 159 retval = rt2x00leds_register_led(rt2x00dev,
160 &rt2x00dev->led_qual, 160 &rt2x00dev->led_qual,
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 78bd43b8961f..a0935987fa3a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -103,7 +103,7 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
103 * rt2x00queue_alloc_rxskb - allocate a skb for RX purposes. 103 * rt2x00queue_alloc_rxskb - allocate a skb for RX purposes.
104 * @entry: The entry for which the skb will be applicable. 104 * @entry: The entry for which the skb will be applicable.
105 */ 105 */
106struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry); 106struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp);
107 107
108/** 108/**
109 * rt2x00queue_free_skb - free a skb 109 * rt2x00queue_free_skb - free a skb
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 2df2eb6d3e06..b49773ef72f2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -709,9 +709,19 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
709 rt2x00dev->intf_associated--; 709 rt2x00dev->intf_associated--;
710 710
711 rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); 711 rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
712
713 clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
712 } 714 }
713 715
714 /* 716 /*
717 * Check for access point which do not support 802.11e . We have to
718 * generate data frames sequence number in S/W for such AP, because
719 * of H/W bug.
720 */
721 if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
722 set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
723
724 /*
715 * When the erp information has changed, we should perform 725 * When the erp information has changed, we should perform
716 * additional configuration steps. For all other changes we are done. 726 * additional configuration steps. For all other changes we are done.
717 */ 727 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 17148bb24426..0a4653a92cab 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -92,7 +92,7 @@ bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
92 /* 92 /*
93 * Send the frame to rt2x00lib for further processing. 93 * Send the frame to rt2x00lib for further processing.
94 */ 94 */
95 rt2x00lib_rxdone(entry); 95 rt2x00lib_rxdone(entry, GFP_ATOMIC);
96 } 96 }
97 97
98 return !max_rx; 98 return !max_rx;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 9b1b2b7a7807..4c662eccf53c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -33,7 +33,7 @@
33#include "rt2x00.h" 33#include "rt2x00.h"
34#include "rt2x00lib.h" 34#include "rt2x00lib.h"
35 35
36struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry) 36struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
37{ 37{
38 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 38 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
39 struct sk_buff *skb; 39 struct sk_buff *skb;
@@ -68,7 +68,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
68 /* 68 /*
69 * Allocate skbuffer. 69 * Allocate skbuffer.
70 */ 70 */
71 skb = dev_alloc_skb(frame_size + head_size + tail_size); 71 skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
72 if (!skb) 72 if (!skb)
73 return NULL; 73 return NULL;
74 74
@@ -213,8 +213,19 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
213 213
214 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 214 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
215 215
216 if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) 216 if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
217 return; 217 /*
218 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
219 * seqno on retransmited data (non-QOS) frames. To workaround
220 * the problem let's generate seqno in software if QOS is
221 * disabled.
222 */
223 if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
224 __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
225 else
226 /* H/W will generate sequence number */
227 return;
228 }
218 229
219 /* 230 /*
220 * The hardware is not able to insert a sequence number. Assign a 231 * The hardware is not able to insert a sequence number. Assign a
@@ -320,14 +331,6 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
320 txdesc->u.ht.wcid = sta_priv->wcid; 331 txdesc->u.ht.wcid = sta_priv->wcid;
321 } 332 }
322 333
323 txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
324
325 /*
326 * Only one STBC stream is supported for now.
327 */
328 if (tx_info->flags & IEEE80211_TX_CTL_STBC)
329 txdesc->u.ht.stbc = 1;
330
331 /* 334 /*
332 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the 335 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
333 * mcs rate to be used 336 * mcs rate to be used
@@ -351,6 +354,24 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
351 txdesc->u.ht.mcs |= 0x08; 354 txdesc->u.ht.mcs |= 0x08;
352 } 355 }
353 356
357 if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
358 if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
359 txdesc->u.ht.txop = TXOP_SIFS;
360 else
361 txdesc->u.ht.txop = TXOP_BACKOFF;
362
363 /* Left zero on all other settings. */
364 return;
365 }
366
367 txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
368
369 /*
370 * Only one STBC stream is supported for now.
371 */
372 if (tx_info->flags & IEEE80211_TX_CTL_STBC)
373 txdesc->u.ht.stbc = 1;
374
354 /* 375 /*
355 * This frame is eligible for an AMPDU, however, don't aggregate 376 * This frame is eligible for an AMPDU, however, don't aggregate
356 * frames that are intended to probe a specific tx rate. 377 * frames that are intended to probe a specific tx rate.
@@ -1142,7 +1163,7 @@ static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
1142 struct sk_buff *skb; 1163 struct sk_buff *skb;
1143 1164
1144 for (i = 0; i < queue->limit; i++) { 1165 for (i = 0; i < queue->limit; i++) {
1145 skb = rt2x00queue_alloc_rxskb(&queue->entries[i]); 1166 skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
1146 if (!skb) 1167 if (!skb)
1147 return -ENOMEM; 1168 return -ENOMEM;
1148 queue->entries[i].skb = skb; 1169 queue->entries[i].skb = skb;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 66094eb21b61..d357d1ed92f6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -358,7 +358,7 @@ static void rt2x00usb_work_rxdone(struct work_struct *work)
358 /* 358 /*
359 * Send the frame to rt2x00lib for further processing. 359 * Send the frame to rt2x00lib for further processing.
360 */ 360 */
361 rt2x00lib_rxdone(entry); 361 rt2x00lib_rxdone(entry, GFP_KERNEL);
362 } 362 }
363} 363}
364 364
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index e0c6d117429d..ee22bd74579d 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -3092,15 +3092,4 @@ static struct pci_driver rt61pci_driver = {
3092 .resume = rt2x00pci_resume, 3092 .resume = rt2x00pci_resume,
3093}; 3093};
3094 3094
3095static int __init rt61pci_init(void) 3095module_pci_driver(rt61pci_driver);
3096{
3097 return pci_register_driver(&rt61pci_driver);
3098}
3099
3100static void __exit rt61pci_exit(void)
3101{
3102 pci_unregister_driver(&rt61pci_driver);
3103}
3104
3105module_init(rt61pci_init);
3106module_exit(rt61pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index e477a964081d..155136691a38 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2412,6 +2412,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2412 { USB_DEVICE(0x0b05, 0x1723) }, 2412 { USB_DEVICE(0x0b05, 0x1723) },
2413 { USB_DEVICE(0x0b05, 0x1724) }, 2413 { USB_DEVICE(0x0b05, 0x1724) },
2414 /* Belkin */ 2414 /* Belkin */
2415 { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050B ver. 3.x */
2415 { USB_DEVICE(0x050d, 0x705a) }, 2416 { USB_DEVICE(0x050d, 0x705a) },
2416 { USB_DEVICE(0x050d, 0x905b) }, 2417 { USB_DEVICE(0x050d, 0x905b) },
2417 { USB_DEVICE(0x050d, 0x905c) }, 2418 { USB_DEVICE(0x050d, 0x905c) },
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 2f14a5fb0cbb..2bebcb71a1e9 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1173,15 +1173,4 @@ static struct pci_driver rtl8180_driver = {
1173#endif /* CONFIG_PM */ 1173#endif /* CONFIG_PM */
1174}; 1174};
1175 1175
1176static int __init rtl8180_init(void) 1176module_pci_driver(rtl8180_driver);
1177{
1178 return pci_register_driver(&rtl8180_driver);
1179}
1180
1181static void __exit rtl8180_exit(void)
1182{
1183 pci_unregister_driver(&rtl8180_driver);
1184}
1185
1186module_init(rtl8180_init);
1187module_exit(rtl8180_exit);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index cf53ac9d6f23..d8114962b0c9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -294,6 +294,7 @@ static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
294 hdr->retry = cpu_to_le32((info->control.rates[0].count - 1) << 8); 294 hdr->retry = cpu_to_le32((info->control.rates[0].count - 1) << 8);
295 hdr->tx_duration = 295 hdr->tx_duration =
296 ieee80211_generic_frame_duration(dev, priv->vif, 296 ieee80211_generic_frame_duration(dev, priv->vif,
297 info->band,
297 skb->len, txrate); 298 skb->len, txrate);
298 buf = hdr; 299 buf = hdr;
299 300
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index e54488db0e10..f4c852c6749b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1460,7 +1460,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
1460 return; 1460 return;
1461 1461
1462 /* and only beacons from the associated BSSID, please */ 1462 /* and only beacons from the associated BSSID, please */
1463 if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid)) 1463 if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
1464 return; 1464 return;
1465 1465
1466 if (rtl_find_221_ie(hw, data, len)) 1466 if (rtl_find_221_ie(hw, data, len))
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 5c7d57947d23..3d8cc4a0c86d 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -328,10 +328,9 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
328 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "sta_addr is NULL\n"); 328 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "sta_addr is NULL\n");
329 } 329 }
330 330
331 if ((sta_addr[0]|sta_addr[1]|sta_addr[2]|sta_addr[3]|\ 331 if (is_zero_ether_addr(sta_addr)) {
332 sta_addr[4]|sta_addr[5]) == 0) {
333 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, 332 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
334 "sta_addr is 00:00:00:00:00:00\n"); 333 "sta_addr is %pM\n", sta_addr);
335 return; 334 return;
336 } 335 }
337 /* Does STA already exist? */ 336 /* Does STA already exist? */
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 288b035a3579..2062ea1d7c80 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -34,6 +34,7 @@
34#include "ps.h" 34#include "ps.h"
35#include "efuse.h" 35#include "efuse.h"
36#include <linux/export.h> 36#include <linux/export.h>
37#include <linux/kmemleak.h>
37 38
38static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = { 39static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
39 PCI_VENDOR_ID_INTEL, 40 PCI_VENDOR_ID_INTEL,
@@ -1099,6 +1100,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1099 u32 bufferaddress; 1100 u32 bufferaddress;
1100 if (!skb) 1101 if (!skb)
1101 return 0; 1102 return 0;
1103 kmemleak_not_leak(skb);
1102 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; 1104 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1103 1105
1104 /*skb->dev = dev; */ 1106 /*skb->dev = dev; */
@@ -1851,14 +1853,6 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
1851 /*like read eeprom and so on */ 1853 /*like read eeprom and so on */
1852 rtlpriv->cfg->ops->read_eeprom_info(hw); 1854 rtlpriv->cfg->ops->read_eeprom_info(hw);
1853 1855
1854 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
1855 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
1856 err = -ENODEV;
1857 goto fail3;
1858 }
1859
1860 rtlpriv->cfg->ops->init_sw_leds(hw);
1861
1862 /*aspm */ 1856 /*aspm */
1863 rtl_pci_init_aspm(hw); 1857 rtl_pci_init_aspm(hw);
1864 1858
@@ -1877,6 +1871,14 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
1877 goto fail3; 1871 goto fail3;
1878 } 1872 }
1879 1873
1874 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
1875 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
1876 err = -ENODEV;
1877 goto fail3;
1878 }
1879
1880 rtlpriv->cfg->ops->init_sw_leds(hw);
1881
1880 err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group); 1882 err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
1881 if (err) { 1883 if (err) {
1882 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1884 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -1941,6 +1943,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
1941 rtl_deinit_deferred_work(hw); 1943 rtl_deinit_deferred_work(hw);
1942 rtlpriv->intf_ops->adapter_stop(hw); 1944 rtlpriv->intf_ops->adapter_stop(hw);
1943 } 1945 }
1946 rtlpriv->cfg->ops->disable_interrupt(hw);
1944 1947
1945 /*deinit rfkill */ 1948 /*deinit rfkill */
1946 rtl_deinit_rfkill(hw); 1949 rtl_deinit_rfkill(hw);
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 5b9c3b5e8c92..5ae26647f340 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -480,7 +480,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
480 return; 480 return;
481 481
482 /* and only beacons from the associated BSSID, please */ 482 /* and only beacons from the associated BSSID, please */
483 if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid)) 483 if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
484 return; 484 return;
485 485
486 rtlpriv->psc.last_beacon = jiffies; 486 rtlpriv->psc.last_beacon = jiffies;
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index c66f08a0524a..d5cbf01da8ac 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -225,8 +225,7 @@ static void rtl_rate_init(void *ppriv,
225static void rtl_rate_update(void *ppriv, 225static void rtl_rate_update(void *ppriv,
226 struct ieee80211_supported_band *sband, 226 struct ieee80211_supported_band *sband,
227 struct ieee80211_sta *sta, void *priv_sta, 227 struct ieee80211_sta *sta, void *priv_sta,
228 u32 changed, 228 u32 changed)
229 enum nl80211_channel_type oper_chan_type)
230{ 229{
231} 230}
232 231
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 1208b753f62f..f7f48c7ac854 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -33,9 +33,6 @@
33#include "../pci.h" 33#include "../pci.h"
34#include "../base.h" 34#include "../base.h"
35 35
36struct dig_t dm_digtable;
37static struct ps_t dm_pstable;
38
39#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) 36#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
40#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) 37#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
41#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) 38#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
@@ -163,33 +160,37 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
163 160
164static void rtl92c_dm_diginit(struct ieee80211_hw *hw) 161static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
165{ 162{
166 dm_digtable.dig_enable_flag = true; 163 struct rtl_priv *rtlpriv = rtl_priv(hw);
167 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 164 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
168 dm_digtable.cur_igvalue = 0x20; 165
169 dm_digtable.pre_igvalue = 0x0; 166 dm_digtable->dig_enable_flag = true;
170 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; 167 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
171 dm_digtable.presta_connectstate = DIG_STA_DISCONNECT; 168 dm_digtable->cur_igvalue = 0x20;
172 dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT; 169 dm_digtable->pre_igvalue = 0x0;
173 dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; 170 dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
174 dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; 171 dm_digtable->presta_connectstate = DIG_STA_DISCONNECT;
175 dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 172 dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
176 dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 173 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
177 dm_digtable.rx_gain_range_max = DM_DIG_MAX; 174 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
178 dm_digtable.rx_gain_range_min = DM_DIG_MIN; 175 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
179 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; 176 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
180 dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX; 177 dm_digtable->rx_gain_range_max = DM_DIG_MAX;
181 dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN; 178 dm_digtable->rx_gain_range_min = DM_DIG_MIN;
182 dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX; 179 dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
183 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; 180 dm_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
181 dm_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
182 dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
183 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
184} 184}
185 185
186static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) 186static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
187{ 187{
188 struct rtl_priv *rtlpriv = rtl_priv(hw); 188 struct rtl_priv *rtlpriv = rtl_priv(hw);
189 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
189 long rssi_val_min = 0; 190 long rssi_val_min = 0;
190 191
191 if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) && 192 if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
192 (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) { 193 (dm_digtable->cursta_connectctate == DIG_STA_CONNECT)) {
193 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0) 194 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
194 rssi_val_min = 195 rssi_val_min =
195 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb > 196 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
@@ -198,10 +199,10 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
198 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 199 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
199 else 200 else
200 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; 201 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
201 } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT || 202 } else if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT ||
202 dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) { 203 dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
203 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; 204 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
204 } else if (dm_digtable.curmultista_connectstate == 205 } else if (dm_digtable->curmultista_connectstate ==
205 DIG_MULTISTA_CONNECT) { 206 DIG_MULTISTA_CONNECT) {
206 rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 207 rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
207 } 208 }
@@ -260,7 +261,8 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
260static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw) 261static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
261{ 262{
262 struct rtl_priv *rtlpriv = rtl_priv(hw); 263 struct rtl_priv *rtlpriv = rtl_priv(hw);
263 u8 value_igi = dm_digtable.cur_igvalue; 264 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
265 u8 value_igi = dm_digtable->cur_igvalue;
264 266
265 if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) 267 if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
266 value_igi--; 268 value_igi--;
@@ -277,43 +279,44 @@ static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
277 if (rtlpriv->falsealm_cnt.cnt_all > 10000) 279 if (rtlpriv->falsealm_cnt.cnt_all > 10000)
278 value_igi = 0x32; 280 value_igi = 0x32;
279 281
280 dm_digtable.cur_igvalue = value_igi; 282 dm_digtable->cur_igvalue = value_igi;
281 rtl92c_dm_write_dig(hw); 283 rtl92c_dm_write_dig(hw);
282} 284}
283 285
284static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw) 286static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
285{ 287{
286 struct rtl_priv *rtlpriv = rtl_priv(hw); 288 struct rtl_priv *rtlpriv = rtl_priv(hw);
289 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
287 290
288 if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) { 291 if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable->fa_highthresh) {
289 if ((dm_digtable.backoff_val - 2) < 292 if ((dm_digtable->backoff_val - 2) <
290 dm_digtable.backoff_val_range_min) 293 dm_digtable->backoff_val_range_min)
291 dm_digtable.backoff_val = 294 dm_digtable->backoff_val =
292 dm_digtable.backoff_val_range_min; 295 dm_digtable->backoff_val_range_min;
293 else 296 else
294 dm_digtable.backoff_val -= 2; 297 dm_digtable->backoff_val -= 2;
295 } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) { 298 } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable->fa_lowthresh) {
296 if ((dm_digtable.backoff_val + 2) > 299 if ((dm_digtable->backoff_val + 2) >
297 dm_digtable.backoff_val_range_max) 300 dm_digtable->backoff_val_range_max)
298 dm_digtable.backoff_val = 301 dm_digtable->backoff_val =
299 dm_digtable.backoff_val_range_max; 302 dm_digtable->backoff_val_range_max;
300 else 303 else
301 dm_digtable.backoff_val += 2; 304 dm_digtable->backoff_val += 2;
302 } 305 }
303 306
304 if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) > 307 if ((dm_digtable->rssi_val_min + 10 - dm_digtable->backoff_val) >
305 dm_digtable.rx_gain_range_max) 308 dm_digtable->rx_gain_range_max)
306 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max; 309 dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_max;
307 else if ((dm_digtable.rssi_val_min + 10 - 310 else if ((dm_digtable->rssi_val_min + 10 -
308 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min) 311 dm_digtable->backoff_val) < dm_digtable->rx_gain_range_min)
309 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min; 312 dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_min;
310 else 313 else
311 dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 - 314 dm_digtable->cur_igvalue = dm_digtable->rssi_val_min + 10 -
312 dm_digtable.backoff_val; 315 dm_digtable->backoff_val;
313 316
314 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 317 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
315 "rssi_val_min = %x backoff_val %x\n", 318 "rssi_val_min = %x backoff_val %x\n",
316 dm_digtable.rssi_val_min, dm_digtable.backoff_val); 319 dm_digtable->rssi_val_min, dm_digtable->backoff_val);
317 320
318 rtl92c_dm_write_dig(hw); 321 rtl92c_dm_write_dig(hw);
319} 322}
@@ -322,6 +325,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
322{ 325{
323 static u8 initialized; /* initialized to false */ 326 static u8 initialized; /* initialized to false */
324 struct rtl_priv *rtlpriv = rtl_priv(hw); 327 struct rtl_priv *rtlpriv = rtl_priv(hw);
328 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
325 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 329 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
326 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 330 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
327 bool multi_sta = false; 331 bool multi_sta = false;
@@ -330,68 +334,69 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
330 multi_sta = true; 334 multi_sta = true;
331 335
332 if (!multi_sta || 336 if (!multi_sta ||
333 dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) { 337 dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
334 initialized = false; 338 initialized = false;
335 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 339 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
336 return; 340 return;
337 } else if (initialized == false) { 341 } else if (initialized == false) {
338 initialized = true; 342 initialized = true;
339 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; 343 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
340 dm_digtable.cur_igvalue = 0x20; 344 dm_digtable->cur_igvalue = 0x20;
341 rtl92c_dm_write_dig(hw); 345 rtl92c_dm_write_dig(hw);
342 } 346 }
343 347
344 if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) { 348 if (dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) {
345 if ((rssi_strength < dm_digtable.rssi_lowthresh) && 349 if ((rssi_strength < dm_digtable->rssi_lowthresh) &&
346 (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) { 350 (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
347 351
348 if (dm_digtable.dig_ext_port_stage == 352 if (dm_digtable->dig_ext_port_stage ==
349 DIG_EXT_PORT_STAGE_2) { 353 DIG_EXT_PORT_STAGE_2) {
350 dm_digtable.cur_igvalue = 0x20; 354 dm_digtable->cur_igvalue = 0x20;
351 rtl92c_dm_write_dig(hw); 355 rtl92c_dm_write_dig(hw);
352 } 356 }
353 357
354 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1; 358 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
355 } else if (rssi_strength > dm_digtable.rssi_highthresh) { 359 } else if (rssi_strength > dm_digtable->rssi_highthresh) {
356 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2; 360 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
357 rtl92c_dm_ctrl_initgain_by_fa(hw); 361 rtl92c_dm_ctrl_initgain_by_fa(hw);
358 } 362 }
359 } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) { 363 } else if (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
360 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; 364 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
361 dm_digtable.cur_igvalue = 0x20; 365 dm_digtable->cur_igvalue = 0x20;
362 rtl92c_dm_write_dig(hw); 366 rtl92c_dm_write_dig(hw);
363 } 367 }
364 368
365 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 369 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
366 "curmultista_connectstate = %x dig_ext_port_stage %x\n", 370 "curmultista_connectstate = %x dig_ext_port_stage %x\n",
367 dm_digtable.curmultista_connectstate, 371 dm_digtable->curmultista_connectstate,
368 dm_digtable.dig_ext_port_stage); 372 dm_digtable->dig_ext_port_stage);
369} 373}
370 374
371static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw) 375static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
372{ 376{
373 struct rtl_priv *rtlpriv = rtl_priv(hw); 377 struct rtl_priv *rtlpriv = rtl_priv(hw);
378 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
374 379
375 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 380 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
376 "presta_connectstate = %x, cursta_connectctate = %x\n", 381 "presta_connectstate = %x, cursta_connectctate = %x\n",
377 dm_digtable.presta_connectstate, 382 dm_digtable->presta_connectstate,
378 dm_digtable.cursta_connectctate); 383 dm_digtable->cursta_connectctate);
379 384
380 if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate 385 if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectctate
381 || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT 386 || dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT
382 || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { 387 || dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
383 388
384 if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) { 389 if (dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
385 dm_digtable.rssi_val_min = 390 dm_digtable->rssi_val_min =
386 rtl92c_dm_initial_gain_min_pwdb(hw); 391 rtl92c_dm_initial_gain_min_pwdb(hw);
387 rtl92c_dm_ctrl_initgain_by_rssi(hw); 392 rtl92c_dm_ctrl_initgain_by_rssi(hw);
388 } 393 }
389 } else { 394 } else {
390 dm_digtable.rssi_val_min = 0; 395 dm_digtable->rssi_val_min = 0;
391 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 396 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
392 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; 397 dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
393 dm_digtable.cur_igvalue = 0x20; 398 dm_digtable->cur_igvalue = 0x20;
394 dm_digtable.pre_igvalue = 0; 399 dm_digtable->pre_igvalue = 0;
395 rtl92c_dm_write_dig(hw); 400 rtl92c_dm_write_dig(hw);
396 } 401 }
397} 402}
@@ -400,40 +405,41 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
400{ 405{
401 struct rtl_priv *rtlpriv = rtl_priv(hw); 406 struct rtl_priv *rtlpriv = rtl_priv(hw);
402 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 407 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
408 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
403 409
404 if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { 410 if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
405 dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); 411 dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
406 412
407 if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { 413 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
408 if (dm_digtable.rssi_val_min <= 25) 414 if (dm_digtable->rssi_val_min <= 25)
409 dm_digtable.cur_cck_pd_state = 415 dm_digtable->cur_cck_pd_state =
410 CCK_PD_STAGE_LowRssi; 416 CCK_PD_STAGE_LowRssi;
411 else 417 else
412 dm_digtable.cur_cck_pd_state = 418 dm_digtable->cur_cck_pd_state =
413 CCK_PD_STAGE_HighRssi; 419 CCK_PD_STAGE_HighRssi;
414 } else { 420 } else {
415 if (dm_digtable.rssi_val_min <= 20) 421 if (dm_digtable->rssi_val_min <= 20)
416 dm_digtable.cur_cck_pd_state = 422 dm_digtable->cur_cck_pd_state =
417 CCK_PD_STAGE_LowRssi; 423 CCK_PD_STAGE_LowRssi;
418 else 424 else
419 dm_digtable.cur_cck_pd_state = 425 dm_digtable->cur_cck_pd_state =
420 CCK_PD_STAGE_HighRssi; 426 CCK_PD_STAGE_HighRssi;
421 } 427 }
422 } else { 428 } else {
423 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; 429 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
424 } 430 }
425 431
426 if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) { 432 if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
427 if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) { 433 if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
428 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800) 434 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
429 dm_digtable.cur_cck_fa_state = 435 dm_digtable->cur_cck_fa_state =
430 CCK_FA_STAGE_High; 436 CCK_FA_STAGE_High;
431 else 437 else
432 dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low; 438 dm_digtable->cur_cck_fa_state = CCK_FA_STAGE_Low;
433 439
434 if (dm_digtable.pre_cck_fa_state != 440 if (dm_digtable->pre_cck_fa_state !=
435 dm_digtable.cur_cck_fa_state) { 441 dm_digtable->cur_cck_fa_state) {
436 if (dm_digtable.cur_cck_fa_state == 442 if (dm_digtable->cur_cck_fa_state ==
437 CCK_FA_STAGE_Low) 443 CCK_FA_STAGE_Low)
438 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 444 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
439 0x83); 445 0x83);
@@ -441,8 +447,8 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
441 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 447 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
442 0xcd); 448 0xcd);
443 449
444 dm_digtable.pre_cck_fa_state = 450 dm_digtable->pre_cck_fa_state =
445 dm_digtable.cur_cck_fa_state; 451 dm_digtable->cur_cck_fa_state;
446 } 452 }
447 453
448 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40); 454 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
@@ -458,11 +464,11 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
458 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 464 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
459 MASKBYTE2, 0xd3); 465 MASKBYTE2, 0xd3);
460 } 466 }
461 dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state; 467 dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
462 } 468 }
463 469
464 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n", 470 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n",
465 dm_digtable.cur_cck_pd_state); 471 dm_digtable->cur_cck_pd_state);
466 472
467 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n", 473 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n",
468 IS_92C_SERIAL(rtlhal->version)); 474 IS_92C_SERIAL(rtlhal->version));
@@ -470,31 +476,34 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
470 476
471static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) 477static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
472{ 478{
479 struct rtl_priv *rtlpriv = rtl_priv(hw);
480 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
473 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 481 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
474 482
475 if (mac->act_scanning) 483 if (mac->act_scanning)
476 return; 484 return;
477 485
478 if (mac->link_state >= MAC80211_LINKED) 486 if (mac->link_state >= MAC80211_LINKED)
479 dm_digtable.cursta_connectctate = DIG_STA_CONNECT; 487 dm_digtable->cursta_connectctate = DIG_STA_CONNECT;
480 else 488 else
481 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; 489 dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
482 490
483 rtl92c_dm_initial_gain_sta(hw); 491 rtl92c_dm_initial_gain_sta(hw);
484 rtl92c_dm_initial_gain_multi_sta(hw); 492 rtl92c_dm_initial_gain_multi_sta(hw);
485 rtl92c_dm_cck_packet_detection_thresh(hw); 493 rtl92c_dm_cck_packet_detection_thresh(hw);
486 494
487 dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate; 495 dm_digtable->presta_connectstate = dm_digtable->cursta_connectctate;
488 496
489} 497}
490 498
491static void rtl92c_dm_dig(struct ieee80211_hw *hw) 499static void rtl92c_dm_dig(struct ieee80211_hw *hw)
492{ 500{
493 struct rtl_priv *rtlpriv = rtl_priv(hw); 501 struct rtl_priv *rtlpriv = rtl_priv(hw);
502 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
494 503
495 if (rtlpriv->dm.dm_initialgain_enable == false) 504 if (rtlpriv->dm.dm_initialgain_enable == false)
496 return; 505 return;
497 if (dm_digtable.dig_enable_flag == false) 506 if (dm_digtable->dig_enable_flag == false)
498 return; 507 return;
499 508
500 rtl92c_dm_ctrl_initgain_by_twoport(hw); 509 rtl92c_dm_ctrl_initgain_by_twoport(hw);
@@ -514,23 +523,24 @@ static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
514void rtl92c_dm_write_dig(struct ieee80211_hw *hw) 523void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
515{ 524{
516 struct rtl_priv *rtlpriv = rtl_priv(hw); 525 struct rtl_priv *rtlpriv = rtl_priv(hw);
526 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
517 527
518 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 528 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
519 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", 529 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
520 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue, 530 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
521 dm_digtable.backoff_val); 531 dm_digtable->backoff_val);
522 532
523 dm_digtable.cur_igvalue += 2; 533 dm_digtable->cur_igvalue += 2;
524 if (dm_digtable.cur_igvalue > 0x3f) 534 if (dm_digtable->cur_igvalue > 0x3f)
525 dm_digtable.cur_igvalue = 0x3f; 535 dm_digtable->cur_igvalue = 0x3f;
526 536
527 if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) { 537 if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
528 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, 538 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
529 dm_digtable.cur_igvalue); 539 dm_digtable->cur_igvalue);
530 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, 540 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
531 dm_digtable.cur_igvalue); 541 dm_digtable->cur_igvalue);
532 542
533 dm_digtable.pre_igvalue = dm_digtable.cur_igvalue; 543 dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
534 } 544 }
535} 545}
536EXPORT_SYMBOL(rtl92c_dm_write_dig); 546EXPORT_SYMBOL(rtl92c_dm_write_dig);
@@ -1223,15 +1233,20 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1223 1233
1224static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) 1234static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1225{ 1235{
1226 dm_pstable.pre_ccastate = CCA_MAX; 1236 struct rtl_priv *rtlpriv = rtl_priv(hw);
1227 dm_pstable.cur_ccasate = CCA_MAX; 1237 struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
1228 dm_pstable.pre_rfstate = RF_MAX; 1238
1229 dm_pstable.cur_rfstate = RF_MAX; 1239 dm_pstable->pre_ccastate = CCA_MAX;
1230 dm_pstable.rssi_val_min = 0; 1240 dm_pstable->cur_ccasate = CCA_MAX;
1241 dm_pstable->pre_rfstate = RF_MAX;
1242 dm_pstable->cur_rfstate = RF_MAX;
1243 dm_pstable->rssi_val_min = 0;
1231} 1244}
1232 1245
1233void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) 1246void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1234{ 1247{
1248 struct rtl_priv *rtlpriv = rtl_priv(hw);
1249 struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
1235 static u8 initialize; 1250 static u8 initialize;
1236 static u32 reg_874, reg_c70, reg_85c, reg_a74; 1251 static u32 reg_874, reg_c70, reg_85c, reg_a74;
1237 1252
@@ -1251,27 +1266,27 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1251 } 1266 }
1252 1267
1253 if (!bforce_in_normal) { 1268 if (!bforce_in_normal) {
1254 if (dm_pstable.rssi_val_min != 0) { 1269 if (dm_pstable->rssi_val_min != 0) {
1255 if (dm_pstable.pre_rfstate == RF_NORMAL) { 1270 if (dm_pstable->pre_rfstate == RF_NORMAL) {
1256 if (dm_pstable.rssi_val_min >= 30) 1271 if (dm_pstable->rssi_val_min >= 30)
1257 dm_pstable.cur_rfstate = RF_SAVE; 1272 dm_pstable->cur_rfstate = RF_SAVE;
1258 else 1273 else
1259 dm_pstable.cur_rfstate = RF_NORMAL; 1274 dm_pstable->cur_rfstate = RF_NORMAL;
1260 } else { 1275 } else {
1261 if (dm_pstable.rssi_val_min <= 25) 1276 if (dm_pstable->rssi_val_min <= 25)
1262 dm_pstable.cur_rfstate = RF_NORMAL; 1277 dm_pstable->cur_rfstate = RF_NORMAL;
1263 else 1278 else
1264 dm_pstable.cur_rfstate = RF_SAVE; 1279 dm_pstable->cur_rfstate = RF_SAVE;
1265 } 1280 }
1266 } else { 1281 } else {
1267 dm_pstable.cur_rfstate = RF_MAX; 1282 dm_pstable->cur_rfstate = RF_MAX;
1268 } 1283 }
1269 } else { 1284 } else {
1270 dm_pstable.cur_rfstate = RF_NORMAL; 1285 dm_pstable->cur_rfstate = RF_NORMAL;
1271 } 1286 }
1272 1287
1273 if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) { 1288 if (dm_pstable->pre_rfstate != dm_pstable->cur_rfstate) {
1274 if (dm_pstable.cur_rfstate == RF_SAVE) { 1289 if (dm_pstable->cur_rfstate == RF_SAVE) {
1275 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, 1290 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1276 0x1C0000, 0x2); 1291 0x1C0000, 0x2);
1277 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0); 1292 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
@@ -1293,7 +1308,7 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1293 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0); 1308 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
1294 } 1309 }
1295 1310
1296 dm_pstable.pre_rfstate = dm_pstable.cur_rfstate; 1311 dm_pstable->pre_rfstate = dm_pstable->cur_rfstate;
1297 } 1312 }
1298} 1313}
1299EXPORT_SYMBOL(rtl92c_dm_rf_saving); 1314EXPORT_SYMBOL(rtl92c_dm_rf_saving);
@@ -1301,36 +1316,37 @@ EXPORT_SYMBOL(rtl92c_dm_rf_saving);
1301static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw) 1316static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1302{ 1317{
1303 struct rtl_priv *rtlpriv = rtl_priv(hw); 1318 struct rtl_priv *rtlpriv = rtl_priv(hw);
1319 struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
1304 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1320 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1305 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1321 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1306 1322
1307 if (((mac->link_state == MAC80211_NOLINK)) && 1323 if (((mac->link_state == MAC80211_NOLINK)) &&
1308 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { 1324 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
1309 dm_pstable.rssi_val_min = 0; 1325 dm_pstable->rssi_val_min = 0;
1310 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n"); 1326 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
1311 } 1327 }
1312 1328
1313 if (mac->link_state == MAC80211_LINKED) { 1329 if (mac->link_state == MAC80211_LINKED) {
1314 if (mac->opmode == NL80211_IFTYPE_ADHOC) { 1330 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
1315 dm_pstable.rssi_val_min = 1331 dm_pstable->rssi_val_min =
1316 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 1332 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1317 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, 1333 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1318 "AP Client PWDB = 0x%lx\n", 1334 "AP Client PWDB = 0x%lx\n",
1319 dm_pstable.rssi_val_min); 1335 dm_pstable->rssi_val_min);
1320 } else { 1336 } else {
1321 dm_pstable.rssi_val_min = 1337 dm_pstable->rssi_val_min =
1322 rtlpriv->dm.undecorated_smoothed_pwdb; 1338 rtlpriv->dm.undecorated_smoothed_pwdb;
1323 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, 1339 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1324 "STA Default Port PWDB = 0x%lx\n", 1340 "STA Default Port PWDB = 0x%lx\n",
1325 dm_pstable.rssi_val_min); 1341 dm_pstable->rssi_val_min);
1326 } 1342 }
1327 } else { 1343 } else {
1328 dm_pstable.rssi_val_min = 1344 dm_pstable->rssi_val_min =
1329 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 1345 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1330 1346
1331 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, 1347 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1332 "AP Ext Port PWDB = 0x%lx\n", 1348 "AP Ext Port PWDB = 0x%lx\n",
1333 dm_pstable.rssi_val_min); 1349 dm_pstable->rssi_val_min);
1334 } 1350 }
1335 1351
1336 if (IS_92C_SERIAL(rtlhal->version)) 1352 if (IS_92C_SERIAL(rtlhal->version))
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
index 2178e3761883..518e208c0180 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -91,40 +91,6 @@
91#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 91#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
92#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 92#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
93 93
94struct ps_t {
95 u8 pre_ccastate;
96 u8 cur_ccasate;
97 u8 pre_rfstate;
98 u8 cur_rfstate;
99 long rssi_val_min;
100};
101
102struct dig_t {
103 u8 dig_enable_flag;
104 u8 dig_ext_port_stage;
105 u32 rssi_lowthresh;
106 u32 rssi_highthresh;
107 u32 fa_lowthresh;
108 u32 fa_highthresh;
109 u8 cursta_connectctate;
110 u8 presta_connectstate;
111 u8 curmultista_connectstate;
112 u8 pre_igvalue;
113 u8 cur_igvalue;
114 char backoff_val;
115 char backoff_val_range_max;
116 char backoff_val_range_min;
117 u8 rx_gain_range_max;
118 u8 rx_gain_range_min;
119 u8 rssi_val_min;
120 u8 pre_cck_pd_state;
121 u8 cur_cck_pd_state;
122 u8 pre_cck_fa_state;
123 u8 cur_cck_fa_state;
124 u8 pre_ccastate;
125 u8 cur_ccasate;
126};
127
128struct swat_t { 94struct swat_t {
129 u8 failure_cnt; 95 u8 failure_cnt;
130 u8 try_flag; 96 u8 try_flag;
@@ -189,7 +155,6 @@ enum dm_dig_connect_e {
189 DIG_CONNECT_MAX 155 DIG_CONNECT_MAX
190}; 156};
191 157
192extern struct dig_t dm_digtable;
193void rtl92c_dm_init(struct ieee80211_hw *hw); 158void rtl92c_dm_init(struct ieee80211_hw *hw);
194void rtl92c_dm_watchdog(struct ieee80211_hw *hw); 159void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
195void rtl92c_dm_write_dig(struct ieee80211_hw *hw); 160void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index c20b3c30f62e..692c8ef5ee89 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -34,6 +34,7 @@
34#include "../rtl8192ce/def.h" 34#include "../rtl8192ce/def.h"
35#include "fw_common.h" 35#include "fw_common.h"
36#include <linux/export.h> 36#include <linux/export.h>
37#include <linux/kmemleak.h>
37 38
38static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) 39static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
39{ 40{
@@ -776,6 +777,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
776 skb = dev_alloc_skb(totalpacketlen); 777 skb = dev_alloc_skb(totalpacketlen);
777 if (!skb) 778 if (!skb)
778 return; 779 return;
780 kmemleak_not_leak(skb);
781
779 memcpy((u8 *) skb_put(skb, totalpacketlen), 782 memcpy((u8 *) skb_put(skb, totalpacketlen),
780 &reserved_page_packet, totalpacketlen); 783 &reserved_page_packet, totalpacketlen);
781 784
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 4c016241f340..cdcad7d9f15e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1881,6 +1881,7 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
1881{ 1881{
1882 struct rtl_priv *rtlpriv = rtl_priv(hw); 1882 struct rtl_priv *rtlpriv = rtl_priv(hw);
1883 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1883 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1884 struct dig_t dm_digtable = rtlpriv->dm_digtable;
1884 1885
1885 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, 1886 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1886 "--->Cmd(%#x), set_io_inprogress(%d)\n", 1887 "--->Cmd(%#x), set_io_inprogress(%d)\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 26747fa86005..d4a3d032c7bf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -86,40 +86,6 @@
86#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 86#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
87#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 87#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
88 88
89struct ps_t {
90 u8 pre_ccastate;
91 u8 cur_ccasate;
92 u8 pre_rfstate;
93 u8 cur_rfstate;
94 long rssi_val_min;
95};
96
97struct dig_t {
98 u8 dig_enable_flag;
99 u8 dig_ext_port_stage;
100 u32 rssi_lowthresh;
101 u32 rssi_highthresh;
102 u32 fa_lowthresh;
103 u32 fa_highthresh;
104 u8 cursta_connectctate;
105 u8 presta_connectstate;
106 u8 curmultista_connectstate;
107 u8 pre_igvalue;
108 u8 cur_igvalue;
109 char backoff_val;
110 char backoff_val_range_max;
111 char backoff_val_range_min;
112 u8 rx_gain_range_max;
113 u8 rx_gain_range_min;
114 u8 rssi_val_min;
115 u8 pre_cck_pd_state;
116 u8 cur_cck_pd_state;
117 u8 pre_cck_fa_state;
118 u8 cur_cck_fa_state;
119 u8 pre_ccastate;
120 u8 cur_ccasate;
121};
122
123struct swat_t { 89struct swat_t {
124 u8 failure_cnt; 90 u8 failure_cnt;
125 u8 try_flag; 91 u8 try_flag;
@@ -184,7 +150,6 @@ enum dm_dig_connect_e {
184 DIG_CONNECT_MAX 150 DIG_CONNECT_MAX
185}; 151};
186 152
187extern struct dig_t dm_digtable;
188void rtl92c_dm_init(struct ieee80211_hw *hw); 153void rtl92c_dm_init(struct ieee80211_hw *hw);
189void rtl92c_dm_watchdog(struct ieee80211_hw *hw); 154void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
190void rtl92c_dm_write_dig(struct ieee80211_hw *hw); 155void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 2c3b73366cd2..3aa927f8b9b9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -389,21 +389,4 @@ static struct pci_driver rtl92ce_driver = {
389 .driver.pm = &rtlwifi_pm_ops, 389 .driver.pm = &rtlwifi_pm_ops,
390}; 390};
391 391
392static int __init rtl92ce_module_init(void) 392module_pci_driver(rtl92ce_driver);
393{
394 int ret;
395
396 ret = pci_register_driver(&rtl92ce_driver);
397 if (ret)
398 RT_ASSERT(false, "No device found\n");
399
400 return ret;
401}
402
403static void __exit rtl92ce_module_exit(void)
404{
405 pci_unregister_driver(&rtl92ce_driver);
406}
407
408module_init(rtl92ce_module_init);
409module_exit(rtl92ce_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 37b13636a778..3af874e69595 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -508,14 +508,14 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
508 508
509 packet_matchbssid = 509 packet_matchbssid =
510 ((IEEE80211_FTYPE_CTL != type) && 510 ((IEEE80211_FTYPE_CTL != type) &&
511 (!compare_ether_addr(mac->bssid, 511 ether_addr_equal(mac->bssid,
512 (c_fc & IEEE80211_FCTL_TODS) ? 512 (c_fc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
513 hdr->addr1 : (c_fc & IEEE80211_FCTL_FROMDS) ? 513 (c_fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
514 hdr->addr2 : hdr->addr3)) && 514 hdr->addr3) &&
515 (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv)); 515 (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
516 516
517 packet_toself = packet_matchbssid && 517 packet_toself = packet_matchbssid &&
518 (!compare_ether_addr(praddr, rtlefuse->dev_addr)); 518 ether_addr_equal(praddr, rtlefuse->dev_addr);
519 519
520 if (ieee80211_is_beacon(fc)) 520 if (ieee80211_is_beacon(fc))
521 packet_beacon = true; 521 packet_beacon = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index efb9ab270403..c4adb9777365 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -530,12 +530,7 @@
530 SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val) 530 SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
531 531
532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ 532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
533do { \ 533 memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
534 if (_size > TX_DESC_NEXT_DESC_OFFSET) \
535 memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
536 else \
537 memset(__pdesc, 0, _size); \
538} while (0);
539 534
540struct rx_fwinfo_92c { 535struct rx_fwinfo_92c {
541 u8 gain_trsw[4]; 536 u8 gain_trsw[4];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 025bdc2eba44..7e91c76582ec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -1099,14 +1099,14 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
1099 praddr = hdr->addr1; 1099 praddr = hdr->addr1;
1100 packet_matchbssid = 1100 packet_matchbssid =
1101 ((IEEE80211_FTYPE_CTL != type) && 1101 ((IEEE80211_FTYPE_CTL != type) &&
1102 (!compare_ether_addr(mac->bssid, 1102 ether_addr_equal(mac->bssid,
1103 (cpu_fc & IEEE80211_FCTL_TODS) ? 1103 (cpu_fc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
1104 hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ? 1104 (cpu_fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
1105 hdr->addr2 : hdr->addr3)) && 1105 hdr->addr3) &&
1106 (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv)); 1106 (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
1107 1107
1108 packet_toself = packet_matchbssid && 1108 packet_toself = packet_matchbssid &&
1109 (!compare_ether_addr(praddr, rtlefuse->dev_addr)); 1109 ether_addr_equal(praddr, rtlefuse->dev_addr);
1110 if (ieee80211_is_beacon(fc)) 1110 if (ieee80211_is_beacon(fc))
1111 packet_beacon = true; 1111 packet_beacon = true;
1112 _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, 1112 _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 82c85286ab2e..7737fb0c6661 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -338,6 +338,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
338 {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/ 338 {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/
339 339
340 /****** 8192CU ********/ 340 /****** 8192CU ********/
341 {RTL_USB_DEVICE(0x050d, 0x1004, rtl92cu_hal_cfg)}, /*Belcom-SurfN300*/
341 {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/ 342 {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/
342 {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/ 343 {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/
343 {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ 344 {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
index eafdf76ed64d..939c905f547f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
@@ -151,9 +151,6 @@ enum version_8192d {
151 151
152/* for 92D */ 152/* for 92D */
153#define CHIP_92D_SINGLEPHY BIT(9) 153#define CHIP_92D_SINGLEPHY BIT(9)
154#define C_CUT_VERSION BIT(13)
155#define D_CUT_VERSION ((BIT(12)|BIT(13)))
156#define E_CUT_VERSION BIT(14)
157 154
158/* Chip specific */ 155/* Chip specific */
159#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) 156#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
@@ -173,7 +170,10 @@ enum version_8192d {
173#define RF_TYPE_1T2R BIT(4) 170#define RF_TYPE_1T2R BIT(4)
174#define RF_TYPE_2T2R BIT(5) 171#define RF_TYPE_2T2R BIT(5)
175#define CHIP_VENDOR_UMC BIT(7) 172#define CHIP_VENDOR_UMC BIT(7)
176#define B_CUT_VERSION BIT(12) 173#define CHIP_92D_B_CUT BIT(12)
174#define CHIP_92D_C_CUT BIT(13)
175#define CHIP_92D_D_CUT (BIT(13)|BIT(12))
176#define CHIP_92D_E_CUT BIT(14)
177 177
178/* MASK */ 178/* MASK */
179#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2)) 179#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2))
@@ -205,15 +205,13 @@ enum version_8192d {
205 CHIP_92D) ? true : false) 205 CHIP_92D) ? true : false)
206#define IS_92D_C_CUT(version) ((IS_92D(version)) ? \ 206#define IS_92D_C_CUT(version) ((IS_92D(version)) ? \
207 ((GET_CVID_CUT_VERSION(version) == \ 207 ((GET_CVID_CUT_VERSION(version) == \
208 0x2000) ? true : false) : false) 208 CHIP_92D_C_CUT) ? true : false) : false)
209#define IS_92D_D_CUT(version) ((IS_92D(version)) ? \ 209#define IS_92D_D_CUT(version) ((IS_92D(version)) ? \
210 ((GET_CVID_CUT_VERSION(version) == \ 210 ((GET_CVID_CUT_VERSION(version) == \
211 0x3000) ? true : false) : false) 211 CHIP_92D_D_CUT) ? true : false) : false)
212#define IS_92D_E_CUT(version) ((IS_92D(version)) ? \ 212#define IS_92D_E_CUT(version) ((IS_92D(version)) ? \
213 ((GET_CVID_CUT_VERSION(version) == \ 213 ((GET_CVID_CUT_VERSION(version) == \
214 0x4000) ? true : false) : false) 214 CHIP_92D_E_CUT) ? true : false) : false)
215#define CHIP_92D_C_CUT BIT(10)
216#define CHIP_92D_D_CUT BIT(11)
217 215
218enum rf_optype { 216enum rf_optype {
219 RF_OP_BY_SW_3WIRE = 0, 217 RF_OP_BY_SW_3WIRE = 0,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 4737018c9daa..a7d63a84551a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -37,8 +37,6 @@
37 37
38#define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb 38#define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb
39 39
40struct dig_t de_digtable;
41
42static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = { 40static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
43 0x7f8001fe, /* 0, +6.0dB */ 41 0x7f8001fe, /* 0, +6.0dB */
44 0x788001e2, /* 1, +5.5dB */ 42 0x788001e2, /* 1, +5.5dB */
@@ -159,27 +157,30 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
159 157
160static void rtl92d_dm_diginit(struct ieee80211_hw *hw) 158static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
161{ 159{
162 de_digtable.dig_enable_flag = true; 160 struct rtl_priv *rtlpriv = rtl_priv(hw);
163 de_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 161 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
164 de_digtable.cur_igvalue = 0x20; 162
165 de_digtable.pre_igvalue = 0x0; 163 de_digtable->dig_enable_flag = true;
166 de_digtable.cursta_connectctate = DIG_STA_DISCONNECT; 164 de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
167 de_digtable.presta_connectstate = DIG_STA_DISCONNECT; 165 de_digtable->cur_igvalue = 0x20;
168 de_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT; 166 de_digtable->pre_igvalue = 0x0;
169 de_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; 167 de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
170 de_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; 168 de_digtable->presta_connectstate = DIG_STA_DISCONNECT;
171 de_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 169 de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
172 de_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 170 de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
173 de_digtable.rx_gain_range_max = DM_DIG_FA_UPPER; 171 de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
174 de_digtable.rx_gain_range_min = DM_DIG_FA_LOWER; 172 de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
175 de_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; 173 de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
176 de_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX; 174 de_digtable->rx_gain_range_max = DM_DIG_FA_UPPER;
177 de_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN; 175 de_digtable->rx_gain_range_min = DM_DIG_FA_LOWER;
178 de_digtable.pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI; 176 de_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
179 de_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; 177 de_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
180 de_digtable.large_fa_hit = 0; 178 de_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
181 de_digtable.recover_cnt = 0; 179 de_digtable->pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
182 de_digtable.forbidden_igi = DM_DIG_FA_LOWER; 180 de_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
181 de_digtable->large_fa_hit = 0;
182 de_digtable->recover_cnt = 0;
183 de_digtable->forbidden_igi = DM_DIG_FA_LOWER;
183} 184}
184 185
185static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) 186static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
@@ -266,68 +267,70 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
266static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw) 267static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
267{ 268{
268 struct rtl_priv *rtlpriv = rtl_priv(hw); 269 struct rtl_priv *rtlpriv = rtl_priv(hw);
270 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
269 struct rtl_mac *mac = rtl_mac(rtlpriv); 271 struct rtl_mac *mac = rtl_mac(rtlpriv);
270 272
271 /* Determine the minimum RSSI */ 273 /* Determine the minimum RSSI */
272 if ((mac->link_state < MAC80211_LINKED) && 274 if ((mac->link_state < MAC80211_LINKED) &&
273 (rtlpriv->dm.UNDEC_SM_PWDB == 0)) { 275 (rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
274 de_digtable.min_undecorated_pwdb_for_dm = 0; 276 de_digtable->min_undecorated_pwdb_for_dm = 0;
275 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 277 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
276 "Not connected to any\n"); 278 "Not connected to any\n");
277 } 279 }
278 if (mac->link_state >= MAC80211_LINKED) { 280 if (mac->link_state >= MAC80211_LINKED) {
279 if (mac->opmode == NL80211_IFTYPE_AP || 281 if (mac->opmode == NL80211_IFTYPE_AP ||
280 mac->opmode == NL80211_IFTYPE_ADHOC) { 282 mac->opmode == NL80211_IFTYPE_ADHOC) {
281 de_digtable.min_undecorated_pwdb_for_dm = 283 de_digtable->min_undecorated_pwdb_for_dm =
282 rtlpriv->dm.UNDEC_SM_PWDB; 284 rtlpriv->dm.UNDEC_SM_PWDB;
283 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 285 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
284 "AP Client PWDB = 0x%lx\n", 286 "AP Client PWDB = 0x%lx\n",
285 rtlpriv->dm.UNDEC_SM_PWDB); 287 rtlpriv->dm.UNDEC_SM_PWDB);
286 } else { 288 } else {
287 de_digtable.min_undecorated_pwdb_for_dm = 289 de_digtable->min_undecorated_pwdb_for_dm =
288 rtlpriv->dm.undecorated_smoothed_pwdb; 290 rtlpriv->dm.undecorated_smoothed_pwdb;
289 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 291 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
290 "STA Default Port PWDB = 0x%x\n", 292 "STA Default Port PWDB = 0x%x\n",
291 de_digtable.min_undecorated_pwdb_for_dm); 293 de_digtable->min_undecorated_pwdb_for_dm);
292 } 294 }
293 } else { 295 } else {
294 de_digtable.min_undecorated_pwdb_for_dm = 296 de_digtable->min_undecorated_pwdb_for_dm =
295 rtlpriv->dm.UNDEC_SM_PWDB; 297 rtlpriv->dm.UNDEC_SM_PWDB;
296 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 298 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
297 "AP Ext Port or disconnect PWDB = 0x%x\n", 299 "AP Ext Port or disconnect PWDB = 0x%x\n",
298 de_digtable.min_undecorated_pwdb_for_dm); 300 de_digtable->min_undecorated_pwdb_for_dm);
299 } 301 }
300 302
301 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n", 303 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
302 de_digtable.min_undecorated_pwdb_for_dm); 304 de_digtable->min_undecorated_pwdb_for_dm);
303} 305}
304 306
305static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) 307static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
306{ 308{
307 struct rtl_priv *rtlpriv = rtl_priv(hw); 309 struct rtl_priv *rtlpriv = rtl_priv(hw);
310 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
308 unsigned long flag = 0; 311 unsigned long flag = 0;
309 312
310 if (de_digtable.cursta_connectctate == DIG_STA_CONNECT) { 313 if (de_digtable->cursta_connectctate == DIG_STA_CONNECT) {
311 if (de_digtable.pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { 314 if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
312 if (de_digtable.min_undecorated_pwdb_for_dm <= 25) 315 if (de_digtable->min_undecorated_pwdb_for_dm <= 25)
313 de_digtable.cur_cck_pd_state = 316 de_digtable->cur_cck_pd_state =
314 CCK_PD_STAGE_LOWRSSI; 317 CCK_PD_STAGE_LOWRSSI;
315 else 318 else
316 de_digtable.cur_cck_pd_state = 319 de_digtable->cur_cck_pd_state =
317 CCK_PD_STAGE_HIGHRSSI; 320 CCK_PD_STAGE_HIGHRSSI;
318 } else { 321 } else {
319 if (de_digtable.min_undecorated_pwdb_for_dm <= 20) 322 if (de_digtable->min_undecorated_pwdb_for_dm <= 20)
320 de_digtable.cur_cck_pd_state = 323 de_digtable->cur_cck_pd_state =
321 CCK_PD_STAGE_LOWRSSI; 324 CCK_PD_STAGE_LOWRSSI;
322 else 325 else
323 de_digtable.cur_cck_pd_state = 326 de_digtable->cur_cck_pd_state =
324 CCK_PD_STAGE_HIGHRSSI; 327 CCK_PD_STAGE_HIGHRSSI;
325 } 328 }
326 } else { 329 } else {
327 de_digtable.cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; 330 de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
328 } 331 }
329 if (de_digtable.pre_cck_pd_state != de_digtable.cur_cck_pd_state) { 332 if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
330 if (de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { 333 if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
331 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); 334 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
332 rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83); 335 rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83);
333 rtl92d_release_cckandrw_pagea_ctl(hw, &flag); 336 rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
@@ -336,13 +339,13 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
336 rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd); 339 rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd);
337 rtl92d_release_cckandrw_pagea_ctl(hw, &flag); 340 rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
338 } 341 }
339 de_digtable.pre_cck_pd_state = de_digtable.cur_cck_pd_state; 342 de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
340 } 343 }
341 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n", 344 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
342 de_digtable.cursta_connectctate == DIG_STA_CONNECT ? 345 de_digtable->cursta_connectctate == DIG_STA_CONNECT ?
343 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT"); 346 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
344 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n", 347 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
345 de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ? 348 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
346 "Low RSSI " : "High RSSI "); 349 "Low RSSI " : "High RSSI ");
347 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n", 350 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
348 IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)); 351 IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
@@ -352,37 +355,40 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
352void rtl92d_dm_write_dig(struct ieee80211_hw *hw) 355void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
353{ 356{
354 struct rtl_priv *rtlpriv = rtl_priv(hw); 357 struct rtl_priv *rtlpriv = rtl_priv(hw);
358 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
355 359
356 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 360 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
357 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", 361 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
358 de_digtable.cur_igvalue, de_digtable.pre_igvalue, 362 de_digtable->cur_igvalue, de_digtable->pre_igvalue,
359 de_digtable.backoff_val); 363 de_digtable->backoff_val);
360 if (de_digtable.dig_enable_flag == false) { 364 if (de_digtable->dig_enable_flag == false) {
361 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n"); 365 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
362 de_digtable.pre_igvalue = 0x17; 366 de_digtable->pre_igvalue = 0x17;
363 return; 367 return;
364 } 368 }
365 if (de_digtable.pre_igvalue != de_digtable.cur_igvalue) { 369 if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) {
366 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, 370 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
367 de_digtable.cur_igvalue); 371 de_digtable->cur_igvalue);
368 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, 372 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
369 de_digtable.cur_igvalue); 373 de_digtable->cur_igvalue);
370 de_digtable.pre_igvalue = de_digtable.cur_igvalue; 374 de_digtable->pre_igvalue = de_digtable->cur_igvalue;
371 } 375 }
372} 376}
373 377
374static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv) 378static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
375{ 379{
380 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
381
376 if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) && 382 if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
377 (rtlpriv->mac80211.vendor == PEER_CISCO)) { 383 (rtlpriv->mac80211.vendor == PEER_CISCO)) {
378 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n"); 384 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
379 if (de_digtable.last_min_undecorated_pwdb_for_dm >= 50 385 if (de_digtable->last_min_undecorated_pwdb_for_dm >= 50
380 && de_digtable.min_undecorated_pwdb_for_dm < 50) { 386 && de_digtable->min_undecorated_pwdb_for_dm < 50) {
381 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00); 387 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
382 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 388 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
383 "Early Mode Off\n"); 389 "Early Mode Off\n");
384 } else if (de_digtable.last_min_undecorated_pwdb_for_dm <= 55 && 390 } else if (de_digtable->last_min_undecorated_pwdb_for_dm <= 55 &&
385 de_digtable.min_undecorated_pwdb_for_dm > 55) { 391 de_digtable->min_undecorated_pwdb_for_dm > 55) {
386 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f); 392 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
387 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 393 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
388 "Early Mode On\n"); 394 "Early Mode On\n");
@@ -396,14 +402,15 @@ static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
396static void rtl92d_dm_dig(struct ieee80211_hw *hw) 402static void rtl92d_dm_dig(struct ieee80211_hw *hw)
397{ 403{
398 struct rtl_priv *rtlpriv = rtl_priv(hw); 404 struct rtl_priv *rtlpriv = rtl_priv(hw);
399 u8 value_igi = de_digtable.cur_igvalue; 405 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
406 u8 value_igi = de_digtable->cur_igvalue;
400 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); 407 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
401 408
402 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n"); 409 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
403 if (rtlpriv->rtlhal.earlymode_enable) { 410 if (rtlpriv->rtlhal.earlymode_enable) {
404 rtl92d_early_mode_enabled(rtlpriv); 411 rtl92d_early_mode_enabled(rtlpriv);
405 de_digtable.last_min_undecorated_pwdb_for_dm = 412 de_digtable->last_min_undecorated_pwdb_for_dm =
406 de_digtable.min_undecorated_pwdb_for_dm; 413 de_digtable->min_undecorated_pwdb_for_dm;
407 } 414 }
408 if (!rtlpriv->dm.dm_initialgain_enable) 415 if (!rtlpriv->dm.dm_initialgain_enable)
409 return; 416 return;
@@ -421,9 +428,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
421 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n"); 428 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
422 /* Decide the current status and if modify initial gain or not */ 429 /* Decide the current status and if modify initial gain or not */
423 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) 430 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
424 de_digtable.cursta_connectctate = DIG_STA_CONNECT; 431 de_digtable->cursta_connectctate = DIG_STA_CONNECT;
425 else 432 else
426 de_digtable.cursta_connectctate = DIG_STA_DISCONNECT; 433 de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
427 434
428 /* adjust initial gain according to false alarm counter */ 435 /* adjust initial gain according to false alarm counter */
429 if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0) 436 if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
@@ -436,64 +443,64 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
436 value_igi += 2; 443 value_igi += 2;
437 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 444 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
438 "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n", 445 "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
439 de_digtable.large_fa_hit, de_digtable.forbidden_igi); 446 de_digtable->large_fa_hit, de_digtable->forbidden_igi);
440 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 447 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
441 "dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n", 448 "dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n",
442 de_digtable.recover_cnt, de_digtable.rx_gain_range_min); 449 de_digtable->recover_cnt, de_digtable->rx_gain_range_min);
443 450
444 /* deal with abnorally large false alarm */ 451 /* deal with abnorally large false alarm */
445 if (falsealm_cnt->cnt_all > 10000) { 452 if (falsealm_cnt->cnt_all > 10000) {
446 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 453 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
447 "dm_DIG(): Abnormally false alarm case\n"); 454 "dm_DIG(): Abnormally false alarm case\n");
448 455
449 de_digtable.large_fa_hit++; 456 de_digtable->large_fa_hit++;
450 if (de_digtable.forbidden_igi < de_digtable.cur_igvalue) { 457 if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) {
451 de_digtable.forbidden_igi = de_digtable.cur_igvalue; 458 de_digtable->forbidden_igi = de_digtable->cur_igvalue;
452 de_digtable.large_fa_hit = 1; 459 de_digtable->large_fa_hit = 1;
453 } 460 }
454 if (de_digtable.large_fa_hit >= 3) { 461 if (de_digtable->large_fa_hit >= 3) {
455 if ((de_digtable.forbidden_igi + 1) > DM_DIG_MAX) 462 if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX)
456 de_digtable.rx_gain_range_min = DM_DIG_MAX; 463 de_digtable->rx_gain_range_min = DM_DIG_MAX;
457 else 464 else
458 de_digtable.rx_gain_range_min = 465 de_digtable->rx_gain_range_min =
459 (de_digtable.forbidden_igi + 1); 466 (de_digtable->forbidden_igi + 1);
460 de_digtable.recover_cnt = 3600; /* 3600=2hr */ 467 de_digtable->recover_cnt = 3600; /* 3600=2hr */
461 } 468 }
462 } else { 469 } else {
463 /* Recovery mechanism for IGI lower bound */ 470 /* Recovery mechanism for IGI lower bound */
464 if (de_digtable.recover_cnt != 0) { 471 if (de_digtable->recover_cnt != 0) {
465 de_digtable.recover_cnt--; 472 de_digtable->recover_cnt--;
466 } else { 473 } else {
467 if (de_digtable.large_fa_hit == 0) { 474 if (de_digtable->large_fa_hit == 0) {
468 if ((de_digtable.forbidden_igi - 1) < 475 if ((de_digtable->forbidden_igi - 1) <
469 DM_DIG_FA_LOWER) { 476 DM_DIG_FA_LOWER) {
470 de_digtable.forbidden_igi = 477 de_digtable->forbidden_igi =
471 DM_DIG_FA_LOWER; 478 DM_DIG_FA_LOWER;
472 de_digtable.rx_gain_range_min = 479 de_digtable->rx_gain_range_min =
473 DM_DIG_FA_LOWER; 480 DM_DIG_FA_LOWER;
474 481
475 } else { 482 } else {
476 de_digtable.forbidden_igi--; 483 de_digtable->forbidden_igi--;
477 de_digtable.rx_gain_range_min = 484 de_digtable->rx_gain_range_min =
478 (de_digtable.forbidden_igi + 1); 485 (de_digtable->forbidden_igi + 1);
479 } 486 }
480 } else if (de_digtable.large_fa_hit == 3) { 487 } else if (de_digtable->large_fa_hit == 3) {
481 de_digtable.large_fa_hit = 0; 488 de_digtable->large_fa_hit = 0;
482 } 489 }
483 } 490 }
484 } 491 }
485 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 492 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
486 "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n", 493 "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
487 de_digtable.large_fa_hit, de_digtable.forbidden_igi); 494 de_digtable->large_fa_hit, de_digtable->forbidden_igi);
488 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 495 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
489 "dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n", 496 "dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n",
490 de_digtable.recover_cnt, de_digtable.rx_gain_range_min); 497 de_digtable->recover_cnt, de_digtable->rx_gain_range_min);
491 498
492 if (value_igi > DM_DIG_MAX) 499 if (value_igi > DM_DIG_MAX)
493 value_igi = DM_DIG_MAX; 500 value_igi = DM_DIG_MAX;
494 else if (value_igi < de_digtable.rx_gain_range_min) 501 else if (value_igi < de_digtable->rx_gain_range_min)
495 value_igi = de_digtable.rx_gain_range_min; 502 value_igi = de_digtable->rx_gain_range_min;
496 de_digtable.cur_igvalue = value_igi; 503 de_digtable->cur_igvalue = value_igi;
497 rtl92d_dm_write_dig(hw); 504 rtl92d_dm_write_dig(hw);
498 if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) 505 if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
499 rtl92d_dm_cck_packet_detection_thresh(hw); 506 rtl92d_dm_cck_packet_detection_thresh(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
index 91030ec8ac3e..3fea0c11c24a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
@@ -87,55 +87,6 @@
87#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 87#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
88#define INDEX_MAPPING_NUM 13 88#define INDEX_MAPPING_NUM 13
89 89
90struct ps_t {
91 u8 pre_ccastate;
92 u8 cur_ccasate;
93
94 u8 pre_rfstate;
95 u8 cur_rfstate;
96
97 long rssi_val_min;
98};
99
100struct dig_t {
101 u8 dig_enable_flag;
102 u8 dig_ext_port_stage;
103
104 u32 rssi_lowthresh;
105 u32 rssi_highthresh;
106
107 u32 fa_lowthresh;
108 u32 fa_highthresh;
109
110 u8 cursta_connectctate;
111 u8 presta_connectstate;
112 u8 curmultista_connectstate;
113
114 u8 pre_igvalue;
115 u8 cur_igvalue;
116
117 char backoff_val;
118 char backoff_val_range_max;
119 char backoff_val_range_min;
120 u8 rx_gain_range_max;
121 u8 rx_gain_range_min;
122 u8 min_undecorated_pwdb_for_dm;
123 long last_min_undecorated_pwdb_for_dm;
124
125 u8 pre_cck_pd_state;
126 u8 cur_cck_pd_state;
127
128 u8 pre_cck_fa_state;
129 u8 cur_cck_fa_state;
130
131 u8 pre_ccastate;
132 u8 cur_ccasate;
133
134 u8 large_fa_hit;
135 u8 forbidden_igi;
136 u32 recover_cnt;
137};
138
139struct swat { 90struct swat {
140 u8 failure_cnt; 91 u8 failure_cnt;
141 u8 try_flag; 92 u8 try_flag;
@@ -200,8 +151,6 @@ enum dm_dig_connect {
200 DIG_CONNECT_MAX 151 DIG_CONNECT_MAX
201}; 152};
202 153
203extern struct dig_t de_digtable;
204
205void rtl92d_dm_init(struct ieee80211_hw *hw); 154void rtl92d_dm_init(struct ieee80211_hw *hw);
206void rtl92d_dm_watchdog(struct ieee80211_hw *hw); 155void rtl92d_dm_watchdog(struct ieee80211_hw *hw);
207void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw); 156void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 509f5af38adf..b338d526c422 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1743,9 +1743,13 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
1743 chipver |= CHIP_92D_D_CUT; 1743 chipver |= CHIP_92D_D_CUT;
1744 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n"); 1744 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
1745 break; 1745 break;
1746 case 0xCC33:
1747 chipver |= CHIP_92D_E_CUT;
1748 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
1749 break;
1746 default: 1750 default:
1747 chipver |= CHIP_92D_D_CUT; 1751 chipver |= CHIP_92D_D_CUT;
1748 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unkown CUT!\n"); 1752 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown CUT!\n");
1749 break; 1753 break;
1750 } 1754 }
1751 rtlpriv->rtlhal.version = chipver; 1755 rtlpriv->rtlhal.version = chipver;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 28fc5fb8057b..18380a7829f1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3064,6 +3064,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
3064static void rtl92d_phy_set_io(struct ieee80211_hw *hw) 3064static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
3065{ 3065{
3066 struct rtl_priv *rtlpriv = rtl_priv(hw); 3066 struct rtl_priv *rtlpriv = rtl_priv(hw);
3067 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
3067 struct rtl_phy *rtlphy = &(rtlpriv->phy); 3068 struct rtl_phy *rtlphy = &(rtlpriv->phy);
3068 3069
3069 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, 3070 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
@@ -3071,13 +3072,13 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
3071 rtlphy->current_io_type, rtlphy->set_io_inprogress); 3072 rtlphy->current_io_type, rtlphy->set_io_inprogress);
3072 switch (rtlphy->current_io_type) { 3073 switch (rtlphy->current_io_type) {
3073 case IO_CMD_RESUME_DM_BY_SCAN: 3074 case IO_CMD_RESUME_DM_BY_SCAN:
3074 de_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1; 3075 de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
3075 rtl92d_dm_write_dig(hw); 3076 rtl92d_dm_write_dig(hw);
3076 rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel); 3077 rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
3077 break; 3078 break;
3078 case IO_CMD_PAUSE_DM_BY_SCAN: 3079 case IO_CMD_PAUSE_DM_BY_SCAN:
3079 rtlphy->initgain_backup.xaagccore1 = de_digtable.cur_igvalue; 3080 rtlphy->initgain_backup.xaagccore1 = de_digtable->cur_igvalue;
3080 de_digtable.cur_igvalue = 0x37; 3081 de_digtable->cur_igvalue = 0x37;
3081 rtl92d_dm_write_dig(hw); 3082 rtl92d_dm_write_dig(hw);
3082 break; 3083 break;
3083 default: 3084 default:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index a7f6126e2f86..1666ef7fd87b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -466,12 +466,13 @@ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
466 type = WLAN_FC_GET_TYPE(fc); 466 type = WLAN_FC_GET_TYPE(fc);
467 praddr = hdr->addr1; 467 praddr = hdr->addr1;
468 packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && 468 packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
469 (!compare_ether_addr(mac->bssid, (cfc & IEEE80211_FCTL_TODS) ? 469 ether_addr_equal(mac->bssid,
470 hdr->addr1 : (cfc & IEEE80211_FCTL_FROMDS) ? 470 (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
471 hdr->addr2 : hdr->addr3)) && (!pstats->hwerror) && 471 (cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
472 (!pstats->crc) && (!pstats->icv)); 472 hdr->addr3) &&
473 (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
473 packet_toself = packet_matchbssid && 474 packet_toself = packet_matchbssid &&
474 (!compare_ether_addr(praddr, rtlefuse->dev_addr)); 475 ether_addr_equal(praddr, rtlefuse->dev_addr);
475 if (ieee80211_is_beacon(fc)) 476 if (ieee80211_is_beacon(fc))
476 packet_beacon = true; 477 packet_beacon = true;
477 _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, 478 _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index 0dc736c2723b..057a52431b00 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -530,12 +530,8 @@
530 SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val) 530 SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
531 531
532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ 532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
533do { \ 533 memset((void *)__pdesc, 0, \
534 if (_size > TX_DESC_NEXT_DESC_OFFSET) \ 534 min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
535 memset((void *)__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
536 else \
537 memset((void *)__pdesc, 0, _size); \
538} while (0);
539 535
540/* For 92D early mode */ 536/* For 92D early mode */
541#define SET_EARLYMODE_PKTNUM(__paddr, __value) \ 537#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index d1b0a1e14971..20afec62ce05 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -252,12 +252,7 @@
252 * the desc is cleared. */ 252 * the desc is cleared. */
253#define TX_DESC_NEXT_DESC_OFFSET 36 253#define TX_DESC_NEXT_DESC_OFFSET 36
254#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ 254#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
255do { \ 255 memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
256 if (_size > TX_DESC_NEXT_DESC_OFFSET) \
257 memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
258 else \
259 memset(__pdesc, 0, _size); \
260} while (0);
261 256
262/* Rx Desc */ 257/* Rx Desc */
263#define RX_STATUS_DESC_SIZE 24 258#define RX_STATUS_DESC_SIZE 24
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index fbabae17259e..2e1158026fb7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -35,7 +35,6 @@
35#include "dm.h" 35#include "dm.h"
36#include "fw.h" 36#include "fw.h"
37 37
38struct dig_t digtable;
39static const u32 edca_setting_dl[PEER_MAX] = { 38static const u32 edca_setting_dl[PEER_MAX] = {
40 0xa44f, /* 0 UNKNOWN */ 39 0xa44f, /* 0 UNKNOWN */
41 0x5ea44f, /* 1 REALTEK_90 */ 40 0x5ea44f, /* 1 REALTEK_90 */
@@ -421,62 +420,64 @@ static void _rtl92s_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
421static void rtl92s_backoff_enable_flag(struct ieee80211_hw *hw) 420static void rtl92s_backoff_enable_flag(struct ieee80211_hw *hw)
422{ 421{
423 struct rtl_priv *rtlpriv = rtl_priv(hw); 422 struct rtl_priv *rtlpriv = rtl_priv(hw);
423 struct dig_t *digtable = &rtlpriv->dm_digtable;
424 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); 424 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
425 425
426 if (falsealm_cnt->cnt_all > digtable.fa_highthresh) { 426 if (falsealm_cnt->cnt_all > digtable->fa_highthresh) {
427 if ((digtable.backoff_val - 6) < 427 if ((digtable->backoff_val - 6) <
428 digtable.backoffval_range_min) 428 digtable->backoffval_range_min)
429 digtable.backoff_val = digtable.backoffval_range_min; 429 digtable->backoff_val = digtable->backoffval_range_min;
430 else 430 else
431 digtable.backoff_val -= 6; 431 digtable->backoff_val -= 6;
432 } else if (falsealm_cnt->cnt_all < digtable.fa_lowthresh) { 432 } else if (falsealm_cnt->cnt_all < digtable->fa_lowthresh) {
433 if ((digtable.backoff_val + 6) > 433 if ((digtable->backoff_val + 6) >
434 digtable.backoffval_range_max) 434 digtable->backoffval_range_max)
435 digtable.backoff_val = 435 digtable->backoff_val =
436 digtable.backoffval_range_max; 436 digtable->backoffval_range_max;
437 else 437 else
438 digtable.backoff_val += 6; 438 digtable->backoff_val += 6;
439 } 439 }
440} 440}
441 441
442static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw) 442static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
443{ 443{
444 struct rtl_priv *rtlpriv = rtl_priv(hw); 444 struct rtl_priv *rtlpriv = rtl_priv(hw);
445 struct dig_t *digtable = &rtlpriv->dm_digtable;
445 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); 446 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
446 static u8 initialized, force_write; 447 static u8 initialized, force_write;
447 u8 initial_gain = 0; 448 u8 initial_gain = 0;
448 449
449 if ((digtable.pre_sta_connectstate == digtable.cur_sta_connectstate) || 450 if ((digtable->pre_sta_connectstate == digtable->cur_sta_connectstate) ||
450 (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) { 451 (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) {
451 if (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) { 452 if (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) {
452 if (rtlpriv->psc.rfpwr_state != ERFON) 453 if (rtlpriv->psc.rfpwr_state != ERFON)
453 return; 454 return;
454 455
455 if (digtable.backoff_enable_flag) 456 if (digtable->backoff_enable_flag)
456 rtl92s_backoff_enable_flag(hw); 457 rtl92s_backoff_enable_flag(hw);
457 else 458 else
458 digtable.backoff_val = DM_DIG_BACKOFF; 459 digtable->backoff_val = DM_DIG_BACKOFF;
459 460
460 if ((digtable.rssi_val + 10 - digtable.backoff_val) > 461 if ((digtable->rssi_val + 10 - digtable->backoff_val) >
461 digtable.rx_gain_range_max) 462 digtable->rx_gain_range_max)
462 digtable.cur_igvalue = 463 digtable->cur_igvalue =
463 digtable.rx_gain_range_max; 464 digtable->rx_gain_range_max;
464 else if ((digtable.rssi_val + 10 - digtable.backoff_val) 465 else if ((digtable->rssi_val + 10 - digtable->backoff_val)
465 < digtable.rx_gain_range_min) 466 < digtable->rx_gain_range_min)
466 digtable.cur_igvalue = 467 digtable->cur_igvalue =
467 digtable.rx_gain_range_min; 468 digtable->rx_gain_range_min;
468 else 469 else
469 digtable.cur_igvalue = digtable.rssi_val + 10 - 470 digtable->cur_igvalue = digtable->rssi_val + 10 -
470 digtable.backoff_val; 471 digtable->backoff_val;
471 472
472 if (falsealm_cnt->cnt_all > 10000) 473 if (falsealm_cnt->cnt_all > 10000)
473 digtable.cur_igvalue = 474 digtable->cur_igvalue =
474 (digtable.cur_igvalue > 0x33) ? 475 (digtable->cur_igvalue > 0x33) ?
475 digtable.cur_igvalue : 0x33; 476 digtable->cur_igvalue : 0x33;
476 477
477 if (falsealm_cnt->cnt_all > 16000) 478 if (falsealm_cnt->cnt_all > 16000)
478 digtable.cur_igvalue = 479 digtable->cur_igvalue =
479 digtable.rx_gain_range_max; 480 digtable->rx_gain_range_max;
480 /* connected -> connected or disconnected -> disconnected */ 481 /* connected -> connected or disconnected -> disconnected */
481 } else { 482 } else {
482 /* Firmware control DIG, do nothing in driver dm */ 483 /* Firmware control DIG, do nothing in driver dm */
@@ -486,31 +487,31 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
486 * disconnected or beforeconnect->(dis)connected */ 487 * disconnected or beforeconnect->(dis)connected */
487 } else { 488 } else {
488 /* Enable FW DIG */ 489 /* Enable FW DIG */
489 digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 490 digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
490 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE); 491 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE);
491 492
492 digtable.backoff_val = DM_DIG_BACKOFF; 493 digtable->backoff_val = DM_DIG_BACKOFF;
493 digtable.cur_igvalue = rtlpriv->phy.default_initialgain[0]; 494 digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0];
494 digtable.pre_igvalue = 0; 495 digtable->pre_igvalue = 0;
495 return; 496 return;
496 } 497 }
497 498
498 /* Forced writing to prevent from fw-dig overwriting. */ 499 /* Forced writing to prevent from fw-dig overwriting. */
499 if (digtable.pre_igvalue != rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 500 if (digtable->pre_igvalue != rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1,
500 MASKBYTE0)) 501 MASKBYTE0))
501 force_write = 1; 502 force_write = 1;
502 503
503 if ((digtable.pre_igvalue != digtable.cur_igvalue) || 504 if ((digtable->pre_igvalue != digtable->cur_igvalue) ||
504 !initialized || force_write) { 505 !initialized || force_write) {
505 /* Disable FW DIG */ 506 /* Disable FW DIG */
506 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_DISABLE); 507 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_DISABLE);
507 508
508 initial_gain = (u8)digtable.cur_igvalue; 509 initial_gain = (u8)digtable->cur_igvalue;
509 510
510 /* Set initial gain. */ 511 /* Set initial gain. */
511 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0, initial_gain); 512 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0, initial_gain);
512 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0, initial_gain); 513 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0, initial_gain);
513 digtable.pre_igvalue = digtable.cur_igvalue; 514 digtable->pre_igvalue = digtable->cur_igvalue;
514 initialized = 1; 515 initialized = 1;
515 force_write = 0; 516 force_write = 0;
516 } 517 }
@@ -519,6 +520,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
519static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw) 520static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
520{ 521{
521 struct rtl_priv *rtlpriv = rtl_priv(hw); 522 struct rtl_priv *rtlpriv = rtl_priv(hw);
523 struct dig_t *digtable = &rtlpriv->dm_digtable;
522 524
523 if (rtlpriv->mac80211.act_scanning) 525 if (rtlpriv->mac80211.act_scanning)
524 return; 526 return;
@@ -526,17 +528,17 @@ static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
526 /* Decide the current status and if modify initial gain or not */ 528 /* Decide the current status and if modify initial gain or not */
527 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED || 529 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED ||
528 rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) 530 rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
529 digtable.cur_sta_connectstate = DIG_STA_CONNECT; 531 digtable->cur_sta_connectstate = DIG_STA_CONNECT;
530 else 532 else
531 digtable.cur_sta_connectstate = DIG_STA_DISCONNECT; 533 digtable->cur_sta_connectstate = DIG_STA_DISCONNECT;
532 534
533 digtable.rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb; 535 digtable->rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb;
534 536
535 /* Change dig mode to rssi */ 537 /* Change dig mode to rssi */
536 if (digtable.cur_sta_connectstate != DIG_STA_DISCONNECT) { 538 if (digtable->cur_sta_connectstate != DIG_STA_DISCONNECT) {
537 if (digtable.dig_twoport_algorithm == 539 if (digtable->dig_twoport_algorithm ==
538 DIG_TWO_PORT_ALGO_FALSE_ALARM) { 540 DIG_TWO_PORT_ALGO_FALSE_ALARM) {
539 digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; 541 digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
540 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_MODE_SS); 542 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_MODE_SS);
541 } 543 }
542 } 544 }
@@ -544,13 +546,14 @@ static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
544 _rtl92s_dm_false_alarm_counter_statistics(hw); 546 _rtl92s_dm_false_alarm_counter_statistics(hw);
545 _rtl92s_dm_initial_gain_sta_beforeconnect(hw); 547 _rtl92s_dm_initial_gain_sta_beforeconnect(hw);
546 548
547 digtable.pre_sta_connectstate = digtable.cur_sta_connectstate; 549 digtable->pre_sta_connectstate = digtable->cur_sta_connectstate;
548} 550}
549 551
550static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw) 552static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw)
551{ 553{
552 struct rtl_priv *rtlpriv = rtl_priv(hw); 554 struct rtl_priv *rtlpriv = rtl_priv(hw);
553 struct rtl_phy *rtlphy = &(rtlpriv->phy); 555 struct rtl_phy *rtlphy = &(rtlpriv->phy);
556 struct dig_t *digtable = &rtlpriv->dm_digtable;
554 557
555 /* 2T2R TP issue */ 558 /* 2T2R TP issue */
556 if (rtlphy->rf_type == RF_2T2R) 559 if (rtlphy->rf_type == RF_2T2R)
@@ -559,7 +562,7 @@ static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw)
559 if (!rtlpriv->dm.dm_initialgain_enable) 562 if (!rtlpriv->dm.dm_initialgain_enable)
560 return; 563 return;
561 564
562 if (digtable.dig_enable_flag == false) 565 if (digtable->dig_enable_flag == false)
563 return; 566 return;
564 567
565 _rtl92s_dm_ctrl_initgain_bytwoport(hw); 568 _rtl92s_dm_ctrl_initgain_bytwoport(hw);
@@ -639,51 +642,52 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
639static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw) 642static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
640{ 643{
641 struct rtl_priv *rtlpriv = rtl_priv(hw); 644 struct rtl_priv *rtlpriv = rtl_priv(hw);
645 struct dig_t *digtable = &rtlpriv->dm_digtable;
642 646
643 /* Disable DIG scheme now.*/ 647 /* Disable DIG scheme now.*/
644 digtable.dig_enable_flag = true; 648 digtable->dig_enable_flag = true;
645 digtable.backoff_enable_flag = true; 649 digtable->backoff_enable_flag = true;
646 650
647 if ((rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) && 651 if ((rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) &&
648 (hal_get_firmwareversion(rtlpriv) >= 0x3c)) 652 (hal_get_firmwareversion(rtlpriv) >= 0x3c))
649 digtable.dig_algorithm = DIG_ALGO_BY_TOW_PORT; 653 digtable->dig_algorithm = DIG_ALGO_BY_TOW_PORT;
650 else 654 else
651 digtable.dig_algorithm = 655 digtable->dig_algorithm =
652 DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM; 656 DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM;
653 657
654 digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; 658 digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
655 digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 659 digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
656 /* off=by real rssi value, on=by digtable.rssi_val for new dig */ 660 /* off=by real rssi value, on=by digtable->rssi_val for new dig */
657 digtable.dig_dbgmode = DM_DBG_OFF; 661 digtable->dig_dbgmode = DM_DBG_OFF;
658 digtable.dig_slgorithm_switch = 0; 662 digtable->dig_slgorithm_switch = 0;
659 663
660 /* 2007/10/04 MH Define init gain threshol. */ 664 /* 2007/10/04 MH Define init gain threshol. */
661 digtable.dig_state = DM_STA_DIG_MAX; 665 digtable->dig_state = DM_STA_DIG_MAX;
662 digtable.dig_highpwrstate = DM_STA_DIG_MAX; 666 digtable->dig_highpwrstate = DM_STA_DIG_MAX;
663 667
664 digtable.cur_sta_connectstate = DIG_STA_DISCONNECT; 668 digtable->cur_sta_connectstate = DIG_STA_DISCONNECT;
665 digtable.pre_sta_connectstate = DIG_STA_DISCONNECT; 669 digtable->pre_sta_connectstate = DIG_STA_DISCONNECT;
666 digtable.cur_ap_connectstate = DIG_AP_DISCONNECT; 670 digtable->cur_ap_connectstate = DIG_AP_DISCONNECT;
667 digtable.pre_ap_connectstate = DIG_AP_DISCONNECT; 671 digtable->pre_ap_connectstate = DIG_AP_DISCONNECT;
668 672
669 digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; 673 digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
670 digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; 674 digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
671 675
672 digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 676 digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
673 digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 677 digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
674 678
675 digtable.rssi_highpower_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW; 679 digtable->rssi_highpower_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
676 digtable.rssi_highpower_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH; 680 digtable->rssi_highpower_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
677 681
678 /* for dig debug rssi value */ 682 /* for dig debug rssi value */
679 digtable.rssi_val = 50; 683 digtable->rssi_val = 50;
680 digtable.backoff_val = DM_DIG_BACKOFF; 684 digtable->backoff_val = DM_DIG_BACKOFF;
681 digtable.rx_gain_range_max = DM_DIG_MAX; 685 digtable->rx_gain_range_max = DM_DIG_MAX;
682 686
683 digtable.rx_gain_range_min = DM_DIG_MIN; 687 digtable->rx_gain_range_min = DM_DIG_MIN;
684 688
685 digtable.backoffval_range_max = DM_DIG_BACKOFF_MAX; 689 digtable->backoffval_range_max = DM_DIG_BACKOFF_MAX;
686 digtable.backoffval_range_min = DM_DIG_BACKOFF_MIN; 690 digtable->backoffval_range_min = DM_DIG_BACKOFF_MIN;
687} 691}
688 692
689static void _rtl92s_dm_init_dynamic_txpower(struct ieee80211_hw *hw) 693static void _rtl92s_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
index e1b19a641765..2e9052c8fe4b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
@@ -29,48 +29,6 @@
29#ifndef __RTL_92S_DM_H__ 29#ifndef __RTL_92S_DM_H__
30#define __RTL_92S_DM_H__ 30#define __RTL_92S_DM_H__
31 31
32struct dig_t {
33 u8 dig_enable_flag;
34 u8 dig_algorithm;
35 u8 dig_twoport_algorithm;
36 u8 dig_ext_port_stage;
37 u8 dig_dbgmode;
38 u8 dig_slgorithm_switch;
39
40 long rssi_lowthresh;
41 long rssi_highthresh;
42
43 u32 fa_lowthresh;
44 u32 fa_highthresh;
45
46 long rssi_highpower_lowthresh;
47 long rssi_highpower_highthresh;
48
49 u8 dig_state;
50 u8 dig_highpwrstate;
51 u8 cur_sta_connectstate;
52 u8 pre_sta_connectstate;
53 u8 cur_ap_connectstate;
54 u8 pre_ap_connectstate;
55
56 u8 cur_pd_thstate;
57 u8 pre_pd_thstate;
58 u8 cur_cs_ratiostate;
59 u8 pre_cs_ratiostate;
60
61 u32 pre_igvalue;
62 u32 cur_igvalue;
63
64 u8 backoff_enable_flag;
65 char backoff_val;
66 char backoffval_range_max;
67 char backoffval_range_min;
68 u8 rx_gain_range_max;
69 u8 rx_gain_range_min;
70
71 long rssi_val;
72};
73
74enum dm_dig_alg { 32enum dm_dig_alg {
75 DIG_ALGO_BY_FALSE_ALARM = 0, 33 DIG_ALGO_BY_FALSE_ALARM = 0,
76 DIG_ALGO_BY_RSSI = 1, 34 DIG_ALGO_BY_RSSI = 1,
@@ -154,8 +112,6 @@ enum dm_ratr_sta {
154#define DM_DIG_BACKOFF_MAX 12 112#define DM_DIG_BACKOFF_MAX 12
155#define DM_DIG_BACKOFF_MIN -4 113#define DM_DIG_BACKOFF_MIN -4
156 114
157extern struct dig_t digtable;
158
159void rtl92s_dm_watchdog(struct ieee80211_hw *hw); 115void rtl92s_dm_watchdog(struct ieee80211_hw *hw);
160void rtl92s_dm_init(struct ieee80211_hw *hw); 116void rtl92s_dm_init(struct ieee80211_hw *hw);
161void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw); 117void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
index b4afff626437..d53f4332464d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
@@ -345,7 +345,7 @@ enum fw_h2c_cmd {
345 do { \ 345 do { \
346 udelay(1000); \ 346 udelay(1000); \
347 rtlpriv->rtlhal.fwcmd_iomap &= (~_Bit); \ 347 rtlpriv->rtlhal.fwcmd_iomap &= (~_Bit); \
348 } while (0); 348 } while (0)
349 349
350#define FW_CMD_IO_UPDATE(rtlpriv, _val) \ 350#define FW_CMD_IO_UPDATE(rtlpriv, _val) \
351 rtlpriv->rtlhal.fwcmd_iomap = _val; 351 rtlpriv->rtlhal.fwcmd_iomap = _val;
@@ -354,13 +354,13 @@ enum fw_h2c_cmd {
354 do { \ 354 do { \
355 rtl_write_word(rtlpriv, LBUS_MON_ADDR, (u16)_val); \ 355 rtl_write_word(rtlpriv, LBUS_MON_ADDR, (u16)_val); \
356 FW_CMD_IO_UPDATE(rtlpriv, _val); \ 356 FW_CMD_IO_UPDATE(rtlpriv, _val); \
357 } while (0); 357 } while (0)
358 358
359#define FW_CMD_PARA_SET(rtlpriv, _val) \ 359#define FW_CMD_PARA_SET(rtlpriv, _val) \
360 do { \ 360 do { \
361 rtl_write_dword(rtlpriv, LBUS_ADDR_MASK, _val); \ 361 rtl_write_dword(rtlpriv, LBUS_ADDR_MASK, _val); \
362 rtlpriv->rtlhal.fwcmd_ioparam = _val; \ 362 rtlpriv->rtlhal.fwcmd_ioparam = _val; \
363 } while (0); 363 } while (0)
364 364
365#define FW_CMD_IO_QUERY(rtlpriv) \ 365#define FW_CMD_IO_QUERY(rtlpriv) \
366 (u16)(rtlpriv->rtlhal.fwcmd_iomap) 366 (u16)(rtlpriv->rtlhal.fwcmd_iomap)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 4a499928e4c6..8d7099bc472c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -1450,6 +1450,7 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
1450bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio) 1450bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
1451{ 1451{
1452 struct rtl_priv *rtlpriv = rtl_priv(hw); 1452 struct rtl_priv *rtlpriv = rtl_priv(hw);
1453 struct dig_t *digtable = &rtlpriv->dm_digtable;
1453 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1454 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1454 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 1455 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1455 u32 fw_param = FW_CMD_IO_PARA_QUERY(rtlpriv); 1456 u32 fw_param = FW_CMD_IO_PARA_QUERY(rtlpriv);
@@ -1588,16 +1589,16 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
1588 FW_SS_CTL); 1589 FW_SS_CTL);
1589 1590
1590 if (rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE || 1591 if (rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE ||
1591 !digtable.dig_enable_flag) 1592 !digtable->dig_enable_flag)
1592 fw_cmdmap &= ~FW_DIG_ENABLE_CTL; 1593 fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
1593 1594
1594 if ((rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) || 1595 if ((rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) ||
1595 rtlpriv->dm.dynamic_txpower_enable) 1596 rtlpriv->dm.dynamic_txpower_enable)
1596 fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL; 1597 fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL;
1597 1598
1598 if ((digtable.dig_ext_port_stage == 1599 if ((digtable->dig_ext_port_stage ==
1599 DIG_EXT_PORT_STAGE_0) || 1600 DIG_EXT_PORT_STAGE_0) ||
1600 (digtable.dig_ext_port_stage == 1601 (digtable->dig_ext_port_stage ==
1601 DIG_EXT_PORT_STAGE_1)) 1602 DIG_EXT_PORT_STAGE_1))
1602 fw_cmdmap &= ~FW_DIG_ENABLE_CTL; 1603 fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
1603 1604
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index f1b36005c6a2..730bcc919529 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -450,21 +450,4 @@ static struct pci_driver rtl92se_driver = {
450 .driver.pm = &rtlwifi_pm_ops, 450 .driver.pm = &rtlwifi_pm_ops,
451}; 451};
452 452
453static int __init rtl92se_module_init(void) 453module_pci_driver(rtl92se_driver);
454{
455 int ret = 0;
456
457 ret = pci_register_driver(&rtl92se_driver);
458 if (ret)
459 RT_ASSERT(false, "No device found\n");
460
461 return ret;
462}
463
464static void __exit rtl92se_module_exit(void)
465{
466 pci_unregister_driver(&rtl92se_driver);
467}
468
469module_init(rtl92se_module_init);
470module_exit(rtl92se_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 2fd3d13b7ced..812b5858f14a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -492,13 +492,14 @@ static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
492 praddr = hdr->addr1; 492 praddr = hdr->addr1;
493 493
494 packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && 494 packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
495 (!compare_ether_addr(mac->bssid, (cfc & IEEE80211_FCTL_TODS) ? 495 ether_addr_equal(mac->bssid,
496 hdr->addr1 : (cfc & IEEE80211_FCTL_FROMDS) ? 496 (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
497 hdr->addr2 : hdr->addr3)) && (!pstats->hwerror) && 497 (cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
498 (!pstats->crc) && (!pstats->icv)); 498 hdr->addr3) &&
499 (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
499 500
500 packet_toself = packet_matchbssid && 501 packet_toself = packet_matchbssid &&
501 (!compare_ether_addr(praddr, rtlefuse->dev_addr)); 502 ether_addr_equal(praddr, rtlefuse->dev_addr);
502 503
503 if (ieee80211_is_beacon(fc)) 504 if (ieee80211_is_beacon(fc))
504 packet_beacon = true; 505 packet_beacon = true;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index d04dbda13f5a..a6049d7d51b3 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -971,11 +971,6 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
971 rtlpriv->cfg->ops->read_chip_version(hw); 971 rtlpriv->cfg->ops->read_chip_version(hw);
972 /*like read eeprom and so on */ 972 /*like read eeprom and so on */
973 rtlpriv->cfg->ops->read_eeprom_info(hw); 973 rtlpriv->cfg->ops->read_eeprom_info(hw);
974 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
975 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
976 goto error_out;
977 }
978 rtlpriv->cfg->ops->init_sw_leds(hw);
979 err = _rtl_usb_init(hw); 974 err = _rtl_usb_init(hw);
980 if (err) 975 if (err)
981 goto error_out; 976 goto error_out;
@@ -987,6 +982,11 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
987 "Can't allocate sw for mac80211\n"); 982 "Can't allocate sw for mac80211\n");
988 goto error_out; 983 goto error_out;
989 } 984 }
985 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
986 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
987 goto error_out;
988 }
989 rtlpriv->cfg->ops->init_sw_leds(hw);
990 990
991 return 0; 991 return 0;
992error_out: 992error_out:
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 28ebc69218a3..bd816aef26dc 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1592,6 +1592,65 @@ struct rtl_debug {
1592 char proc_name[20]; 1592 char proc_name[20];
1593}; 1593};
1594 1594
1595struct ps_t {
1596 u8 pre_ccastate;
1597 u8 cur_ccasate;
1598 u8 pre_rfstate;
1599 u8 cur_rfstate;
1600 long rssi_val_min;
1601};
1602
1603struct dig_t {
1604 u32 rssi_lowthresh;
1605 u32 rssi_highthresh;
1606 u32 fa_lowthresh;
1607 u32 fa_highthresh;
1608 long last_min_undecorated_pwdb_for_dm;
1609 long rssi_highpower_lowthresh;
1610 long rssi_highpower_highthresh;
1611 u32 recover_cnt;
1612 u32 pre_igvalue;
1613 u32 cur_igvalue;
1614 long rssi_val;
1615 u8 dig_enable_flag;
1616 u8 dig_ext_port_stage;
1617 u8 dig_algorithm;
1618 u8 dig_twoport_algorithm;
1619 u8 dig_dbgmode;
1620 u8 dig_slgorithm_switch;
1621 u8 cursta_connectctate;
1622 u8 presta_connectstate;
1623 u8 curmultista_connectstate;
1624 char backoff_val;
1625 char backoff_val_range_max;
1626 char backoff_val_range_min;
1627 u8 rx_gain_range_max;
1628 u8 rx_gain_range_min;
1629 u8 min_undecorated_pwdb_for_dm;
1630 u8 rssi_val_min;
1631 u8 pre_cck_pd_state;
1632 u8 cur_cck_pd_state;
1633 u8 pre_cck_fa_state;
1634 u8 cur_cck_fa_state;
1635 u8 pre_ccastate;
1636 u8 cur_ccasate;
1637 u8 large_fa_hit;
1638 u8 forbidden_igi;
1639 u8 dig_state;
1640 u8 dig_highpwrstate;
1641 u8 cur_sta_connectstate;
1642 u8 pre_sta_connectstate;
1643 u8 cur_ap_connectstate;
1644 u8 pre_ap_connectstate;
1645 u8 cur_pd_thstate;
1646 u8 pre_pd_thstate;
1647 u8 cur_cs_ratiostate;
1648 u8 pre_cs_ratiostate;
1649 u8 backoff_enable_flag;
1650 char backoffval_range_max;
1651 char backoffval_range_min;
1652};
1653
1595struct rtl_priv { 1654struct rtl_priv {
1596 struct completion firmware_loading_complete; 1655 struct completion firmware_loading_complete;
1597 struct rtl_locks locks; 1656 struct rtl_locks locks;
@@ -1629,6 +1688,10 @@ struct rtl_priv {
1629 interface or hardware */ 1688 interface or hardware */
1630 unsigned long status; 1689 unsigned long status;
1631 1690
1691 /* tables for dm */
1692 struct dig_t dm_digtable;
1693 struct ps_t dm_pstable;
1694
1632 /* data buffer pointer for USB reads */ 1695 /* data buffer pointer for USB reads */
1633 __le32 *usb_data; 1696 __le32 *usb_data;
1634 int usb_data_index; 1697 int usb_data_index;
@@ -1958,37 +2021,35 @@ static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
1958static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw, 2021static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
1959 u32 regaddr, u32 bitmask) 2022 u32 regaddr, u32 bitmask)
1960{ 2023{
1961 return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_bbreg(hw, 2024 struct rtl_priv *rtlpriv = hw->priv;
1962 regaddr, 2025
1963 bitmask); 2026 return rtlpriv->cfg->ops->get_bbreg(hw, regaddr, bitmask);
1964} 2027}
1965 2028
1966static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr, 2029static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
1967 u32 bitmask, u32 data) 2030 u32 bitmask, u32 data)
1968{ 2031{
1969 ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_bbreg(hw, 2032 struct rtl_priv *rtlpriv = hw->priv;
1970 regaddr, bitmask,
1971 data);
1972 2033
2034 rtlpriv->cfg->ops->set_bbreg(hw, regaddr, bitmask, data);
1973} 2035}
1974 2036
1975static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw, 2037static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
1976 enum radio_path rfpath, u32 regaddr, 2038 enum radio_path rfpath, u32 regaddr,
1977 u32 bitmask) 2039 u32 bitmask)
1978{ 2040{
1979 return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_rfreg(hw, 2041 struct rtl_priv *rtlpriv = hw->priv;
1980 rfpath, 2042
1981 regaddr, 2043 return rtlpriv->cfg->ops->get_rfreg(hw, rfpath, regaddr, bitmask);
1982 bitmask);
1983} 2044}
1984 2045
1985static inline void rtl_set_rfreg(struct ieee80211_hw *hw, 2046static inline void rtl_set_rfreg(struct ieee80211_hw *hw,
1986 enum radio_path rfpath, u32 regaddr, 2047 enum radio_path rfpath, u32 regaddr,
1987 u32 bitmask, u32 data) 2048 u32 bitmask, u32 data)
1988{ 2049{
1989 ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_rfreg(hw, 2050 struct rtl_priv *rtlpriv = hw->priv;
1990 rfpath, regaddr, 2051
1991 bitmask, data); 2052 rtlpriv->cfg->ops->set_rfreg(hw, rfpath, regaddr, bitmask, data);
1992} 2053}
1993 2054
1994static inline bool is_hal_stop(struct rtl_hal *rtlhal) 2055static inline bool is_hal_stop(struct rtl_hal *rtlhal)
diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig
new file mode 100644
index 000000000000..1a72932e2213
--- /dev/null
+++ b/drivers/net/wireless/ti/Kconfig
@@ -0,0 +1,14 @@
1menuconfig WL_TI
2 bool "TI Wireless LAN support"
3 ---help---
4 This section contains support for all the wireless drivers
5 for Texas Instruments WLAN chips, such as wl1251 and the wl12xx
6 family.
7
8if WL_TI
9source "drivers/net/wireless/ti/wl1251/Kconfig"
10source "drivers/net/wireless/ti/wl12xx/Kconfig"
11
12# keep last for automatic dependencies
13source "drivers/net/wireless/ti/wlcore/Kconfig"
14endif # WL_TI
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
new file mode 100644
index 000000000000..0a565622d4a4
--- /dev/null
+++ b/drivers/net/wireless/ti/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_WLCORE) += wlcore/
2obj-$(CONFIG_WL12XX) += wl12xx/
3obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/
4obj-$(CONFIG_WL1251) += wl1251/
diff --git a/drivers/net/wireless/wl1251/Kconfig b/drivers/net/wireless/ti/wl1251/Kconfig
index 1fb65849414f..1fb65849414f 100644
--- a/drivers/net/wireless/wl1251/Kconfig
+++ b/drivers/net/wireless/ti/wl1251/Kconfig
diff --git a/drivers/net/wireless/wl1251/Makefile b/drivers/net/wireless/ti/wl1251/Makefile
index a5c6328b5f72..a5c6328b5f72 100644
--- a/drivers/net/wireless/wl1251/Makefile
+++ b/drivers/net/wireless/ti/wl1251/Makefile
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index ad87a1ac6462..ad87a1ac6462 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
index c2ba100f9b1a..c2ba100f9b1a 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/ti/wl1251/acx.h
diff --git a/drivers/net/wireless/wl1251/boot.c b/drivers/net/wireless/ti/wl1251/boot.c
index a2e5241382da..a2e5241382da 100644
--- a/drivers/net/wireless/wl1251/boot.c
+++ b/drivers/net/wireless/ti/wl1251/boot.c
diff --git a/drivers/net/wireless/wl1251/boot.h b/drivers/net/wireless/ti/wl1251/boot.h
index 7661bc5e4662..7661bc5e4662 100644
--- a/drivers/net/wireless/wl1251/boot.h
+++ b/drivers/net/wireless/ti/wl1251/boot.h
diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index d14d69d733a0..d14d69d733a0 100644
--- a/drivers/net/wireless/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
diff --git a/drivers/net/wireless/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
index ee4f2b391822..ee4f2b391822 100644
--- a/drivers/net/wireless/wl1251/cmd.h
+++ b/drivers/net/wireless/ti/wl1251/cmd.h
diff --git a/drivers/net/wireless/wl1251/debugfs.c b/drivers/net/wireless/ti/wl1251/debugfs.c
index 448da1f8c22f..448da1f8c22f 100644
--- a/drivers/net/wireless/wl1251/debugfs.c
+++ b/drivers/net/wireless/ti/wl1251/debugfs.c
diff --git a/drivers/net/wireless/wl1251/debugfs.h b/drivers/net/wireless/ti/wl1251/debugfs.h
index b3417c02a218..b3417c02a218 100644
--- a/drivers/net/wireless/wl1251/debugfs.h
+++ b/drivers/net/wireless/ti/wl1251/debugfs.h
diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 9f15ccaf8f05..9f15ccaf8f05 100644
--- a/drivers/net/wireless/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
diff --git a/drivers/net/wireless/wl1251/event.h b/drivers/net/wireless/ti/wl1251/event.h
index 30eb5d150bf7..30eb5d150bf7 100644
--- a/drivers/net/wireless/wl1251/event.h
+++ b/drivers/net/wireless/ti/wl1251/event.h
diff --git a/drivers/net/wireless/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index 89b43d35473c..89b43d35473c 100644
--- a/drivers/net/wireless/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
diff --git a/drivers/net/wireless/wl1251/init.h b/drivers/net/wireless/ti/wl1251/init.h
index 543f17582ead..543f17582ead 100644
--- a/drivers/net/wireless/wl1251/init.h
+++ b/drivers/net/wireless/ti/wl1251/init.h
diff --git a/drivers/net/wireless/wl1251/io.c b/drivers/net/wireless/ti/wl1251/io.c
index cdcadbf6ac2c..cdcadbf6ac2c 100644
--- a/drivers/net/wireless/wl1251/io.c
+++ b/drivers/net/wireless/ti/wl1251/io.c
diff --git a/drivers/net/wireless/wl1251/io.h b/drivers/net/wireless/ti/wl1251/io.h
index d382877c34cc..d382877c34cc 100644
--- a/drivers/net/wireless/wl1251/io.h
+++ b/drivers/net/wireless/ti/wl1251/io.h
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 41302c7b1ad0..d1afb8e3b2ef 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -479,6 +479,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
479 cancel_work_sync(&wl->irq_work); 479 cancel_work_sync(&wl->irq_work);
480 cancel_work_sync(&wl->tx_work); 480 cancel_work_sync(&wl->tx_work);
481 cancel_work_sync(&wl->filter_work); 481 cancel_work_sync(&wl->filter_work);
482 cancel_delayed_work_sync(&wl->elp_work);
482 483
483 mutex_lock(&wl->mutex); 484 mutex_lock(&wl->mutex);
484 485
diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c
index db719f7d2692..db719f7d2692 100644
--- a/drivers/net/wireless/wl1251/ps.c
+++ b/drivers/net/wireless/ti/wl1251/ps.c
diff --git a/drivers/net/wireless/wl1251/ps.h b/drivers/net/wireless/ti/wl1251/ps.h
index 75efad246d67..75efad246d67 100644
--- a/drivers/net/wireless/wl1251/ps.h
+++ b/drivers/net/wireless/ti/wl1251/ps.h
diff --git a/drivers/net/wireless/wl1251/reg.h b/drivers/net/wireless/ti/wl1251/reg.h
index a5809019c5c1..a5809019c5c1 100644
--- a/drivers/net/wireless/wl1251/reg.h
+++ b/drivers/net/wireless/ti/wl1251/reg.h
diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 6af35265c900..6af35265c900 100644
--- a/drivers/net/wireless/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
diff --git a/drivers/net/wireless/wl1251/rx.h b/drivers/net/wireless/ti/wl1251/rx.h
index 4448f635a4d8..4448f635a4d8 100644
--- a/drivers/net/wireless/wl1251/rx.h
+++ b/drivers/net/wireless/ti/wl1251/rx.h
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index f78694295c39..1b851f650e07 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -315,8 +315,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
315 315
316 if (wl->irq) 316 if (wl->irq)
317 free_irq(wl->irq, wl); 317 free_irq(wl->irq, wl);
318 kfree(wl_sdio);
319 wl1251_free_hw(wl); 318 wl1251_free_hw(wl);
319 kfree(wl_sdio);
320 320
321 sdio_claim_host(func); 321 sdio_claim_host(func);
322 sdio_release_irq(func); 322 sdio_release_irq(func);
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 6248c354fc5c..6248c354fc5c 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
diff --git a/drivers/net/wireless/wl1251/spi.h b/drivers/net/wireless/ti/wl1251/spi.h
index 16d506955cc0..16d506955cc0 100644
--- a/drivers/net/wireless/wl1251/spi.h
+++ b/drivers/net/wireless/ti/wl1251/spi.h
diff --git a/drivers/net/wireless/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
index 28121c590a2b..28121c590a2b 100644
--- a/drivers/net/wireless/wl1251/tx.c
+++ b/drivers/net/wireless/ti/wl1251/tx.c
diff --git a/drivers/net/wireless/wl1251/tx.h b/drivers/net/wireless/ti/wl1251/tx.h
index 81338d39b43e..81338d39b43e 100644
--- a/drivers/net/wireless/wl1251/tx.h
+++ b/drivers/net/wireless/ti/wl1251/tx.h
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 9d8f5816c6f9..9d8f5816c6f9 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
diff --git a/drivers/net/wireless/wl1251/wl12xx_80211.h b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
index 04ed51495772..04ed51495772 100644
--- a/drivers/net/wireless/wl1251/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
diff --git a/drivers/net/wireless/ti/wl12xx/Kconfig b/drivers/net/wireless/ti/wl12xx/Kconfig
new file mode 100644
index 000000000000..5b92329122c4
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/Kconfig
@@ -0,0 +1,8 @@
1config WL12XX
2 tristate "TI wl12xx support"
3 select WLCORE
4 ---help---
5 This module adds support for wireless adapters based on TI wl1271,
6 wl1273, wl1281 and wl1283 chipsets. This module does *not* include
7 support for wl1251. For wl1251 support, use the separate homonymous
8 driver instead.
diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile
new file mode 100644
index 000000000000..87f64b14db35
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/Makefile
@@ -0,0 +1,3 @@
1wl12xx-objs = main.o cmd.o acx.o
2
3obj-$(CONFIG_WL12XX) += wl12xx.o
diff --git a/drivers/net/wireless/ti/wl12xx/acx.c b/drivers/net/wireless/ti/wl12xx/acx.c
new file mode 100644
index 000000000000..bea06b2d7bf4
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/acx.c
@@ -0,0 +1,53 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 * Copyright (C) 2011 Texas Instruments Inc.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#include "../wlcore/cmd.h"
24#include "../wlcore/debug.h"
25#include "../wlcore/acx.h"
26
27#include "acx.h"
28
29int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap)
30{
31 struct wl1271_acx_host_config_bitmap *bitmap_conf;
32 int ret;
33
34 bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
35 if (!bitmap_conf) {
36 ret = -ENOMEM;
37 goto out;
38 }
39
40 bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
41
42 ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
43 bitmap_conf, sizeof(*bitmap_conf));
44 if (ret < 0) {
45 wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
46 goto out;
47 }
48
49out:
50 kfree(bitmap_conf);
51
52 return ret;
53}
diff --git a/drivers/net/wireless/ti/wl12xx/acx.h b/drivers/net/wireless/ti/wl12xx/acx.h
new file mode 100644
index 000000000000..d1f5aba0afce
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/acx.h
@@ -0,0 +1,36 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 1998-2009, 2011 Texas Instruments. All rights reserved.
5 * Copyright (C) 2008-2010 Nokia Corporation
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#ifndef __WL12XX_ACX_H__
24#define __WL12XX_ACX_H__
25
26#include "../wlcore/wlcore.h"
27
28struct wl1271_acx_host_config_bitmap {
29 struct acx_header header;
30
31 __le32 host_cfg_bitmap;
32} __packed;
33
34int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
35
36#endif /* __WL12XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c
new file mode 100644
index 000000000000..8ffaeb5f2147
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/cmd.c
@@ -0,0 +1,254 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2009-2010 Nokia Corporation
5 * Copyright (C) 2011 Texas Instruments Inc.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#include "../wlcore/cmd.h"
24#include "../wlcore/debug.h"
25
26#include "wl12xx.h"
27#include "cmd.h"
28
29int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
30{
31 struct wl1271_ext_radio_parms_cmd *ext_radio_parms;
32 struct wl12xx_priv *priv = wl->priv;
33 struct wl12xx_conf_rf *rf = &priv->conf.rf;
34 int ret;
35
36 if (!wl->nvs)
37 return -ENODEV;
38
39 ext_radio_parms = kzalloc(sizeof(*ext_radio_parms), GFP_KERNEL);
40 if (!ext_radio_parms)
41 return -ENOMEM;
42
43 ext_radio_parms->test.id = TEST_CMD_INI_FILE_RF_EXTENDED_PARAM;
44
45 memcpy(ext_radio_parms->tx_per_channel_power_compensation_2,
46 rf->tx_per_channel_power_compensation_2,
47 CONF_TX_PWR_COMPENSATION_LEN_2);
48 memcpy(ext_radio_parms->tx_per_channel_power_compensation_5,
49 rf->tx_per_channel_power_compensation_5,
50 CONF_TX_PWR_COMPENSATION_LEN_5);
51
52 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_EXT_RADIO_PARAM: ",
53 ext_radio_parms, sizeof(*ext_radio_parms));
54
55 ret = wl1271_cmd_test(wl, ext_radio_parms, sizeof(*ext_radio_parms), 0);
56 if (ret < 0)
57 wl1271_warning("TEST_CMD_INI_FILE_RF_EXTENDED_PARAM failed");
58
59 kfree(ext_radio_parms);
60 return ret;
61}
62
63int wl1271_cmd_general_parms(struct wl1271 *wl)
64{
65 struct wl1271_general_parms_cmd *gen_parms;
66 struct wl1271_ini_general_params *gp =
67 &((struct wl1271_nvs_file *)wl->nvs)->general_params;
68 bool answer = false;
69 int ret;
70
71 if (!wl->nvs)
72 return -ENODEV;
73
74 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
75 wl1271_warning("FEM index from INI out of bounds");
76 return -EINVAL;
77 }
78
79 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
80 if (!gen_parms)
81 return -ENOMEM;
82
83 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
84
85 memcpy(&gen_parms->general_params, gp, sizeof(*gp));
86
87 if (gp->tx_bip_fem_auto_detect)
88 answer = true;
89
90 /* Override the REF CLK from the NVS with the one from platform data */
91 gen_parms->general_params.ref_clock = wl->ref_clock;
92
93 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
94 if (ret < 0) {
95 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
96 goto out;
97 }
98
99 gp->tx_bip_fem_manufacturer =
100 gen_parms->general_params.tx_bip_fem_manufacturer;
101
102 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
103 wl1271_warning("FEM index from FW out of bounds");
104 ret = -EINVAL;
105 goto out;
106 }
107
108 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
109 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
110
111out:
112 kfree(gen_parms);
113 return ret;
114}
115
116int wl128x_cmd_general_parms(struct wl1271 *wl)
117{
118 struct wl128x_general_parms_cmd *gen_parms;
119 struct wl128x_ini_general_params *gp =
120 &((struct wl128x_nvs_file *)wl->nvs)->general_params;
121 bool answer = false;
122 int ret;
123
124 if (!wl->nvs)
125 return -ENODEV;
126
127 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
128 wl1271_warning("FEM index from ini out of bounds");
129 return -EINVAL;
130 }
131
132 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
133 if (!gen_parms)
134 return -ENOMEM;
135
136 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
137
138 memcpy(&gen_parms->general_params, gp, sizeof(*gp));
139
140 if (gp->tx_bip_fem_auto_detect)
141 answer = true;
142
143 /* Replace REF and TCXO CLKs with the ones from platform data */
144 gen_parms->general_params.ref_clock = wl->ref_clock;
145 gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
146
147 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
148 if (ret < 0) {
149 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
150 goto out;
151 }
152
153 gp->tx_bip_fem_manufacturer =
154 gen_parms->general_params.tx_bip_fem_manufacturer;
155
156 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
157 wl1271_warning("FEM index from FW out of bounds");
158 ret = -EINVAL;
159 goto out;
160 }
161
162 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
163 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
164
165out:
166 kfree(gen_parms);
167 return ret;
168}
169
170int wl1271_cmd_radio_parms(struct wl1271 *wl)
171{
172 struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
173 struct wl1271_radio_parms_cmd *radio_parms;
174 struct wl1271_ini_general_params *gp = &nvs->general_params;
175 int ret;
176
177 if (!wl->nvs)
178 return -ENODEV;
179
180 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
181 if (!radio_parms)
182 return -ENOMEM;
183
184 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
185
186 /* 2.4GHz parameters */
187 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
188 sizeof(struct wl1271_ini_band_params_2));
189 memcpy(&radio_parms->dyn_params_2,
190 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
191 sizeof(struct wl1271_ini_fem_params_2));
192
193 /* 5GHz parameters */
194 memcpy(&radio_parms->static_params_5,
195 &nvs->stat_radio_params_5,
196 sizeof(struct wl1271_ini_band_params_5));
197 memcpy(&radio_parms->dyn_params_5,
198 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
199 sizeof(struct wl1271_ini_fem_params_5));
200
201 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
202 radio_parms, sizeof(*radio_parms));
203
204 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
205 if (ret < 0)
206 wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
207
208 kfree(radio_parms);
209 return ret;
210}
211
212int wl128x_cmd_radio_parms(struct wl1271 *wl)
213{
214 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
215 struct wl128x_radio_parms_cmd *radio_parms;
216 struct wl128x_ini_general_params *gp = &nvs->general_params;
217 int ret;
218
219 if (!wl->nvs)
220 return -ENODEV;
221
222 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
223 if (!radio_parms)
224 return -ENOMEM;
225
226 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
227
228 /* 2.4GHz parameters */
229 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
230 sizeof(struct wl128x_ini_band_params_2));
231 memcpy(&radio_parms->dyn_params_2,
232 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
233 sizeof(struct wl128x_ini_fem_params_2));
234
235 /* 5GHz parameters */
236 memcpy(&radio_parms->static_params_5,
237 &nvs->stat_radio_params_5,
238 sizeof(struct wl128x_ini_band_params_5));
239 memcpy(&radio_parms->dyn_params_5,
240 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
241 sizeof(struct wl128x_ini_fem_params_5));
242
243 radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
244
245 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
246 radio_parms, sizeof(*radio_parms));
247
248 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
249 if (ret < 0)
250 wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
251
252 kfree(radio_parms);
253 return ret;
254}
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.h b/drivers/net/wireless/ti/wl12xx/cmd.h
new file mode 100644
index 000000000000..140a0e8829d5
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/cmd.h
@@ -0,0 +1,112 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 1998-2009, 2011 Texas Instruments. All rights reserved.
5 * Copyright (C) 2009 Nokia Corporation
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#ifndef __WL12XX_CMD_H__
24#define __WL12XX_CMD_H__
25
26#include "conf.h"
27
28#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19
29#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E
30
/*
 * Payload of TEST_CMD_INI_FILE_GENERAL_PARAM for wl127x chips.
 * The layout mirrors what the firmware parses, hence __packed;
 * do not reorder fields.
 */
struct wl1271_general_parms_cmd {
	struct wl1271_cmd_header header;

	struct wl1271_cmd_test_header test;

	/* INI "general parameters" section from the NVS file */
	struct wl1271_ini_general_params general_params;

	/* smart-reflex debug table and sensor tuning values */
	u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
	u8 sr_sen_n_p;
	u8 sr_sen_n_p_gain;
	u8 sr_sen_nrn;
	u8 sr_sen_prn;
	u8 padding[3]; /* presumably aligns to a 32-bit boundary — confirm vs FW */
} __packed;
45
/*
 * Payload of TEST_CMD_INI_FILE_GENERAL_PARAM for wl128x chips.
 * Same shape as wl1271_general_parms_cmd but with the wl128x variant of
 * the general-parameters section. Firmware wire format — __packed, do
 * not reorder fields.
 */
struct wl128x_general_parms_cmd {
	struct wl1271_cmd_header header;

	struct wl1271_cmd_test_header test;

	/* INI "general parameters" section from the wl128x NVS file */
	struct wl128x_ini_general_params general_params;

	/* smart-reflex debug table and sensor tuning values */
	u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
	u8 sr_sen_n_p;
	u8 sr_sen_n_p_gain;
	u8 sr_sen_nrn;
	u8 sr_sen_prn;
	u8 padding[3]; /* presumably aligns to a 32-bit boundary — confirm vs FW */
} __packed;
60
/*
 * Payload of TEST_CMD_INI_FILE_RADIO_PARAM for wl127x chips: static
 * per-band parameters plus the dynamic (FEM-specific) parameters chosen
 * by wl1271_cmd_radio_parms(). Firmware wire format — __packed, do not
 * reorder fields.
 */
struct wl1271_radio_parms_cmd {
	struct wl1271_cmd_header header;

	struct wl1271_cmd_test_header test;

	/* Static radio parameters */
	struct wl1271_ini_band_params_2 static_params_2;
	struct wl1271_ini_band_params_5 static_params_5;

	/* Dynamic radio parameters */
	struct wl1271_ini_fem_params_2 dyn_params_2;
	u8 padding2;
	struct wl1271_ini_fem_params_5 dyn_params_5;
	u8 padding3[2];
} __packed;
76
/*
 * Payload of TEST_CMD_INI_FILE_RADIO_PARAM for wl128x chips: static
 * per-band parameters, the FEM vendor/options byte, and the dynamic
 * (FEM-specific) parameters chosen by wl128x_cmd_radio_parms().
 * Firmware wire format — __packed, do not reorder fields.
 */
struct wl128x_radio_parms_cmd {
	struct wl1271_cmd_header header;

	struct wl1271_cmd_test_header test;

	/* Static radio parameters */
	struct wl128x_ini_band_params_2 static_params_2;
	struct wl128x_ini_band_params_5 static_params_5;

	u8 fem_vendor_and_options;

	/* Dynamic radio parameters */
	struct wl128x_ini_fem_params_2 dyn_params_2;
	u8 padding2;
	struct wl128x_ini_fem_params_5 dyn_params_5;
} __packed;
93
94#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26
95
/*
 * Payload of TEST_CMD_INI_FILE_RF_EXTENDED_PARAM: per-channel TX power
 * compensation tables for both bands (array lengths defined in conf.h).
 * Firmware wire format — __packed, do not reorder fields.
 */
struct wl1271_ext_radio_parms_cmd {
	struct wl1271_cmd_header header;

	struct wl1271_cmd_test_header test;

	u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
	u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
	u8 padding[3]; /* presumably aligns to a 32-bit boundary — confirm vs FW */
} __packed;
105
106int wl1271_cmd_general_parms(struct wl1271 *wl);
107int wl128x_cmd_general_parms(struct wl1271 *wl);
108int wl1271_cmd_radio_parms(struct wl1271 *wl);
109int wl128x_cmd_radio_parms(struct wl1271 *wl);
110int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
111
112#endif /* __WL12XX_CMD_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/conf.h b/drivers/net/wireless/ti/wl12xx/conf.h
new file mode 100644
index 000000000000..75e29897a0f5
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/conf.h
@@ -0,0 +1,50 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL12XX_CONF_H__
23#define __WL12XX_CONF_H__
24
25/* these are number of channels on the band divided by two, rounded up */
26#define CONF_TX_PWR_COMPENSATION_LEN_2 7
27#define CONF_TX_PWR_COMPENSATION_LEN_5 18
28
/*
 * RF configuration private to the wl12xx family: per-channel TX power
 * compensation tables, one entry per two channels (see the LEN defines
 * above).
 */
struct wl12xx_conf_rf {
	/*
	 * Per channel power compensation for 2.4GHz
	 *
	 * Range: s8
	 */
	u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];

	/*
	 * Per channel power compensation for 5GHz
	 *
	 * Range: s8
	 */
	u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
};
44
/*
 * wl12xx-private configuration: RF compensation tables plus the memory
 * settings used for wl127x chips (wl128x memory settings live in the
 * shared wlcore conf instead).
 */
struct wl12xx_priv_conf {
	struct wl12xx_conf_rf rf;
	struct conf_memory_settings mem_wl127x;
};
49
50#endif /* __WL12XX_CONF_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
new file mode 100644
index 000000000000..d7dd3def07b5
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -0,0 +1,1388 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24
25#include <linux/err.h>
26
27#include <linux/wl12xx.h>
28
29#include "../wlcore/wlcore.h"
30#include "../wlcore/debug.h"
31#include "../wlcore/io.h"
32#include "../wlcore/acx.h"
33#include "../wlcore/tx.h"
34#include "../wlcore/rx.h"
35#include "../wlcore/io.h"
36#include "../wlcore/boot.h"
37
38#include "wl12xx.h"
39#include "reg.h"
40#include "cmd.h"
41#include "acx.h"
42
/*
 * Default wlcore configuration for the wl12xx family, handed to the
 * wlcore core driver: BT coexistence (soft-gemini), RX/TX tuning,
 * connection/power-save management, scan, HT, memory, FM coexistence,
 * firmware logging, rate management and hangover parameters.
 * Note: the .mem section applies to wl128x only — wl127x memory
 * settings are copied over from wl12xx_default_priv_conf.mem_wl127x in
 * wl12xx_identify_chip().
 */
static struct wlcore_conf wl12xx_conf = {
	.sg = {
		.params = {
			[CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
			[CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
			[CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
			[CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
			[CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
			[CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
			[CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
			[CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
			[CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
			[CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
			[CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
			[CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
			[CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
			[CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
			/* active scan params */
			[CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
			/* passive scan params */
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
			/* passive scan in dual antenna params */
			[CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
			[CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
			[CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
			/* general params */
			[CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
			[CONF_SG_ANTENNA_CONFIGURATION] = 0,
			[CONF_SG_BEACON_MISS_PERCENT] = 60,
			[CONF_SG_DHCP_TIME] = 5000,
			[CONF_SG_RXT] = 1200,
			[CONF_SG_TXT] = 1000,
			[CONF_SG_ADAPTIVE_RXT_TXT] = 1,
			[CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
			[CONF_SG_HV3_MAX_SERVED] = 6,
			[CONF_SG_PS_POLL_TIMEOUT] = 10,
			[CONF_SG_UPSD_TIMEOUT] = 10,
			[CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
			[CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
			[CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
			/* AP params */
			[CONF_AP_BEACON_MISS_TX] = 3,
			[CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
			[CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
			[CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
			[CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
			[CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
			/* CTS Diluting params */
			[CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
			[CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
		},
		.state = CONF_SG_PROTECTIVE,
	},
	.rx = {
		.rx_msdu_life_time = 512000,
		.packet_detection_threshold = 0,
		.ps_poll_timeout = 15,
		.upsd_timeout = 15,
		.rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
		.rx_cca_threshold = 0,
		.irq_blk_threshold = 0xFFFF,
		.irq_pkt_threshold = 0,
		.irq_timeout = 600,
		.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
	},
	.tx = {
		.tx_energy_detection = 0,
		.sta_rc_conf = {
			.enabled_rates = 0,
			.short_retry_limit = 10,
			.long_retry_limit = 10,
			.aflags = 0,
		},
		.ac_conf_count = 4,
		.ac_conf = {
			[CONF_TX_AC_BE] = {
				.ac = CONF_TX_AC_BE,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = 3,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_BK] = {
				.ac = CONF_TX_AC_BK,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = 7,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_VI] = {
				.ac = CONF_TX_AC_VI,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 3008,
			},
			[CONF_TX_AC_VO] = {
				.ac = CONF_TX_AC_VO,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 1504,
			},
		},
		.max_tx_retries = 100,
		.ap_aging_period = 300,
		.tid_conf_count = 4,
		.tid_conf = {
			[CONF_TX_AC_BE] = {
				.queue_id = CONF_TX_AC_BE,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_BE,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_BK] = {
				.queue_id = CONF_TX_AC_BK,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_BK,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_VI] = {
				.queue_id = CONF_TX_AC_VI,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_VI,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_VO] = {
				.queue_id = CONF_TX_AC_VO,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_VO,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
		},
		.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
		.tx_compl_timeout = 700,
		.tx_compl_threshold = 4,
		.basic_rate = CONF_HW_BIT_RATE_1MBPS,
		.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
		.tmpl_short_retry_limit = 10,
		.tmpl_long_retry_limit = 10,
		.tx_watchdog_timeout = 5000,
	},
	.conn = {
		.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
		.listen_interval = 1,
		.suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
		.suspend_listen_interval = 3,
		.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
		.bcn_filt_ie_count = 2,
		.bcn_filt_ie = {
			[0] = {
				.ie = WLAN_EID_CHANNEL_SWITCH,
				.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
			},
			[1] = {
				.ie = WLAN_EID_HT_OPERATION,
				.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
			},
		},
		.synch_fail_thold = 10,
		.bss_lose_timeout = 100,
		.beacon_rx_timeout = 10000,
		.broadcast_timeout = 20000,
		.rx_broadcast_in_ps = 1,
		.ps_poll_threshold = 10,
		.bet_enable = CONF_BET_MODE_ENABLE,
		.bet_max_consecutive = 50,
		.psm_entry_retries = 8,
		.psm_exit_retries = 16,
		.psm_entry_nullfunc_retries = 3,
		.dynamic_ps_timeout = 40,
		.forced_ps = false,
		.keep_alive_interval = 55000,
		.max_listen_interval = 20,
	},
	.itrim = {
		.enable = false,
		.timeout = 50000,
	},
	.pm_config = {
		.host_clk_settling_time = 5000,
		.host_fast_wakeup_support = false
	},
	.roam_trigger = {
		.trigger_pacing = 1,
		.avg_weight_rssi_beacon = 20,
		.avg_weight_rssi_data = 10,
		.avg_weight_snr_beacon = 20,
		.avg_weight_snr_data = 10,
	},
	.scan = {
		.min_dwell_time_active = 7500,
		.max_dwell_time_active = 30000,
		.min_dwell_time_passive = 100000,
		.max_dwell_time_passive = 100000,
		.num_probe_reqs = 2,
		.split_scan_timeout = 50000,
	},
	.sched_scan = {
		/*
		 * Values are in TU/1000 but since sched scan FW command
		 * params are in TUs rounding up may occur.
		 */
		.base_dwell_time = 7500,
		.max_dwell_time_delta = 22500,
		/* based on 250bits per probe @1Mbps */
		.dwell_time_delta_per_probe = 2000,
		/* based on 250bits per probe @6Mbps (plus a bit more) */
		.dwell_time_delta_per_probe_5 = 350,
		.dwell_time_passive = 100000,
		.dwell_time_dfs = 150000,
		.num_probe_reqs = 2,
		.rssi_threshold = -90,
		.snr_threshold = 0,
	},
	.ht = {
		.rx_ba_win_size = 8,
		.tx_ba_win_size = 64,
		.inactivity_timeout = 10000,
		.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
	},
	/*
	 * Memory config for wl127x chips is given in the
	 * wl12xx_default_priv_conf struct. The below configuration is
	 * for wl128x chips.
	 */
	.mem = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 40,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 45,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},
	.fm_coex = {
		.enable = true,
		.swallow_period = 5,
		.n_divider_fref_set_1 = 0xff, /* default */
		.n_divider_fref_set_2 = 12,
		.m_divider_fref_set_1 = 148,
		.m_divider_fref_set_2 = 0xffff, /* default */
		.coex_pll_stabilization_time = 0xffffffff, /* default */
		.ldo_stabilization_time = 0xffff, /* default */
		.fm_disturbed_band_margin = 0xff, /* default */
		.swallow_clk_diff = 0xff, /* default */
	},
	.rx_streaming = {
		.duration = 150,
		.queues = 0x1,
		.interval = 20,
		.always = 0,
	},
	.fwlog = {
		.mode = WL12XX_FWLOG_ON_DEMAND,
		.mem_blocks = 2,
		.severity = 0,
		.timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
		.output = WL12XX_FWLOG_OUTPUT_HOST,
		.threshold = 0,
	},
	.rate = {
		.rate_retry_score = 32000,
		.per_add = 8192,
		.per_th1 = 2048,
		.per_th2 = 4096,
		.max_per = 8100,
		.inverse_curiosity_factor = 5,
		.tx_fail_low_th = 4,
		.tx_fail_high_th = 10,
		.per_alpha_shift = 4,
		.per_add_shift = 13,
		.per_beta1_shift = 10,
		.per_beta2_shift = 8,
		.rate_check_up = 2,
		.rate_check_down = 12,
		.rate_retry_policy = {
			0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00,
		},
	},
	.hangover = {
		.recover_time = 0,
		.hangover_period = 20,
		.dynamic_mode = 1,
		.early_termination_mode = 1,
		.max_period = 20,
		.min_period = 1,
		.increase_delta = 1,
		.decrease_delta = 2,
		.quiet_time = 4,
		.increase_time = 1,
		.window_size = 16,
	},
};
364
/*
 * Default wl12xx-private configuration: zero per-channel TX power
 * compensation for both bands, plus the wl127x memory settings that
 * wl12xx_identify_chip() copies into wl->conf.mem for 1271 chips.
 */
static struct wl12xx_priv_conf wl12xx_default_priv_conf = {
	.rf = {
		.tx_per_channel_power_compensation_2 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		},
		.tx_per_channel_power_compensation_5 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		},
	},
	.mem_wl127x = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 70,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 100,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},

};
388
389#define WL12XX_TX_HW_BLOCK_SPARE_DEFAULT 1
390#define WL12XX_TX_HW_BLOCK_GEM_SPARE 2
391#define WL12XX_TX_HW_BLOCK_SIZE 252
392
/*
 * Maps wl12xx firmware RX/TX rate indices (enum wl12xx_hw_rates, in the
 * same order as declared there) to 2.4GHz band rate-table indices.
 * CONF_HW_RXTX_RATE_UNSUPPORTED marks rates not exposed on this band.
 */
static const u8 wl12xx_rate_to_idx_2ghz[] = {
	/* MCS rates are used only with 11n */
	7,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI */
	7,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS7 */
	6,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS6 */
	5,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS5 */
	4,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS4 */
	3,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS3 */
	2,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS2 */
	1,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS1 */
	0,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS0 */

	11,                            /* WL12XX_CONF_HW_RXTX_RATE_54   */
	10,                            /* WL12XX_CONF_HW_RXTX_RATE_48   */
	9,                             /* WL12XX_CONF_HW_RXTX_RATE_36   */
	8,                             /* WL12XX_CONF_HW_RXTX_RATE_24   */

	/* TI-specific rate */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_22   */

	7,                             /* WL12XX_CONF_HW_RXTX_RATE_18   */
	6,                             /* WL12XX_CONF_HW_RXTX_RATE_12   */
	3,                             /* WL12XX_CONF_HW_RXTX_RATE_11   */
	5,                             /* WL12XX_CONF_HW_RXTX_RATE_9    */
	4,                             /* WL12XX_CONF_HW_RXTX_RATE_6    */
	2,                             /* WL12XX_CONF_HW_RXTX_RATE_5_5  */
	1,                             /* WL12XX_CONF_HW_RXTX_RATE_2    */
	0                              /* WL12XX_CONF_HW_RXTX_RATE_1    */
};
422
/*
 * Maps wl12xx firmware RX/TX rate indices (enum wl12xx_hw_rates, in the
 * same order as declared there) to 5GHz band rate-table indices.  CCK
 * rates (11/5.5/2/1 Mbps) do not exist on 5GHz and are marked
 * CONF_HW_RXTX_RATE_UNSUPPORTED.
 */
static const u8 wl12xx_rate_to_idx_5ghz[] = {
	/* MCS rates are used only with 11n */
	7,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI */
	7,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS7 */
	6,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS6 */
	5,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS5 */
	4,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS4 */
	3,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS3 */
	2,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS2 */
	1,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS1 */
	0,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS0 */

	7,                             /* WL12XX_CONF_HW_RXTX_RATE_54   */
	6,                             /* WL12XX_CONF_HW_RXTX_RATE_48   */
	5,                             /* WL12XX_CONF_HW_RXTX_RATE_36   */
	4,                             /* WL12XX_CONF_HW_RXTX_RATE_24   */

	/* TI-specific rate */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_22   */

	3,                             /* WL12XX_CONF_HW_RXTX_RATE_18   */
	2,                             /* WL12XX_CONF_HW_RXTX_RATE_12   */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_11   */
	1,                             /* WL12XX_CONF_HW_RXTX_RATE_9    */
	0,                             /* WL12XX_CONF_HW_RXTX_RATE_6    */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_5_5  */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_2    */
	CONF_HW_RXTX_RATE_UNSUPPORTED  /* WL12XX_CONF_HW_RXTX_RATE_1    */
};
452
/* Per-band selector for the two firmware-rate-to-index tables above. */
static const u8 *wl12xx_band_rate_to_idx[] = {
	[IEEE80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
	[IEEE80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
};
457
/*
 * Hardware rate indices used by the wl12xx firmware, from the highest
 * MCS rate down to 1Mbps.  The declaration order here must match the
 * entry order of the wl12xx_rate_to_idx_2ghz/_5ghz tables above, which
 * are indexed by these values.
 */
enum wl12xx_hw_rates {
	WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI = 0,
	WL12XX_CONF_HW_RXTX_RATE_MCS7,
	WL12XX_CONF_HW_RXTX_RATE_MCS6,
	WL12XX_CONF_HW_RXTX_RATE_MCS5,
	WL12XX_CONF_HW_RXTX_RATE_MCS4,
	WL12XX_CONF_HW_RXTX_RATE_MCS3,
	WL12XX_CONF_HW_RXTX_RATE_MCS2,
	WL12XX_CONF_HW_RXTX_RATE_MCS1,
	WL12XX_CONF_HW_RXTX_RATE_MCS0,
	WL12XX_CONF_HW_RXTX_RATE_54,
	WL12XX_CONF_HW_RXTX_RATE_48,
	WL12XX_CONF_HW_RXTX_RATE_36,
	WL12XX_CONF_HW_RXTX_RATE_24,
	WL12XX_CONF_HW_RXTX_RATE_22,
	WL12XX_CONF_HW_RXTX_RATE_18,
	WL12XX_CONF_HW_RXTX_RATE_12,
	WL12XX_CONF_HW_RXTX_RATE_11,
	WL12XX_CONF_HW_RXTX_RATE_9,
	WL12XX_CONF_HW_RXTX_RATE_6,
	WL12XX_CONF_HW_RXTX_RATE_5_5,
	WL12XX_CONF_HW_RXTX_RATE_2,
	WL12XX_CONF_HW_RXTX_RATE_1,
	WL12XX_CONF_HW_RXTX_RATE_MAX,
};
483
/*
 * Memory/register partition windows for the wl12xx chips, consumed via
 * wlcore_set_partition().  Each entry maps the host-visible address
 * space onto chip memory (.mem), registers (.reg) and two auxiliary
 * windows (.mem2/.mem3) for a given driver phase.
 */
static struct wlcore_partition_set wl12xx_ptable[PART_TABLE_LEN] = {
	[PART_DOWN] = {
		.mem = {
			.start = 0x00000000,
			.size  = 0x000177c0
		},
		.reg = {
			.start = REGISTERS_BASE,
			.size  = 0x00008800
		},
		.mem2 = {
			.start = 0x00000000,
			.size  = 0x00000000
		},
		.mem3 = {
			.start = 0x00000000,
			.size  = 0x00000000
		},
	},

	[PART_BOOT] = { /* in wl12xx we can use a mix of work and down
			 * partition here */
		.mem = {
			.start = 0x00040000,
			.size  = 0x00014fc0
		},
		.reg = {
			.start = REGISTERS_BASE,
			.size  = 0x00008800
		},
		.mem2 = {
			.start = 0x00000000,
			.size  = 0x00000000
		},
		.mem3 = {
			.start = 0x00000000,
			.size  = 0x00000000
		},
	},

	[PART_WORK] = {
		.mem = {
			.start = 0x00040000,
			.size  = 0x00014fc0
		},
		.reg = {
			.start = REGISTERS_BASE,
			.size  = 0x0000a000
		},
		.mem2 = {
			.start = 0x003004f8,
			.size  = 0x00000004
		},
		.mem3 = {
			.start = 0x00040404,
			.size  = 0x00000000
		},
	},

	[PART_DRPW] = {
		.mem = {
			.start = 0x00040000,
			.size  = 0x00014fc0
		},
		.reg = {
			.start = DRPW_BASE,
			.size  = 0x00006000
		},
		.mem2 = {
			.start = 0x00000000,
			.size  = 0x00000000
		},
		.mem3 = {
			.start = 0x00000000,
			.size  = 0x00000000
		}
	}
};
562
/*
 * Translation table from the chip-independent register identifiers used
 * by wlcore (wlcore_read_reg/wlcore_write_reg) to the wl12xx-specific
 * register addresses.
 */
static const int wl12xx_rtable[REG_TABLE_LEN] = {
	[REG_ECPU_CONTROL]		= WL12XX_REG_ECPU_CONTROL,
	[REG_INTERRUPT_NO_CLEAR]	= WL12XX_REG_INTERRUPT_NO_CLEAR,
	[REG_INTERRUPT_ACK]		= WL12XX_REG_INTERRUPT_ACK,
	[REG_COMMAND_MAILBOX_PTR]	= WL12XX_REG_COMMAND_MAILBOX_PTR,
	[REG_EVENT_MAILBOX_PTR]		= WL12XX_REG_EVENT_MAILBOX_PTR,
	[REG_INTERRUPT_TRIG]		= WL12XX_REG_INTERRUPT_TRIG,
	[REG_INTERRUPT_MASK]		= WL12XX_REG_INTERRUPT_MASK,
	[REG_PC_ON_RECOVERY]		= WL12XX_SCR_PAD4,
	[REG_CHIP_ID_B]			= WL12XX_CHIP_ID_B,
	[REG_CMD_MBOX_ADDRESS]		= WL12XX_CMD_MBOX_ADDRESS,

	/* data access memory addresses, used with partition translation */
	[REG_SLV_MEM_DATA]		= WL1271_SLV_MEM_DATA,
	[REG_SLV_REG_DATA]		= WL1271_SLV_REG_DATA,

	/* raw data access memory addresses */
	[REG_RAW_FW_STATUS_ADDR]	= FW_STATUS_ADDR,
};
582
583/* TODO: maybe move to a new header file? */
584#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-4-mr.bin"
585#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-4-sr.bin"
586#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-4-plt.bin"
587
588#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-4-mr.bin"
589#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin"
590#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin"
591
592static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
593{
594 if (wl->chip.id != CHIP_ID_1283_PG20) {
595 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
596 struct wl1271_rx_mem_pool_addr rx_mem_addr;
597
598 /*
599 * Choose the block we want to read
600 * For aggregated packets, only the first memory block
601 * should be retrieved. The FW takes care of the rest.
602 */
603 u32 mem_block = rx_desc & RX_MEM_BLOCK_MASK;
604
605 rx_mem_addr.addr = (mem_block << 8) +
606 le32_to_cpu(wl_mem_map->packet_memory_pool_start);
607
608 rx_mem_addr.addr_extra = rx_mem_addr.addr + 4;
609
610 wl1271_write(wl, WL1271_SLV_REG_DATA,
611 &rx_mem_addr, sizeof(rx_mem_addr), false);
612 }
613}
614
615static int wl12xx_identify_chip(struct wl1271 *wl)
616{
617 int ret = 0;
618
619 switch (wl->chip.id) {
620 case CHIP_ID_1271_PG10:
621 wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
622 wl->chip.id);
623
624 /* clear the alignment quirk, since we don't support it */
625 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
626
627 wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
628 wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
629 wl->mr_fw_name = WL127X_FW_NAME_MULTI;
630 memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
631 sizeof(wl->conf.mem));
632
633 /* read data preparation is only needed by wl127x */
634 wl->ops->prepare_read = wl127x_prepare_read;
635
636 break;
637
638 case CHIP_ID_1271_PG20:
639 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
640 wl->chip.id);
641
642 /* clear the alignment quirk, since we don't support it */
643 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
644
645 wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
646 wl->plt_fw_name = WL127X_PLT_FW_NAME;
647 wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
648 wl->mr_fw_name = WL127X_FW_NAME_MULTI;
649 memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
650 sizeof(wl->conf.mem));
651
652 /* read data preparation is only needed by wl127x */
653 wl->ops->prepare_read = wl127x_prepare_read;
654
655 break;
656
657 case CHIP_ID_1283_PG20:
658 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
659 wl->chip.id);
660 wl->plt_fw_name = WL128X_PLT_FW_NAME;
661 wl->sr_fw_name = WL128X_FW_NAME_SINGLE;
662 wl->mr_fw_name = WL128X_FW_NAME_MULTI;
663 break;
664 case CHIP_ID_1283_PG10:
665 default:
666 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
667 ret = -ENODEV;
668 goto out;
669 }
670
671out:
672 return ret;
673}
674
675static void wl12xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
676{
677 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
678 addr = (addr >> 1) + 0x30000;
679 wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr);
680
681 /* write value to OCP_POR_WDATA */
682 wl1271_write32(wl, WL12XX_OCP_DATA_WRITE, val);
683
684 /* write 1 to OCP_CMD */
685 wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE);
686}
687
688static u16 wl12xx_top_reg_read(struct wl1271 *wl, int addr)
689{
690 u32 val;
691 int timeout = OCP_CMD_LOOP;
692
693 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
694 addr = (addr >> 1) + 0x30000;
695 wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr);
696
697 /* write 2 to OCP_CMD */
698 wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ);
699
700 /* poll for data ready */
701 do {
702 val = wl1271_read32(wl, WL12XX_OCP_DATA_READ);
703 } while (!(val & OCP_READY_MASK) && --timeout);
704
705 if (!timeout) {
706 wl1271_warning("Top register access timed out.");
707 return 0xffff;
708 }
709
710 /* check data status and return if OK */
711 if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
712 return val & 0xffff;
713 else {
714 wl1271_warning("Top register access returned error.");
715 return 0xffff;
716 }
717}
718
719static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
720{
721 u16 spare_reg;
722
723 /* Mask bits [2] & [8:4] in the sys_clk_cfg register */
724 spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG);
725 if (spare_reg == 0xFFFF)
726 return -EFAULT;
727 spare_reg |= (BIT(3) | BIT(5) | BIT(6));
728 wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
729
730 /* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
731 wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG,
732 WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
733
734 /* Delay execution for 15msec, to let the HW settle */
735 mdelay(15);
736
737 return 0;
738}
739
740static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
741{
742 u16 tcxo_detection;
743
744 tcxo_detection = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG);
745 if (tcxo_detection & TCXO_DET_FAILED)
746 return false;
747
748 return true;
749}
750
751static bool wl128x_is_fref_valid(struct wl1271 *wl)
752{
753 u16 fref_detection;
754
755 fref_detection = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG);
756 if (fref_detection & FREF_CLK_DETECT_FAIL)
757 return false;
758
759 return true;
760}
761
762static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
763{
764 wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
765 wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
766 wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
767
768 return 0;
769}
770
771static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
772{
773 u16 spare_reg;
774 u16 pll_config;
775 u8 input_freq;
776
777 /* Mask bits [3:1] in the sys_clk_cfg register */
778 spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG);
779 if (spare_reg == 0xFFFF)
780 return -EFAULT;
781 spare_reg |= BIT(2);
782 wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
783
784 /* Handle special cases of the TCXO clock */
785 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
786 wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
787 return wl128x_manually_configure_mcs_pll(wl);
788
789 /* Set the input frequency according to the selected clock source */
790 input_freq = (clk & 1) + 1;
791
792 pll_config = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG);
793 if (pll_config == 0xFFFF)
794 return -EFAULT;
795 pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
796 pll_config |= MCS_PLL_ENABLE_HP;
797 wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
798
799 return 0;
800}
801
802/*
803 * WL128x has two clocks input - TCXO and FREF.
804 * TCXO is the main clock of the device, while FREF is used to sync
805 * between the GPS and the cellular modem.
806 * In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used
807 * as the WLAN/BT main clock.
808 */
809static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
810{
811 u16 sys_clk_cfg;
812
813 /* For XTAL-only modes, FREF will be used after switching from TCXO */
814 if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
815 wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
816 if (!wl128x_switch_tcxo_to_fref(wl))
817 return -EINVAL;
818 goto fref_clk;
819 }
820
821 /* Query the HW, to determine which clock source we should use */
822 sys_clk_cfg = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG);
823 if (sys_clk_cfg == 0xFFFF)
824 return -EINVAL;
825 if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
826 goto fref_clk;
827
828 /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
829 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
830 wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
831 if (!wl128x_switch_tcxo_to_fref(wl))
832 return -EINVAL;
833 goto fref_clk;
834 }
835
836 /* TCXO clock is selected */
837 if (!wl128x_is_tcxo_valid(wl))
838 return -EINVAL;
839 *selected_clock = wl->tcxo_clock;
840 goto config_mcs_pll;
841
842fref_clk:
843 /* FREF clock is selected */
844 if (!wl128x_is_fref_valid(wl))
845 return -EINVAL;
846 *selected_clock = wl->ref_clock;
847
848config_mcs_pll:
849 return wl128x_configure_mcs_pll(wl, *selected_clock);
850}
851
852static int wl127x_boot_clk(struct wl1271 *wl)
853{
854 u32 pause;
855 u32 clk;
856
857 if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
858 wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION;
859
860 if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
861 wl->ref_clock == CONF_REF_CLK_38_4_E ||
862 wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
863 /* ref clk: 19.2/38.4/38.4-XTAL */
864 clk = 0x3;
865 else if (wl->ref_clock == CONF_REF_CLK_26_E ||
866 wl->ref_clock == CONF_REF_CLK_52_E)
867 /* ref clk: 26/52 */
868 clk = 0x5;
869 else
870 return -EINVAL;
871
872 if (wl->ref_clock != CONF_REF_CLK_19_2_E) {
873 u16 val;
874 /* Set clock type (open drain) */
875 val = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE);
876 val &= FREF_CLK_TYPE_BITS;
877 wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
878
879 /* Set clock pull mode (no pull) */
880 val = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL);
881 val |= NO_PULL;
882 wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val);
883 } else {
884 u16 val;
885 /* Set clock polarity */
886 val = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY);
887 val &= FREF_CLK_POLARITY_BITS;
888 val |= CLK_REQ_OUTN_SEL;
889 wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
890 }
891
892 wl1271_write32(wl, WL12XX_PLL_PARAMETERS, clk);
893
894 pause = wl1271_read32(wl, WL12XX_PLL_PARAMETERS);
895
896 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
897
898 pause &= ~(WU_COUNTER_PAUSE_VAL);
899 pause |= WU_COUNTER_PAUSE_VAL;
900 wl1271_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause);
901
902 return 0;
903}
904
905static int wl1271_boot_soft_reset(struct wl1271 *wl)
906{
907 unsigned long timeout;
908 u32 boot_data;
909
910 /* perform soft reset */
911 wl1271_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
912
913 /* SOFT_RESET is self clearing */
914 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
915 while (1) {
916 boot_data = wl1271_read32(wl, WL12XX_SLV_SOFT_RESET);
917 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
918 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
919 break;
920
921 if (time_after(jiffies, timeout)) {
922 /* 1.2 check pWhalBus->uSelfClearTime if the
923 * timeout was reached */
924 wl1271_error("soft reset timeout");
925 return -1;
926 }
927
928 udelay(SOFT_RESET_STALL_TIME);
929 }
930
931 /* disable Rx/Tx */
932 wl1271_write32(wl, WL12XX_ENABLE, 0x0);
933
934 /* disable auto calibration on start*/
935 wl1271_write32(wl, WL12XX_SPARE_A2, 0xffff);
936
937 return 0;
938}
939
/*
 * Prepare the chip for firmware upload: configure the boot clock
 * (chip-family specific), continue the ELP wake-up sequence, program
 * the DRPw scratch register with the selected clock, mask all
 * interrupts and soft-reset the core.
 *
 * Returns 0 on success or a negative error code from any sub-step.
 */
static int wl12xx_pre_boot(struct wl1271 *wl)
{
	int ret = 0;
	u32 clk;
	int selected_clock = -1;

	/* wl128x and wl127x use different clock bring-up paths */
	if (wl->chip.id == CHIP_ID_1283_PG20) {
		ret = wl128x_boot_clk(wl, &selected_clock);
		if (ret < 0)
			goto out;
	} else {
		ret = wl127x_boot_clk(wl);
		if (ret < 0)
			goto out;
	}

	/* Continue the ELP wake up sequence */
	wl1271_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
	udelay(500);

	wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);

	/* Read-modify-write DRPW_SCRATCH_START register (see next state)
	   to be used by DRPw FW. The RTRIM value will be added by the FW
	   before taking DRPw out of reset */

	clk = wl1271_read32(wl, WL12XX_DRPW_SCRATCH_START);

	wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);

	/* encode the clock selection into bits [6:5] ((x << 1) << 4) */
	if (wl->chip.id == CHIP_ID_1283_PG20)
		clk |= ((selected_clock & 0x3) << 1) << 4;
	else
		clk |= (wl->ref_clock << 1) << 4;

	wl1271_write32(wl, WL12XX_DRPW_SCRATCH_START, clk);

	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);

	/* Disable interrupts */
	wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);

	ret = wl1271_boot_soft_reset(wl);
	if (ret < 0)
		goto out;

out:
	return ret;
}
989
/*
 * Last preparation steps before uploading the firmware image:
 * announce EEPROM-less operation, log the chip id, perform the
 * EEPROM-parameter read step, and (wl128x only) raise the SDIO
 * drive strength to 6mA.
 */
static void wl12xx_pre_upload(struct wl1271 *wl)
{
	u32 tmp;

	/* write firmware's last address (ie. it's length) to
	 * ACX_EEPROMLESS_IND_REG */
	wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");

	wl1271_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND);

	tmp = wlcore_read_reg(wl, REG_CHIP_ID_B);

	wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);

	/* 6. read the EEPROM parameters */
	/* NOTE(review): the value read here is never used; presumably the
	 * read access itself is the required step -- confirm */
	tmp = wl1271_read32(wl, WL12XX_SCR_PAD2);

	/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
	 * to upload_fw) */

	if (wl->chip.id == CHIP_ID_1283_PG20)
		wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
}
1013
/*
 * Enable firmware -> host interrupts: switch the IRQ line to
 * active-high polarity, unmask the full event vector, enable the
 * host-side handler, then narrow the mask to the interrupts the
 * driver actually handles and program the host interface config.
 */
static void wl12xx_enable_interrupts(struct wl1271 *wl)
{
	u32 polarity;

	polarity = wl12xx_top_reg_read(wl, OCP_REG_POLARITY);

	/* We use HIGH polarity, so unset the LOW bit */
	polarity &= ~POLARITY_LOW;
	wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity);

	wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_ALL_EVENTS_VECTOR);

	wlcore_enable_interrupts(wl);
	/* keep only the interrupts in WL1271_INTR_MASK unmasked */
	wlcore_write_reg(wl, REG_INTERRUPT_MASK,
			 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));

	wl1271_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
}
1032
/*
 * Full boot sequence for wl12xx chips: pre-boot clock/reset setup,
 * NVS upload, pre-upload tweaks, firmware upload and start, then
 * interrupt enabling.  Stops at the first failing step and returns
 * its error code; returns 0 when the chip is fully booted.
 */
static int wl12xx_boot(struct wl1271 *wl)
{
	int ret;

	ret = wl12xx_pre_boot(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_boot_upload_nvs(wl);
	if (ret < 0)
		return ret;

	wl12xx_pre_upload(wl);

	ret = wlcore_boot_upload_firmware(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_boot_run_firmware(wl);
	if (ret < 0)
		return ret;

	wl12xx_enable_interrupts(wl);

	return 0;
}
1060
/*
 * Deliver a command to the firmware: copy @buf (@len bytes) into the
 * command mailbox at @cmd_box_addr, then ring the command-trigger
 * interrupt to notify the firmware.
 */
static void wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
			       void *buf, size_t len)
{
	wl1271_write(wl, cmd_box_addr, buf, len, false);
	wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD);
}
1067
/* Acknowledge an event mailbox read by ringing the event-ack trigger. */
static void wl12xx_ack_event(struct wl1271 *wl)
{
	wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_EVENT_ACK);
}
1072
1073static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
1074{
1075 u32 blk_size = WL12XX_TX_HW_BLOCK_SIZE;
1076 u32 align_len = wlcore_calc_packet_alignment(wl, len);
1077
1078 return (align_len + blk_size - 1) / blk_size + spare_blks;
1079}
1080
1081static void
1082wl12xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
1083 u32 blks, u32 spare_blks)
1084{
1085 if (wl->chip.id == CHIP_ID_1283_PG20) {
1086 desc->wl128x_mem.total_mem_blocks = blks;
1087 } else {
1088 desc->wl127x_mem.extra_blocks = spare_blks;
1089 desc->wl127x_mem.total_mem_blocks = blks;
1090 }
1091}
1092
/*
 * Fill in the length/padding fields of a tx descriptor for @skb.
 * Both chip families store the aligned length in 32-bit words in
 * desc->length; the padding byte count goes into a chip-specific
 * field (extra_bytes on wl128x, tx_attr bits on wl127x).
 */
static void
wl12xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
			    struct sk_buff *skb)
{
	u32 aligned_len = wlcore_calc_packet_alignment(wl, skb->len);

	if (wl->chip.id == CHIP_ID_1283_PG20) {
		/* wl128x: padding is reported as a plain byte count */
		desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
		/* aligned length in words (aligned_len / 4) */
		desc->length = cpu_to_le16(aligned_len >> 2);

		wl1271_debug(DEBUG_TX,
			"tx_fill_hdr: hlid: %d len: %d life: %d mem: %d extra: %d",
			desc->hlid,
			le16_to_cpu(desc->length),
			le16_to_cpu(desc->life_time),
			desc->wl128x_mem.total_mem_blocks,
			desc->wl128x_mem.extra_bytes);
	} else {
		/* calculate number of padding bytes */
		int pad = aligned_len - skb->len;
		/* wl127x: padding is encoded into the tx attributes field */
		desc->tx_attr |=
			cpu_to_le16(pad << TX_HW_ATTR_OFST_LAST_WORD_PAD);

		/* Store the aligned length in terms of words */
		desc->length = cpu_to_le16(aligned_len >> 2);

		wl1271_debug(DEBUG_TX,
			"tx_fill_hdr: pad: %d hlid: %d len: %d life: %d mem: %d",
			pad, desc->hlid,
			le16_to_cpu(desc->length),
			le16_to_cpu(desc->life_time),
			desc->wl127x_mem.total_mem_blocks);
	}
}
1127
1128static enum wl_rx_buf_align
1129wl12xx_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
1130{
1131 if (rx_desc & RX_BUF_UNALIGNED_PAYLOAD)
1132 return WLCORE_RX_BUF_UNALIGNED;
1133
1134 return WLCORE_RX_BUF_ALIGNED;
1135}
1136
/*
 * Return the payload length of an rx packet: the raw data length
 * minus the descriptor header and its trailing pad bytes.
 * Returns 0 for malformed (too short) packets.
 */
static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
				    u32 data_len)
{
	struct wl1271_rx_descriptor *desc = rx_data;

	/* invalid packet */
	/* the first check must come first: it guarantees the descriptor is
	 * fully present before desc->pad_len is read by the second check */
	if (data_len < sizeof(*desc) ||
	    data_len < sizeof(*desc) + desc->pad_len)
		return 0;

	return data_len - sizeof(*desc) - desc->pad_len;
}
1149
1150static void wl12xx_tx_delayed_compl(struct wl1271 *wl)
1151{
1152 if (wl->fw_status->tx_results_counter == (wl->tx_results_count & 0xff))
1153 return;
1154
1155 wl1271_tx_complete(wl);
1156}
1157
/*
 * Send the chip-family specific firmware parameter commands.
 * wl128x: general + radio params and the host interface config
 * bitmap (which must be sent before wl1271_acx_init_mem_config()).
 * wl127x: general + radio + extended radio params.
 *
 * Returns 0 on success or the first failing command's error code.
 */
static int wl12xx_hw_init(struct wl1271 *wl)
{
	int ret;

	if (wl->chip.id == CHIP_ID_1283_PG20) {
		u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;

		ret = wl128x_cmd_general_parms(wl);
		if (ret < 0)
			goto out;
		ret = wl128x_cmd_radio_parms(wl);
		if (ret < 0)
			goto out;

		if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
			/* Enable SDIO padding */
			host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;

		/* Must be before wl1271_acx_init_mem_config() */
		ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap);
		if (ret < 0)
			goto out;
	} else {
		ret = wl1271_cmd_general_parms(wl);
		if (ret < 0)
			goto out;
		ret = wl1271_cmd_radio_parms(wl);
		if (ret < 0)
			goto out;
		ret = wl1271_cmd_ext_radio_parms(wl);
		if (ret < 0)
			goto out;
	}
out:
	return ret;
}
1194
/* In STA mode, the rate mask towards the AP is simply the vif's rate set. */
static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif)
{
	return wlvif->rate_set;
}
1200
1201static int wl12xx_identify_fw(struct wl1271 *wl)
1202{
1203 unsigned int *fw_ver = wl->chip.fw_ver;
1204
1205 /* Only new station firmwares support routing fw logs to the host */
1206 if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
1207 (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
1208 wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
1209
1210 /* This feature is not yet supported for AP mode */
1211 if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
1212 wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
1213
1214 return 0;
1215}
1216
/*
 * Initialize both the shared wlcore configuration and the wl12xx
 * private configuration from the driver's compile-time defaults.
 */
static void wl12xx_conf_init(struct wl1271 *wl)
{
	struct wl12xx_priv *priv = wl->priv;

	/* apply driver default configuration */
	memcpy(&wl->conf, &wl12xx_conf, sizeof(wl12xx_conf));

	/* apply default private configuration */
	memcpy(&priv->conf, &wl12xx_default_priv_conf, sizeof(priv->conf));
}
1227
1228static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
1229{
1230 bool supported = false;
1231 u8 major, minor;
1232
1233 if (wl->chip.id == CHIP_ID_1283_PG20) {
1234 major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
1235 minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
1236
1237 /* in wl128x we have the MAC address if the PG is >= (2, 1) */
1238 if (major > 2 || (major == 2 && minor >= 1))
1239 supported = true;
1240 } else {
1241 major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver);
1242 minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver);
1243
1244 /* in wl127x we have the MAC address if the PG is >= (3, 1) */
1245 if (major == 3 && minor >= 1)
1246 supported = true;
1247 }
1248
1249 wl1271_debug(DEBUG_PROBE,
1250 "PG Ver major = %d minor = %d, MAC %s present",
1251 major, minor, supported ? "is" : "is not");
1252
1253 return supported;
1254}
1255
/*
 * Read the factory BD_ADDR from the fuse ROM (DRPW partition) and
 * split it into the OUI and NIC halves cached on @wl.
 * Restores the DOWN partition before returning.
 */
static void wl12xx_get_fuse_mac(struct wl1271 *wl)
{
	u32 mac1, mac2;

	wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);

	mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1);
	mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2);

	/* these are the two parts of the BD_ADDR */
	/* OUI = low 16 bits of mac2 followed by the top byte of mac1 */
	wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
		((mac1 & 0xff000000) >> 24);
	/* NIC part = low 24 bits of mac1 */
	wl->fuse_nic_addr = mac1 & 0xffffff;

	wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
}
1272
1273static s8 wl12xx_get_pg_ver(struct wl1271 *wl)
1274{
1275 u32 die_info;
1276
1277 if (wl->chip.id == CHIP_ID_1283_PG20)
1278 die_info = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
1279 else
1280 die_info = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
1281
1282 return (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET;
1283}
1284
/* Fetch the MAC address from the fuse ROM, if this revision stores it there. */
static void wl12xx_get_mac(struct wl1271 *wl)
{
	if (wl12xx_mac_in_fuse(wl))
		wl12xx_get_fuse_mac(wl);
}
1290
/* wl12xx-family operations plugged into the shared wlcore framework. */
static struct wlcore_ops wl12xx_ops = {
	.identify_chip = wl12xx_identify_chip,
	.identify_fw = wl12xx_identify_fw,
	.boot = wl12xx_boot,
	.trigger_cmd = wl12xx_trigger_cmd,
	.ack_event = wl12xx_ack_event,
	.calc_tx_blocks = wl12xx_calc_tx_blocks,
	.set_tx_desc_blocks = wl12xx_set_tx_desc_blocks,
	.set_tx_desc_data_len = wl12xx_set_tx_desc_data_len,
	.get_rx_buf_align = wl12xx_get_rx_buf_align,
	.get_rx_packet_len = wl12xx_get_rx_packet_len,
	.tx_immediate_compl = NULL,	/* only delayed completion is used */
	.tx_delayed_compl = wl12xx_tx_delayed_compl,
	.hw_init = wl12xx_hw_init,
	.init_vif = NULL,		/* no chip-specific per-vif init hook */
	.sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
	.get_pg_ver = wl12xx_get_pg_ver,
	.get_mac = wl12xx_get_mac,
};
1310
/*
 * HT capabilities advertised to mac80211: greenfield, short GI at
 * 20MHz, 1-stream RX STBC; single spatial stream (MCS 0-7 rx mask).
 */
static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
	.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
	       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
	.ht_supported = true,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8,
	.mcs = {
		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
		.rx_highest = cpu_to_le16(72),	/* highest rx rate, in Mbps presumably */
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
		},
};
1323
/*
 * Platform probe: allocate the shared wlcore hw (with wl12xx private
 * data appended), fill in all chip-family parameters and operations,
 * then hand off to wlcore_probe().
 *
 * Returns 0 on success or a negative error code.
 */
static int __devinit wl12xx_probe(struct platform_device *pdev)
{
	struct wl1271 *wl;
	struct ieee80211_hw *hw;
	struct wl12xx_priv *priv;

	hw = wlcore_alloc_hw(sizeof(*priv));
	if (IS_ERR(hw)) {
		wl1271_error("can't allocate hw");
		return PTR_ERR(hw);
	}

	wl = hw->priv;
	wl->ops = &wl12xx_ops;
	wl->ptable = wl12xx_ptable;
	wl->rtable = wl12xx_rtable;
	wl->num_tx_desc = 16;
	wl->normal_tx_spare = WL12XX_TX_HW_BLOCK_SPARE_DEFAULT;
	wl->gem_tx_spare = WL12XX_TX_HW_BLOCK_GEM_SPARE;
	wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
	wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
	wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
	wl->fw_status_priv_len = 0;
	memcpy(&wl->ht_cap, &wl12xx_ht_cap, sizeof(wl12xx_ht_cap));
	wl12xx_conf_init(wl);

	return wlcore_probe(wl, pdev);
}
1352
/* Platform device ids this driver binds to. */
static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
	{ "wl12xx", 0 },
	{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
1358
/* Platform driver glue; remove is handled by the shared wlcore core. */
static struct platform_driver wl12xx_driver = {
	.probe		= wl12xx_probe,
	.remove		= __devexit_p(wlcore_remove),
	.id_table	= wl12xx_id_table,
	.driver = {
		.name	= "wl12xx_driver",
		.owner	= THIS_MODULE,
	}
};
1368
/* Module entry point: register the platform driver. */
static int __init wl12xx_init(void)
{
	return platform_driver_register(&wl12xx_driver);
}
module_init(wl12xx_init);
1374
/* Module exit point: unregister the platform driver. */
static void __exit wl12xx_exit(void)
{
	platform_driver_unregister(&wl12xx_driver);
}
module_exit(wl12xx_exit);
1380
1381MODULE_LICENSE("GPL v2");
1382MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
1383MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
1384MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
1385MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
1386MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
1387MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
1388MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/ti/wl12xx/reg.h
index 340db324bc26..79ede02e2587 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/ti/wl12xx/reg.h
@@ -33,16 +33,8 @@
33#define REGISTERS_DOWN_SIZE 0x00008800 33#define REGISTERS_DOWN_SIZE 0x00008800
34#define REGISTERS_WORK_SIZE 0x0000b000 34#define REGISTERS_WORK_SIZE 0x0000b000
35 35
36#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC
37#define FW_STATUS_ADDR (0x14FC0 + 0xA000) 36#define FW_STATUS_ADDR (0x14FC0 + 0xA000)
38 37
39/* ELP register commands */
40#define ELPCTRL_WAKE_UP 0x1
41#define ELPCTRL_WAKE_UP_WLAN_READY 0x5
42#define ELPCTRL_SLEEP 0x0
43/* ELP WLAN_READY bit */
44#define ELPCTRL_WLAN_READY 0x2
45
46/*=============================================== 38/*===============================================
47 Host Software Reset - 32bit RW 39 Host Software Reset - 32bit RW
48 ------------------------------------------ 40 ------------------------------------------
@@ -57,14 +49,14 @@
57 (not self-clearing), the Wlan hardware 49 (not self-clearing), the Wlan hardware
58 exits the software reset state. 50 exits the software reset state.
59===============================================*/ 51===============================================*/
60#define ACX_REG_SLV_SOFT_RESET (REGISTERS_BASE + 0x0000) 52#define WL12XX_SLV_SOFT_RESET (REGISTERS_BASE + 0x0000)
61 53
62#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008) 54#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008)
63#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c) 55#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c)
64#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018) 56#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018)
65 57
66#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474) 58#define WL12XX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474)
67#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478) 59#define WL12XX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478)
68 60
69/*============================================= 61/*=============================================
70 Host Interrupt Mask Register - 32bit (RW) 62 Host Interrupt Mask Register - 32bit (RW)
@@ -94,7 +86,7 @@
94 21- - 86 21- -
95 Default: 0x0001 87 Default: 0x0001
96*==============================================*/ 88*==============================================*/
97#define ACX_REG_INTERRUPT_MASK (REGISTERS_BASE + 0x04DC) 89#define WL12XX_REG_INTERRUPT_MASK (REGISTERS_BASE + 0x04DC)
98 90
99/*============================================= 91/*=============================================
100 Host Interrupt Mask Set 16bit, (Write only) 92 Host Interrupt Mask Set 16bit, (Write only)
@@ -125,7 +117,7 @@
125 Reading this register doesn't 117 Reading this register doesn't
126 effect its content. 118 effect its content.
127=============================================*/ 119=============================================*/
128#define ACX_REG_INTERRUPT_NO_CLEAR (REGISTERS_BASE + 0x04E8) 120#define WL12XX_REG_INTERRUPT_NO_CLEAR (REGISTERS_BASE + 0x04E8)
129 121
130/*============================================= 122/*=============================================
131 Host Interrupt Status Clear on Read Register 123 Host Interrupt Status Clear on Read Register
@@ -148,9 +140,9 @@
148 HINT_STS_ND registers, thus making the 140 HINT_STS_ND registers, thus making the
149 assotiated interrupt inactive. (0-no effect) 141 assotiated interrupt inactive. (0-no effect)
150==============================================*/ 142==============================================*/
151#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0) 143#define WL12XX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0)
152 144
153#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538) 145#define WL12XX_REG_RX_DRIVER_COUNTER (REGISTERS_BASE + 0x0538)
154 146
155/* Device Configuration registers*/ 147/* Device Configuration registers*/
156#define SOR_CFG (REGISTERS_BASE + 0x0800) 148#define SOR_CFG (REGISTERS_BASE + 0x0800)
@@ -175,9 +167,9 @@
175 1 halt eCPU 167 1 halt eCPU
176 0 enable eCPU 168 0 enable eCPU
177 ===============================================*/ 169 ===============================================*/
178#define ACX_REG_ECPU_CONTROL (REGISTERS_BASE + 0x0804) 170#define WL12XX_REG_ECPU_CONTROL (REGISTERS_BASE + 0x0804)
179 171
180#define HI_CFG (REGISTERS_BASE + 0x0808) 172#define WL12XX_HI_CFG (REGISTERS_BASE + 0x0808)
181 173
182/*=============================================== 174/*===============================================
183 EEPROM Burst Read Start - 32bit RW 175 EEPROM Burst Read Start - 32bit RW
@@ -196,72 +188,67 @@
196*================================================*/ 188*================================================*/
197#define ACX_REG_EE_START (REGISTERS_BASE + 0x080C) 189#define ACX_REG_EE_START (REGISTERS_BASE + 0x080C)
198 190
199#define OCP_POR_CTR (REGISTERS_BASE + 0x09B4) 191#define WL12XX_OCP_POR_CTR (REGISTERS_BASE + 0x09B4)
200#define OCP_DATA_WRITE (REGISTERS_BASE + 0x09B8) 192#define WL12XX_OCP_DATA_WRITE (REGISTERS_BASE + 0x09B8)
201#define OCP_DATA_READ (REGISTERS_BASE + 0x09BC) 193#define WL12XX_OCP_DATA_READ (REGISTERS_BASE + 0x09BC)
202#define OCP_CMD (REGISTERS_BASE + 0x09C0) 194#define WL12XX_OCP_CMD (REGISTERS_BASE + 0x09C0)
203
204#define WL1271_HOST_WR_ACCESS (REGISTERS_BASE + 0x09F8)
205 195
206#define CHIP_ID_B (REGISTERS_BASE + 0x5674) 196#define WL12XX_HOST_WR_ACCESS (REGISTERS_BASE + 0x09F8)
207 197
208#define CHIP_ID_1271_PG10 (0x4030101) 198#define WL12XX_CHIP_ID_B (REGISTERS_BASE + 0x5674)
209#define CHIP_ID_1271_PG20 (0x4030111)
210#define CHIP_ID_1283_PG10 (0x05030101)
211#define CHIP_ID_1283_PG20 (0x05030111)
212 199
213#define ENABLE (REGISTERS_BASE + 0x5450) 200#define WL12XX_ENABLE (REGISTERS_BASE + 0x5450)
214 201
215/* Power Management registers */ 202/* Power Management registers */
216#define ELP_CFG_MODE (REGISTERS_BASE + 0x5804) 203#define WL12XX_ELP_CFG_MODE (REGISTERS_BASE + 0x5804)
217#define ELP_CMD (REGISTERS_BASE + 0x5808) 204#define WL12XX_ELP_CMD (REGISTERS_BASE + 0x5808)
218#define PLL_CAL_TIME (REGISTERS_BASE + 0x5810) 205#define WL12XX_PLL_CAL_TIME (REGISTERS_BASE + 0x5810)
219#define CLK_REQ_TIME (REGISTERS_BASE + 0x5814) 206#define WL12XX_CLK_REQ_TIME (REGISTERS_BASE + 0x5814)
220#define CLK_BUF_TIME (REGISTERS_BASE + 0x5818) 207#define WL12XX_CLK_BUF_TIME (REGISTERS_BASE + 0x5818)
221 208
222#define CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820) 209#define WL12XX_CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820)
223 210
224/* Scratch Pad registers*/ 211/* Scratch Pad registers*/
225#define SCR_PAD0 (REGISTERS_BASE + 0x5608) 212#define WL12XX_SCR_PAD0 (REGISTERS_BASE + 0x5608)
226#define SCR_PAD1 (REGISTERS_BASE + 0x560C) 213#define WL12XX_SCR_PAD1 (REGISTERS_BASE + 0x560C)
227#define SCR_PAD2 (REGISTERS_BASE + 0x5610) 214#define WL12XX_SCR_PAD2 (REGISTERS_BASE + 0x5610)
228#define SCR_PAD3 (REGISTERS_BASE + 0x5614) 215#define WL12XX_SCR_PAD3 (REGISTERS_BASE + 0x5614)
229#define SCR_PAD4 (REGISTERS_BASE + 0x5618) 216#define WL12XX_SCR_PAD4 (REGISTERS_BASE + 0x5618)
230#define SCR_PAD4_SET (REGISTERS_BASE + 0x561C) 217#define WL12XX_SCR_PAD4_SET (REGISTERS_BASE + 0x561C)
231#define SCR_PAD4_CLR (REGISTERS_BASE + 0x5620) 218#define WL12XX_SCR_PAD4_CLR (REGISTERS_BASE + 0x5620)
232#define SCR_PAD5 (REGISTERS_BASE + 0x5624) 219#define WL12XX_SCR_PAD5 (REGISTERS_BASE + 0x5624)
233#define SCR_PAD5_SET (REGISTERS_BASE + 0x5628) 220#define WL12XX_SCR_PAD5_SET (REGISTERS_BASE + 0x5628)
234#define SCR_PAD5_CLR (REGISTERS_BASE + 0x562C) 221#define WL12XX_SCR_PAD5_CLR (REGISTERS_BASE + 0x562C)
235#define SCR_PAD6 (REGISTERS_BASE + 0x5630) 222#define WL12XX_SCR_PAD6 (REGISTERS_BASE + 0x5630)
236#define SCR_PAD7 (REGISTERS_BASE + 0x5634) 223#define WL12XX_SCR_PAD7 (REGISTERS_BASE + 0x5634)
237#define SCR_PAD8 (REGISTERS_BASE + 0x5638) 224#define WL12XX_SCR_PAD8 (REGISTERS_BASE + 0x5638)
238#define SCR_PAD9 (REGISTERS_BASE + 0x563C) 225#define WL12XX_SCR_PAD9 (REGISTERS_BASE + 0x563C)
239 226
240/* Spare registers*/ 227/* Spare registers*/
241#define SPARE_A1 (REGISTERS_BASE + 0x0994) 228#define WL12XX_SPARE_A1 (REGISTERS_BASE + 0x0994)
242#define SPARE_A2 (REGISTERS_BASE + 0x0998) 229#define WL12XX_SPARE_A2 (REGISTERS_BASE + 0x0998)
243#define SPARE_A3 (REGISTERS_BASE + 0x099C) 230#define WL12XX_SPARE_A3 (REGISTERS_BASE + 0x099C)
244#define SPARE_A4 (REGISTERS_BASE + 0x09A0) 231#define WL12XX_SPARE_A4 (REGISTERS_BASE + 0x09A0)
245#define SPARE_A5 (REGISTERS_BASE + 0x09A4) 232#define WL12XX_SPARE_A5 (REGISTERS_BASE + 0x09A4)
246#define SPARE_A6 (REGISTERS_BASE + 0x09A8) 233#define WL12XX_SPARE_A6 (REGISTERS_BASE + 0x09A8)
247#define SPARE_A7 (REGISTERS_BASE + 0x09AC) 234#define WL12XX_SPARE_A7 (REGISTERS_BASE + 0x09AC)
248#define SPARE_A8 (REGISTERS_BASE + 0x09B0) 235#define WL12XX_SPARE_A8 (REGISTERS_BASE + 0x09B0)
249#define SPARE_B1 (REGISTERS_BASE + 0x5420) 236#define WL12XX_SPARE_B1 (REGISTERS_BASE + 0x5420)
250#define SPARE_B2 (REGISTERS_BASE + 0x5424) 237#define WL12XX_SPARE_B2 (REGISTERS_BASE + 0x5424)
251#define SPARE_B3 (REGISTERS_BASE + 0x5428) 238#define WL12XX_SPARE_B3 (REGISTERS_BASE + 0x5428)
252#define SPARE_B4 (REGISTERS_BASE + 0x542C) 239#define WL12XX_SPARE_B4 (REGISTERS_BASE + 0x542C)
253#define SPARE_B5 (REGISTERS_BASE + 0x5430) 240#define WL12XX_SPARE_B5 (REGISTERS_BASE + 0x5430)
254#define SPARE_B6 (REGISTERS_BASE + 0x5434) 241#define WL12XX_SPARE_B6 (REGISTERS_BASE + 0x5434)
255#define SPARE_B7 (REGISTERS_BASE + 0x5438) 242#define WL12XX_SPARE_B7 (REGISTERS_BASE + 0x5438)
256#define SPARE_B8 (REGISTERS_BASE + 0x543C) 243#define WL12XX_SPARE_B8 (REGISTERS_BASE + 0x543C)
257 244
258#define PLL_PARAMETERS (REGISTERS_BASE + 0x6040) 245#define WL12XX_PLL_PARAMETERS (REGISTERS_BASE + 0x6040)
259#define WU_COUNTER_PAUSE (REGISTERS_BASE + 0x6008) 246#define WL12XX_WU_COUNTER_PAUSE (REGISTERS_BASE + 0x6008)
260#define WELP_ARM_COMMAND (REGISTERS_BASE + 0x6100) 247#define WL12XX_WELP_ARM_COMMAND (REGISTERS_BASE + 0x6100)
261#define DRPW_SCRATCH_START (DRPW_BASE + 0x002C) 248#define WL12XX_DRPW_SCRATCH_START (DRPW_BASE + 0x002C)
262 249
263 250#define WL12XX_CMD_MBOX_ADDRESS 0x407B4
264#define ACX_SLV_SOFT_RESET_BIT BIT(1) 251
265#define ACX_REG_EEPROM_START_BIT BIT(1) 252#define ACX_REG_EEPROM_START_BIT BIT(1)
266 253
267/* Command/Information Mailbox Pointers */ 254/* Command/Information Mailbox Pointers */
@@ -279,7 +266,7 @@
279 the host receives the Init Complete interrupt from 266 the host receives the Init Complete interrupt from
280 the Wlan hardware. 267 the Wlan hardware.
281 ===============================================*/ 268 ===============================================*/
282#define REG_COMMAND_MAILBOX_PTR (SCR_PAD0) 269#define WL12XX_REG_COMMAND_MAILBOX_PTR (WL12XX_SCR_PAD0)
283 270
284/*=============================================== 271/*===============================================
285 Information Mailbox Pointer - 32bit RW 272 Information Mailbox Pointer - 32bit RW
@@ -294,7 +281,7 @@
294 until after the host receives the Init Complete interrupt from 281 until after the host receives the Init Complete interrupt from
295 the Wlan hardware. 282 the Wlan hardware.
296 ===============================================*/ 283 ===============================================*/
297#define REG_EVENT_MAILBOX_PTR (SCR_PAD1) 284#define WL12XX_REG_EVENT_MAILBOX_PTR (WL12XX_SCR_PAD1)
298 285
299/*=============================================== 286/*===============================================
300 EEPROM Read/Write Request 32bit RW 287 EEPROM Read/Write Request 32bit RW
@@ -365,26 +352,6 @@
365#define ACX_CONT_WIND_MIN_MASK 0x0000007f 352#define ACX_CONT_WIND_MIN_MASK 0x0000007f
366#define ACX_CONT_WIND_MAX 0x03ff0000 353#define ACX_CONT_WIND_MAX 0x03ff0000
367 354
368/*===============================================
369 HI_CFG Interface Configuration Register Values
370 ------------------------------------------
371 ===============================================*/
372#define HI_CFG_UART_ENABLE 0x00000004
373#define HI_CFG_RST232_ENABLE 0x00000008
374#define HI_CFG_CLOCK_REQ_SELECT 0x00000010
375#define HI_CFG_HOST_INT_ENABLE 0x00000020
376#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040
377#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080
378#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100
379#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200
380#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400
381
382#define HI_CFG_DEF_VAL \
383 (HI_CFG_UART_ENABLE | \
384 HI_CFG_RST232_ENABLE | \
385 HI_CFG_CLOCK_REQ_SELECT | \
386 HI_CFG_HOST_INT_ENABLE)
387
388#define REF_FREQ_19_2 0 355#define REF_FREQ_19_2 0
389#define REF_FREQ_26_0 1 356#define REF_FREQ_26_0 1
390#define REF_FREQ_38_4 2 357#define REF_FREQ_38_4 2
@@ -400,38 +367,19 @@
400#define LUT_PARAM_BB_PLL_LOOP_FILTER 5 367#define LUT_PARAM_BB_PLL_LOOP_FILTER 5
401#define LUT_PARAM_NUM 6 368#define LUT_PARAM_NUM 6
402 369
403#define ACX_EEPROMLESS_IND_REG (SCR_PAD4) 370#define WL12XX_EEPROMLESS_IND (WL12XX_SCR_PAD4)
404#define USE_EEPROM 0 371#define USE_EEPROM 0
405#define SOFT_RESET_MAX_TIME 1000000
406#define SOFT_RESET_STALL_TIME 1000
407#define NVS_DATA_BUNDARY_ALIGNMENT 4 372#define NVS_DATA_BUNDARY_ALIGNMENT 4
408 373
409
410/* Firmware image load chunk size */
411#define CHUNK_SIZE 16384
412
413/* Firmware image header size */ 374/* Firmware image header size */
414#define FW_HDR_SIZE 8 375#define FW_HDR_SIZE 8
415 376
416#define ECPU_CONTROL_HALT 0x00000101
417
418
419/****************************************************************************** 377/******************************************************************************
420 378
421 CHANNELS, BAND & REG DOMAINS definitions 379 CHANNELS, BAND & REG DOMAINS definitions
422 380
423******************************************************************************/ 381******************************************************************************/
424 382
425
426enum {
427 RADIO_BAND_2_4GHZ = 0, /* 2.4 Ghz band */
428 RADIO_BAND_5GHZ = 1, /* 5 Ghz band */
429 RADIO_BAND_JAPAN_4_9_GHZ = 2,
430 DEFAULT_BAND = RADIO_BAND_2_4GHZ,
431 INVALID_BAND = 0xFE,
432 MAX_RADIO_BANDS = 0xFF
433};
434
435#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */ 383#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */
436#define OFDM_RATE_BIT BIT(6) 384#define OFDM_RATE_BIT BIT(6)
437#define PBCC_RATE_BIT BIT(7) 385#define PBCC_RATE_BIT BIT(7)
@@ -465,14 +413,82 @@ b12-b0 - Supported Rate indicator bits as defined below.
465 413
466******************************************************************************/ 414******************************************************************************/
467 415
416#define OCP_CMD_LOOP 32
417#define OCP_CMD_WRITE 0x1
418#define OCP_CMD_READ 0x2
419#define OCP_READY_MASK BIT(18)
420#define OCP_STATUS_MASK (BIT(16) | BIT(17))
421#define OCP_STATUS_NO_RESP 0x00000
422#define OCP_STATUS_OK 0x10000
423#define OCP_STATUS_REQ_FAILED 0x20000
424#define OCP_STATUS_RESP_ERROR 0x30000
425
426#define OCP_REG_POLARITY 0x0064
427#define OCP_REG_CLK_TYPE 0x0448
428#define OCP_REG_CLK_POLARITY 0x0cb2
429#define OCP_REG_CLK_PULL 0x0cb4
430
431#define POLARITY_LOW BIT(1)
432#define NO_PULL (BIT(14) | BIT(15))
433
434#define FREF_CLK_TYPE_BITS 0xfffffe7f
435#define CLK_REQ_PRCM 0x100
436#define FREF_CLK_POLARITY_BITS 0xfffff8ff
437#define CLK_REQ_OUTN_SEL 0x700
438
439#define WU_COUNTER_PAUSE_VAL 0x3FF
440
441/* PLL configuration algorithm for wl128x */
442#define SYS_CLK_CFG_REG 0x2200
443/* Bit[0] - 0-TCXO, 1-FREF */
444#define MCS_PLL_CLK_SEL_FREF BIT(0)
445/* Bit[3:2] - 01-TCXO, 10-FREF */
446#define WL_CLK_REQ_TYPE_FREF BIT(3)
447#define WL_CLK_REQ_TYPE_PG2 (BIT(3) | BIT(2))
448/* Bit[4] - 0-TCXO, 1-FREF */
449#define PRCM_CM_EN_MUX_WLAN_FREF BIT(4)
450
451#define TCXO_ILOAD_INT_REG 0x2264
452#define TCXO_CLK_DETECT_REG 0x2266
453
454#define TCXO_DET_FAILED BIT(4)
455
456#define FREF_ILOAD_INT_REG 0x2084
457#define FREF_CLK_DETECT_REG 0x2086
458#define FREF_CLK_DETECT_FAIL BIT(4)
459
460/* Use this reg for masking during driver access */
461#define WL_SPARE_REG 0x2320
462#define WL_SPARE_VAL BIT(2)
463/* Bit[6:5:3] - mask wl write SYS_CLK_CFG[8:5:2:4] */
464#define WL_SPARE_MASK_8526 (BIT(6) | BIT(5) | BIT(3))
465
466#define PLL_LOCK_COUNTERS_REG 0xD8C
467#define PLL_LOCK_COUNTERS_COEX 0x0F
468#define PLL_LOCK_COUNTERS_MCS 0xF0
469#define MCS_PLL_OVERRIDE_REG 0xD90
470#define MCS_PLL_CONFIG_REG 0xD92
471#define MCS_SEL_IN_FREQ_MASK 0x0070
472#define MCS_SEL_IN_FREQ_SHIFT 4
473#define MCS_PLL_CONFIG_REG_VAL 0x73
474#define MCS_PLL_ENABLE_HP (BIT(0) | BIT(1))
475
476#define MCS_PLL_M_REG 0xD94
477#define MCS_PLL_N_REG 0xD96
478#define MCS_PLL_M_REG_VAL 0xC8
479#define MCS_PLL_N_REG_VAL 0x07
480
481#define SDIO_IO_DS 0xd14
482
483/* SDIO/wSPI DS configuration values */
484enum {
485 HCI_IO_DS_8MA = 0,
486 HCI_IO_DS_4MA = 1, /* default */
487 HCI_IO_DS_6MA = 2,
488 HCI_IO_DS_2MA = 3,
489};
468 490
469/************************************************************************* 491/* end PLL configuration algorithm for wl128x */
470
471 Interrupt Trigger Register (Host -> WiLink)
472
473**************************************************************************/
474
475/* Hardware to Embedded CPU Interrupts - first 32-bit register set */
476 492
477/* 493/*
478 * Host Command Interrupt. Setting this bit masks 494 * Host Command Interrupt. Setting this bit masks
@@ -480,7 +496,7 @@ b12-b0 - Supported Rate indicator bits as defined below.
480 * the FW that it has sent a command 496 * the FW that it has sent a command
481 * to the Wlan hardware Command Mailbox. 497 * to the Wlan hardware Command Mailbox.
482 */ 498 */
483#define INTR_TRIG_CMD BIT(0) 499#define WL12XX_INTR_TRIG_CMD BIT(0)
484 500
485/* 501/*
486 * Host Event Acknowlegde Interrupt. The host 502 * Host Event Acknowlegde Interrupt. The host
@@ -488,42 +504,27 @@ b12-b0 - Supported Rate indicator bits as defined below.
488 * the unsolicited information from the event 504 * the unsolicited information from the event
489 * mailbox. 505 * mailbox.
490 */ 506 */
491#define INTR_TRIG_EVENT_ACK BIT(1) 507#define WL12XX_INTR_TRIG_EVENT_ACK BIT(1)
492
493/*
494 * The host sets this bit to inform the Wlan
495 * FW that a TX packet is in the XFER
496 * Buffer #0.
497 */
498#define INTR_TRIG_TX_PROC0 BIT(2)
499
500/*
501 * The host sets this bit to inform the FW
502 * that it read a packet from RX XFER
503 * Buffer #0.
504 */
505#define INTR_TRIG_RX_PROC0 BIT(3)
506
507#define INTR_TRIG_DEBUG_ACK BIT(4)
508 508
509#define INTR_TRIG_STATE_CHANGED BIT(5) 509/*===============================================
510 510 HI_CFG Interface Configuration Register Values
511 511 ------------------------------------------
512/* Hardware to Embedded CPU Interrupts - second 32-bit register set */ 512 ===============================================*/
513 513#define HI_CFG_UART_ENABLE 0x00000004
514/* 514#define HI_CFG_RST232_ENABLE 0x00000008
515 * The host sets this bit to inform the FW 515#define HI_CFG_CLOCK_REQ_SELECT 0x00000010
516 * that it read a packet from RX XFER 516#define HI_CFG_HOST_INT_ENABLE 0x00000020
517 * Buffer #1. 517#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040
518 */ 518#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080
519#define INTR_TRIG_RX_PROC1 BIT(17) 519#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100
520#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200
521#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400
520 522
521/* 523#define HI_CFG_DEF_VAL \
522 * The host sets this bit to inform the Wlan 524 (HI_CFG_UART_ENABLE | \
523 * hardware that a TX packet is in the XFER 525 HI_CFG_RST232_ENABLE | \
524 * Buffer #1. 526 HI_CFG_CLOCK_REQ_SELECT | \
525 */ 527 HI_CFG_HOST_INT_ENABLE)
526#define INTR_TRIG_TX_PROC1 BIT(18)
527 528
528#define WL127X_REG_FUSE_DATA_2_1 0x050a 529#define WL127X_REG_FUSE_DATA_2_1 0x050a
529#define WL128X_REG_FUSE_DATA_2_1 0x2152 530#define WL128X_REG_FUSE_DATA_2_1 0x2152
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
new file mode 100644
index 000000000000..74cd332e23ef
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -0,0 +1,31 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL12XX_PRIV_H__
23#define __WL12XX_PRIV_H__
24
25#include "conf.h"
26
27struct wl12xx_priv {
28 struct wl12xx_priv_conf conf;
29};
30
31#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
new file mode 100644
index 000000000000..9d04c38938bc
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -0,0 +1,41 @@
1config WLCORE
2 tristate "TI wlcore support"
3 depends on WL_TI && GENERIC_HARDIRQS
4 depends on INET
5 select FW_LOADER
6 ---help---
7 This module contains the main code for TI WLAN chips. It abstracts
8 hardware-specific differences among different chipset families.
9 Each chipset family needs to implement its own lower-level module
10 that will depend on this module for the common code.
11
12 If you choose to build a module, it will be called wlcore. Say N if
13 unsure.
14
15config WLCORE_SPI
16 tristate "TI wlcore SPI support"
17 depends on WLCORE && SPI_MASTER
18 select CRC7
19 ---help---
20 This module adds support for the SPI interface of adapters using
21 TI WLAN chipsets. Select this if your platform is using
22 the SPI bus.
23
24 If you choose to build a module, it'll be called wlcore_spi.
25 Say N if unsure.
26
27config WLCORE_SDIO
28 tristate "TI wlcore SDIO support"
29 depends on WLCORE && MMC
30 ---help---
31 This module adds support for the SDIO interface of adapters using
32 TI WLAN chipsets. Select this if your platform is using
33 the SDIO bus.
34
35 If you choose to build a module, it'll be called wlcore_sdio.
36 Say N if unsure.
37
38config WL12XX_PLATFORM_DATA
39 bool
40 depends on WLCORE_SDIO != n || WL1251_SDIO != n
41 default y
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
new file mode 100644
index 000000000000..d9fba9e32130
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -0,0 +1,15 @@
1wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
2 boot.o init.o debugfs.o scan.o
3
4wlcore_spi-objs = spi.o
5wlcore_sdio-objs = sdio.o
6
7wlcore-$(CONFIG_NL80211_TESTMODE) += testmode.o
8obj-$(CONFIG_WLCORE) += wlcore.o
9obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o
10obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o
11
12# small builtin driver bit
13obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
14
15ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index bc96db0683a5..5912541a925e 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -28,11 +28,11 @@
28#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "wl12xx.h" 31#include "wlcore.h"
32#include "debug.h" 32#include "debug.h"
33#include "wl12xx_80211.h" 33#include "wl12xx_80211.h"
34#include "reg.h"
35#include "ps.h" 34#include "ps.h"
35#include "hw_ops.h"
36 36
37int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif, 37int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif,
38 u8 wake_up_event, u8 listen_interval) 38 u8 wake_up_event, u8 listen_interval)
@@ -757,7 +757,10 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
757 757
758 /* configure one AP supported rate class */ 758 /* configure one AP supported rate class */
759 acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx); 759 acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
760 acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set); 760
761 /* the AP policy is HW specific */
762 acx->rate_policy.enabled_rates =
763 cpu_to_le32(wlcore_hw_sta_get_ap_rate_mask(wl, wlvif));
761 acx->rate_policy.short_retry_limit = c->short_retry_limit; 764 acx->rate_policy.short_retry_limit = c->short_retry_limit;
762 acx->rate_policy.long_retry_limit = c->long_retry_limit; 765 acx->rate_policy.long_retry_limit = c->long_retry_limit;
763 acx->rate_policy.aflags = c->aflags; 766 acx->rate_policy.aflags = c->aflags;
@@ -969,17 +972,14 @@ int wl12xx_acx_mem_cfg(struct wl1271 *wl)
969 goto out; 972 goto out;
970 } 973 }
971 974
972 if (wl->chip.id == CHIP_ID_1283_PG20) 975 mem = &wl->conf.mem;
973 mem = &wl->conf.mem_wl128x;
974 else
975 mem = &wl->conf.mem_wl127x;
976 976
977 /* memory config */ 977 /* memory config */
978 mem_conf->num_stations = mem->num_stations; 978 mem_conf->num_stations = mem->num_stations;
979 mem_conf->rx_mem_block_num = mem->rx_block_num; 979 mem_conf->rx_mem_block_num = mem->rx_block_num;
980 mem_conf->tx_min_mem_block_num = mem->tx_min_block_num; 980 mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
981 mem_conf->num_ssid_profiles = mem->ssid_profiles; 981 mem_conf->num_ssid_profiles = mem->ssid_profiles;
982 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); 982 mem_conf->total_tx_descriptors = cpu_to_le32(wl->num_tx_desc);
983 mem_conf->dyn_mem_enable = mem->dynamic_memory; 983 mem_conf->dyn_mem_enable = mem->dynamic_memory;
984 mem_conf->tx_free_req = mem->min_req_tx_blocks; 984 mem_conf->tx_free_req = mem->min_req_tx_blocks;
985 mem_conf->rx_free_req = mem->min_req_rx_blocks; 985 mem_conf->rx_free_req = mem->min_req_rx_blocks;
@@ -998,32 +998,6 @@ out:
998 return ret; 998 return ret;
999} 999}
1000 1000
1001int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap)
1002{
1003 struct wl1271_acx_host_config_bitmap *bitmap_conf;
1004 int ret;
1005
1006 bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
1007 if (!bitmap_conf) {
1008 ret = -ENOMEM;
1009 goto out;
1010 }
1011
1012 bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
1013
1014 ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
1015 bitmap_conf, sizeof(*bitmap_conf));
1016 if (ret < 0) {
1017 wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
1018 goto out;
1019 }
1020
1021out:
1022 kfree(bitmap_conf);
1023
1024 return ret;
1025}
1026
1027int wl1271_acx_init_mem_config(struct wl1271 *wl) 1001int wl1271_acx_init_mem_config(struct wl1271 *wl)
1028{ 1002{
1029 int ret; 1003 int ret;
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index a28fc044034c..b2f88831b7a9 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -25,7 +25,7 @@
25#ifndef __ACX_H__ 25#ifndef __ACX_H__
26#define __ACX_H__ 26#define __ACX_H__
27 27
28#include "wl12xx.h" 28#include "wlcore.h"
29#include "cmd.h" 29#include "cmd.h"
30 30
31/************************************************************************* 31/*************************************************************************
@@ -824,16 +824,11 @@ struct wl1271_acx_keep_alive_config {
824 __le32 period; 824 __le32 period;
825} __packed; 825} __packed;
826 826
827/* TODO: maybe this needs to be moved somewhere else? */
827#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0) 828#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
828#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1) 829#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1)
829#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3) 830#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3)
830 831
831struct wl1271_acx_host_config_bitmap {
832 struct acx_header header;
833
834 __le32 host_cfg_bitmap;
835} __packed;
836
837enum { 832enum {
838 WL1271_ACX_TRIG_TYPE_LEVEL = 0, 833 WL1271_ACX_TRIG_TYPE_LEVEL = 0,
839 WL1271_ACX_TRIG_TYPE_EDGE, 834 WL1271_ACX_TRIG_TYPE_EDGE,
@@ -1274,7 +1269,6 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
1274int wl1271_acx_tx_config_options(struct wl1271 *wl); 1269int wl1271_acx_tx_config_options(struct wl1271 *wl);
1275int wl12xx_acx_mem_cfg(struct wl1271 *wl); 1270int wl12xx_acx_mem_cfg(struct wl1271 *wl);
1276int wl1271_acx_init_mem_config(struct wl1271 *wl); 1271int wl1271_acx_init_mem_config(struct wl1271 *wl);
1277int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
1278int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1272int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1279int wl1271_acx_smart_reflex(struct wl1271 *wl); 1273int wl1271_acx_smart_reflex(struct wl1271 *wl);
1280int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, 1274int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
new file mode 100644
index 000000000000..3a2207db5405
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -0,0 +1,443 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/slab.h>
25#include <linux/wl12xx.h>
26#include <linux/export.h>
27
28#include "debug.h"
29#include "acx.h"
30#include "boot.h"
31#include "io.h"
32#include "event.h"
33#include "rx.h"
34#include "hw_ops.h"
35
36static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
37{
38 u32 cpu_ctrl;
39
40 /* 10.5.0 run the firmware (I) */
41 cpu_ctrl = wlcore_read_reg(wl, REG_ECPU_CONTROL);
42
43 /* 10.5.1 run the firmware (II) */
44 cpu_ctrl |= flag;
45 wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl);
46}
47
48static int wlcore_parse_fw_ver(struct wl1271 *wl)
49{
50 int ret;
51
52 ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
53 &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
54 &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
55 &wl->chip.fw_ver[4]);
56
57 if (ret != 5) {
58 wl1271_warning("fw version incorrect value");
59 memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
60 return -EINVAL;
61 }
62
63 ret = wlcore_identify_fw(wl);
64 if (ret < 0)
65 return ret;
66
67 return 0;
68}
69
70static int wlcore_boot_fw_version(struct wl1271 *wl)
71{
72 struct wl1271_static_data *static_data;
73 int ret;
74
75 static_data = kmalloc(sizeof(*static_data), GFP_DMA);
76 if (!static_data) {
77 wl1271_error("Couldn't allocate memory for static data!");
78 return -ENOMEM;
79 }
80
81 wl1271_read(wl, wl->cmd_box_addr, static_data, sizeof(*static_data),
82 false);
83
84 strncpy(wl->chip.fw_ver_str, static_data->fw_version,
85 sizeof(wl->chip.fw_ver_str));
86
87 kfree(static_data);
88
89 /* make sure the string is NULL-terminated */
90 wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
91
92 ret = wlcore_parse_fw_ver(wl);
93 if (ret < 0)
94 return ret;
95
96 return 0;
97}
98
99static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
100 size_t fw_data_len, u32 dest)
101{
102 struct wlcore_partition_set partition;
103 int addr, chunk_num, partition_limit;
104 u8 *p, *chunk;
105
106 /* whal_FwCtrl_LoadFwImageSm() */
107
108 wl1271_debug(DEBUG_BOOT, "starting firmware upload");
109
110 wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
111 fw_data_len, CHUNK_SIZE);
112
113 if ((fw_data_len % 4) != 0) {
114 wl1271_error("firmware length not multiple of four");
115 return -EIO;
116 }
117
118 chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
119 if (!chunk) {
120 wl1271_error("allocation for firmware upload chunk failed");
121 return -ENOMEM;
122 }
123
124 memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition));
125 partition.mem.start = dest;
126 wlcore_set_partition(wl, &partition);
127
128 /* 10.1 set partition limit and chunk num */
129 chunk_num = 0;
130 partition_limit = wl->ptable[PART_DOWN].mem.size;
131
132 while (chunk_num < fw_data_len / CHUNK_SIZE) {
133 /* 10.2 update partition, if needed */
134 addr = dest + (chunk_num + 2) * CHUNK_SIZE;
135 if (addr > partition_limit) {
136 addr = dest + chunk_num * CHUNK_SIZE;
137 partition_limit = chunk_num * CHUNK_SIZE +
138 wl->ptable[PART_DOWN].mem.size;
139 partition.mem.start = addr;
140 wlcore_set_partition(wl, &partition);
141 }
142
143 /* 10.3 upload the chunk */
144 addr = dest + chunk_num * CHUNK_SIZE;
145 p = buf + chunk_num * CHUNK_SIZE;
146 memcpy(chunk, p, CHUNK_SIZE);
147 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
148 p, addr);
149 wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
150
151 chunk_num++;
152 }
153
154 /* 10.4 upload the last chunk */
155 addr = dest + chunk_num * CHUNK_SIZE;
156 p = buf + chunk_num * CHUNK_SIZE;
157 memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
158 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
159 fw_data_len % CHUNK_SIZE, p, addr);
160 wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
161
162 kfree(chunk);
163 return 0;
164}
165
166int wlcore_boot_upload_firmware(struct wl1271 *wl)
167{
168 u32 chunks, addr, len;
169 int ret = 0;
170 u8 *fw;
171
172 fw = wl->fw;
173 chunks = be32_to_cpup((__be32 *) fw);
174 fw += sizeof(u32);
175
176 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
177
178 while (chunks--) {
179 addr = be32_to_cpup((__be32 *) fw);
180 fw += sizeof(u32);
181 len = be32_to_cpup((__be32 *) fw);
182 fw += sizeof(u32);
183
184 if (len > 300000) {
185 wl1271_info("firmware chunk too long: %u", len);
186 return -EINVAL;
187 }
188 wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
189 chunks, addr, len);
190 ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
191 if (ret != 0)
192 break;
193 fw += len;
194 }
195
196 return ret;
197}
198EXPORT_SYMBOL_GPL(wlcore_boot_upload_firmware);
199
200int wlcore_boot_upload_nvs(struct wl1271 *wl)
201{
202 size_t nvs_len, burst_len;
203 int i;
204 u32 dest_addr, val;
205 u8 *nvs_ptr, *nvs_aligned;
206
207 if (wl->nvs == NULL)
208 return -ENODEV;
209
210 if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
211 struct wl1271_nvs_file *nvs =
212 (struct wl1271_nvs_file *)wl->nvs;
213 /*
214 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz
215 * band configurations) can be removed when those NVS files stop
216 * floating around.
217 */
218 if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
219 wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
220 if (nvs->general_params.dual_mode_select)
221 wl->enable_11a = true;
222 }
223
224 if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
225 (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
226 wl->enable_11a)) {
227 wl1271_error("nvs size is not as expected: %zu != %zu",
228 wl->nvs_len, sizeof(struct wl1271_nvs_file));
229 kfree(wl->nvs);
230 wl->nvs = NULL;
231 wl->nvs_len = 0;
232 return -EILSEQ;
233 }
234
235 /* only the first part of the NVS needs to be uploaded */
236 nvs_len = sizeof(nvs->nvs);
237 nvs_ptr = (u8 *) nvs->nvs;
238 } else {
239 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
240
241 if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) {
242 if (nvs->general_params.dual_mode_select)
243 wl->enable_11a = true;
244 } else {
245 wl1271_error("nvs size is not as expected: %zu != %zu",
246 wl->nvs_len,
247 sizeof(struct wl128x_nvs_file));
248 kfree(wl->nvs);
249 wl->nvs = NULL;
250 wl->nvs_len = 0;
251 return -EILSEQ;
252 }
253
254 /* only the first part of the NVS needs to be uploaded */
255 nvs_len = sizeof(nvs->nvs);
256 nvs_ptr = (u8 *)nvs->nvs;
257 }
258
259 /* update current MAC address to NVS */
260 nvs_ptr[11] = wl->addresses[0].addr[0];
261 nvs_ptr[10] = wl->addresses[0].addr[1];
262 nvs_ptr[6] = wl->addresses[0].addr[2];
263 nvs_ptr[5] = wl->addresses[0].addr[3];
264 nvs_ptr[4] = wl->addresses[0].addr[4];
265 nvs_ptr[3] = wl->addresses[0].addr[5];
266
267 /*
268 * Layout before the actual NVS tables:
269 * 1 byte : burst length.
270 * 2 bytes: destination address.
271 * n bytes: data to burst copy.
272 *
273 * This is ended by a 0 length, then the NVS tables.
274 */
275
276 /* FIXME: Do we need to check here whether the LSB is 1? */
277 while (nvs_ptr[0]) {
278 burst_len = nvs_ptr[0];
279 dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));
280
281 /*
282 * Due to our new wl1271_translate_reg_addr function,
283 * we need to add the register partition start address
284 * to the destination
285 */
286 dest_addr += wl->curr_part.reg.start;
287
288 /* We move our pointer to the data */
289 nvs_ptr += 3;
290
291 for (i = 0; i < burst_len; i++) {
292 if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
293 goto out_badnvs;
294
295 val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
296 | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
297
298 wl1271_debug(DEBUG_BOOT,
299 "nvs burst write 0x%x: 0x%x",
300 dest_addr, val);
301 wl1271_write32(wl, dest_addr, val);
302
303 nvs_ptr += 4;
304 dest_addr += 4;
305 }
306
307 if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
308 goto out_badnvs;
309 }
310
311 /*
312 * We've reached the first zero length, the first NVS table
313 * is located at an aligned offset which is at least 7 bytes further.
314 * NOTE: The wl->nvs->nvs element must be first, in order to
315 * simplify the casting, we assume it is at the beginning of
316 * the wl->nvs structure.
317 */
318 nvs_ptr = (u8 *)wl->nvs +
319 ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
320
321 if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
322 goto out_badnvs;
323
324 nvs_len -= nvs_ptr - (u8 *)wl->nvs;
325
326 /* Now we must set the partition correctly */
327 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
328
329 /* Copy the NVS tables to a new block to ensure alignment */
330 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
331 if (!nvs_aligned)
332 return -ENOMEM;
333
334 /* And finally we upload the NVS tables */
335 wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS,
336 nvs_aligned, nvs_len, false);
337
338 kfree(nvs_aligned);
339 return 0;
340
341out_badnvs:
342 wl1271_error("nvs data is malformed");
343 return -EILSEQ;
344}
345EXPORT_SYMBOL_GPL(wlcore_boot_upload_nvs);
346
347int wlcore_boot_run_firmware(struct wl1271 *wl)
348{
349 int loop, ret;
350 u32 chip_id, intr;
351
352 /* Make sure we have the boot partition */
353 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
354
355 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
356
357 chip_id = wlcore_read_reg(wl, REG_CHIP_ID_B);
358
359 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
360
361 if (chip_id != wl->chip.id) {
362 wl1271_error("chip id doesn't match after firmware boot");
363 return -EIO;
364 }
365
366 /* wait for init to complete */
367 loop = 0;
368 while (loop++ < INIT_LOOP) {
369 udelay(INIT_LOOP_DELAY);
370 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
371
372 if (intr == 0xffffffff) {
373 wl1271_error("error reading hardware complete "
374 "init indication");
375 return -EIO;
376 }
377 /* check that ACX_INTR_INIT_COMPLETE is enabled */
378 else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
379 wlcore_write_reg(wl, REG_INTERRUPT_ACK,
380 WL1271_ACX_INTR_INIT_COMPLETE);
381 break;
382 }
383 }
384
385 if (loop > INIT_LOOP) {
386 wl1271_error("timeout waiting for the hardware to "
387 "complete initialization");
388 return -EIO;
389 }
390
391 /* get hardware config command mail box */
392 wl->cmd_box_addr = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR);
393
394 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr);
395
396 /* get hardware config event mail box */
397 wl->mbox_ptr[0] = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR);
398 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
399
400 wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
401 wl->mbox_ptr[0], wl->mbox_ptr[1]);
402
403 ret = wlcore_boot_fw_version(wl);
404 if (ret < 0) {
405 wl1271_error("couldn't boot firmware");
406 return ret;
407 }
408
409 /*
410 * in case of full asynchronous mode the firmware event must be
411 * ready to receive event from the command mailbox
412 */
413
414 /* unmask required mbox events */
415 wl->event_mask = BSS_LOSE_EVENT_ID |
416 SCAN_COMPLETE_EVENT_ID |
417 ROLE_STOP_COMPLETE_EVENT_ID |
418 RSSI_SNR_TRIGGER_0_EVENT_ID |
419 PSPOLL_DELIVERY_FAILURE_EVENT_ID |
420 SOFT_GEMINI_SENSE_EVENT_ID |
421 PERIODIC_SCAN_REPORT_EVENT_ID |
422 PERIODIC_SCAN_COMPLETE_EVENT_ID |
423 DUMMY_PACKET_EVENT_ID |
424 PEER_REMOVE_COMPLETE_EVENT_ID |
425 BA_SESSION_RX_CONSTRAINT_EVENT_ID |
426 REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
427 INACTIVE_STA_EVENT_ID |
428 MAX_TX_RETRY_EVENT_ID |
429 CHANNEL_SWITCH_COMPLETE_EVENT_ID;
430
431 ret = wl1271_event_unmask(wl);
432 if (ret < 0) {
433 wl1271_error("EVENT mask setting failed");
434 return ret;
435 }
436
437 /* set the working partition to its "running" mode offset */
438 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
439
440 /* firmware startup completed */
441 return 0;
442}
443EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware);
diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h
new file mode 100644
index 000000000000..094981dd2227
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/boot.h
@@ -0,0 +1,54 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __BOOT_H__
25#define __BOOT_H__
26
27#include "wlcore.h"
28
29int wlcore_boot_upload_firmware(struct wl1271 *wl);
30int wlcore_boot_upload_nvs(struct wl1271 *wl);
31int wlcore_boot_run_firmware(struct wl1271 *wl);
32
33#define WL1271_NO_SUBBANDS 8
34#define WL1271_NO_POWER_LEVELS 4
35#define WL1271_FW_VERSION_MAX_LEN 20
36
37struct wl1271_static_data {
38 u8 mac_address[ETH_ALEN];
39 u8 padding[2];
40 u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
41 u32 hw_version;
42 u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
43};
44
45/* number of times we try to read the INIT interrupt */
46#define INIT_LOOP 20000
47
48/* delay between retries */
49#define INIT_LOOP_DELAY 50
50
51#define WU_COUNTER_PAUSE_VAL 0x3FF
52#define WELP_ARM_COMMAND_VAL 0x4
53
54#endif
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 3414fc11e9ba..5c4716c6f040 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -28,9 +28,8 @@
28#include <linux/ieee80211.h> 28#include <linux/ieee80211.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "wl12xx.h" 31#include "wlcore.h"
32#include "debug.h" 32#include "debug.h"
33#include "reg.h"
34#include "io.h" 33#include "io.h"
35#include "acx.h" 34#include "acx.h"
36#include "wl12xx_80211.h" 35#include "wl12xx_80211.h"
@@ -67,11 +66,15 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
67 66
68 wl1271_write(wl, wl->cmd_box_addr, buf, len, false); 67 wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
69 68
70 wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); 69 /*
70 * TODO: we just need this because one bit is in a different
71 * place. Is there any better way?
72 */
73 wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
71 74
72 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); 75 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
73 76
74 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 77 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
75 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { 78 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
76 if (time_after(jiffies, timeout)) { 79 if (time_after(jiffies, timeout)) {
77 wl1271_error("command complete timeout"); 80 wl1271_error("command complete timeout");
@@ -85,7 +88,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
85 else 88 else
86 msleep(1); 89 msleep(1);
87 90
88 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 91 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
89 } 92 }
90 93
91 /* read back the status code of the command */ 94 /* read back the status code of the command */
@@ -100,8 +103,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
100 goto fail; 103 goto fail;
101 } 104 }
102 105
103 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK, 106 wlcore_write_reg(wl, REG_INTERRUPT_ACK, WL1271_ACX_INTR_CMD_COMPLETE);
104 WL1271_ACX_INTR_CMD_COMPLETE);
105 return 0; 107 return 0;
106 108
107fail: 109fail:
@@ -110,240 +112,18 @@ fail:
110 return ret; 112 return ret;
111} 113}
112 114
113int wl1271_cmd_general_parms(struct wl1271 *wl)
114{
115 struct wl1271_general_parms_cmd *gen_parms;
116 struct wl1271_ini_general_params *gp =
117 &((struct wl1271_nvs_file *)wl->nvs)->general_params;
118 bool answer = false;
119 int ret;
120
121 if (!wl->nvs)
122 return -ENODEV;
123
124 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
125 wl1271_warning("FEM index from INI out of bounds");
126 return -EINVAL;
127 }
128
129 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
130 if (!gen_parms)
131 return -ENOMEM;
132
133 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
134
135 memcpy(&gen_parms->general_params, gp, sizeof(*gp));
136
137 if (gp->tx_bip_fem_auto_detect)
138 answer = true;
139
140 /* Override the REF CLK from the NVS with the one from platform data */
141 gen_parms->general_params.ref_clock = wl->ref_clock;
142
143 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
144 if (ret < 0) {
145 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
146 goto out;
147 }
148
149 gp->tx_bip_fem_manufacturer =
150 gen_parms->general_params.tx_bip_fem_manufacturer;
151
152 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
153 wl1271_warning("FEM index from FW out of bounds");
154 ret = -EINVAL;
155 goto out;
156 }
157
158 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
159 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
160
161out:
162 kfree(gen_parms);
163 return ret;
164}
165
166int wl128x_cmd_general_parms(struct wl1271 *wl)
167{
168 struct wl128x_general_parms_cmd *gen_parms;
169 struct wl128x_ini_general_params *gp =
170 &((struct wl128x_nvs_file *)wl->nvs)->general_params;
171 bool answer = false;
172 int ret;
173
174 if (!wl->nvs)
175 return -ENODEV;
176
177 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
178 wl1271_warning("FEM index from ini out of bounds");
179 return -EINVAL;
180 }
181
182 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
183 if (!gen_parms)
184 return -ENOMEM;
185
186 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
187
188 memcpy(&gen_parms->general_params, gp, sizeof(*gp));
189
190 if (gp->tx_bip_fem_auto_detect)
191 answer = true;
192
193 /* Replace REF and TCXO CLKs with the ones from platform data */
194 gen_parms->general_params.ref_clock = wl->ref_clock;
195 gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
196
197 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
198 if (ret < 0) {
199 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
200 goto out;
201 }
202
203 gp->tx_bip_fem_manufacturer =
204 gen_parms->general_params.tx_bip_fem_manufacturer;
205
206 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
207 wl1271_warning("FEM index from FW out of bounds");
208 ret = -EINVAL;
209 goto out;
210 }
211
212 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
213 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
214
215out:
216 kfree(gen_parms);
217 return ret;
218}
219
220int wl1271_cmd_radio_parms(struct wl1271 *wl)
221{
222 struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
223 struct wl1271_radio_parms_cmd *radio_parms;
224 struct wl1271_ini_general_params *gp = &nvs->general_params;
225 int ret;
226
227 if (!wl->nvs)
228 return -ENODEV;
229
230 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
231 if (!radio_parms)
232 return -ENOMEM;
233
234 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
235
236 /* 2.4GHz parameters */
237 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
238 sizeof(struct wl1271_ini_band_params_2));
239 memcpy(&radio_parms->dyn_params_2,
240 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
241 sizeof(struct wl1271_ini_fem_params_2));
242
243 /* 5GHz parameters */
244 memcpy(&radio_parms->static_params_5,
245 &nvs->stat_radio_params_5,
246 sizeof(struct wl1271_ini_band_params_5));
247 memcpy(&radio_parms->dyn_params_5,
248 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
249 sizeof(struct wl1271_ini_fem_params_5));
250
251 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
252 radio_parms, sizeof(*radio_parms));
253
254 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
255 if (ret < 0)
256 wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
257
258 kfree(radio_parms);
259 return ret;
260}
261
262int wl128x_cmd_radio_parms(struct wl1271 *wl)
263{
264 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
265 struct wl128x_radio_parms_cmd *radio_parms;
266 struct wl128x_ini_general_params *gp = &nvs->general_params;
267 int ret;
268
269 if (!wl->nvs)
270 return -ENODEV;
271
272 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
273 if (!radio_parms)
274 return -ENOMEM;
275
276 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
277
278 /* 2.4GHz parameters */
279 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
280 sizeof(struct wl128x_ini_band_params_2));
281 memcpy(&radio_parms->dyn_params_2,
282 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
283 sizeof(struct wl128x_ini_fem_params_2));
284
285 /* 5GHz parameters */
286 memcpy(&radio_parms->static_params_5,
287 &nvs->stat_radio_params_5,
288 sizeof(struct wl128x_ini_band_params_5));
289 memcpy(&radio_parms->dyn_params_5,
290 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
291 sizeof(struct wl128x_ini_fem_params_5));
292
293 radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
294
295 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
296 radio_parms, sizeof(*radio_parms));
297
298 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
299 if (ret < 0)
300 wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
301
302 kfree(radio_parms);
303 return ret;
304}
305
306int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
307{
308 struct wl1271_ext_radio_parms_cmd *ext_radio_parms;
309 struct conf_rf_settings *rf = &wl->conf.rf;
310 int ret;
311
312 if (!wl->nvs)
313 return -ENODEV;
314
315 ext_radio_parms = kzalloc(sizeof(*ext_radio_parms), GFP_KERNEL);
316 if (!ext_radio_parms)
317 return -ENOMEM;
318
319 ext_radio_parms->test.id = TEST_CMD_INI_FILE_RF_EXTENDED_PARAM;
320
321 memcpy(ext_radio_parms->tx_per_channel_power_compensation_2,
322 rf->tx_per_channel_power_compensation_2,
323 CONF_TX_PWR_COMPENSATION_LEN_2);
324 memcpy(ext_radio_parms->tx_per_channel_power_compensation_5,
325 rf->tx_per_channel_power_compensation_5,
326 CONF_TX_PWR_COMPENSATION_LEN_5);
327
328 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_EXT_RADIO_PARAM: ",
329 ext_radio_parms, sizeof(*ext_radio_parms));
330
331 ret = wl1271_cmd_test(wl, ext_radio_parms, sizeof(*ext_radio_parms), 0);
332 if (ret < 0)
333 wl1271_warning("TEST_CMD_INI_FILE_RF_EXTENDED_PARAM failed");
334
335 kfree(ext_radio_parms);
336 return ret;
337}
338
339/* 115/*
340 * Poll the mailbox event field until any of the bits in the mask is set or a 116 * Poll the mailbox event field until any of the bits in the mask is set or a
341 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) 117 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
342 */ 118 */
343static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask) 119static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
344{ 120{
345 u32 events_vector, event; 121 u32 *events_vector;
122 u32 event;
346 unsigned long timeout; 123 unsigned long timeout;
124 int ret = 0;
125
126 events_vector = kmalloc(sizeof(*events_vector), GFP_DMA);
347 127
348 timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); 128 timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
349 129
@@ -351,21 +131,24 @@ static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
351 if (time_after(jiffies, timeout)) { 131 if (time_after(jiffies, timeout)) {
352 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d", 132 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
353 (int)mask); 133 (int)mask);
354 return -ETIMEDOUT; 134 ret = -ETIMEDOUT;
135 goto out;
355 } 136 }
356 137
357 msleep(1); 138 msleep(1);
358 139
359 /* read from both event fields */ 140 /* read from both event fields */
360 wl1271_read(wl, wl->mbox_ptr[0], &events_vector, 141 wl1271_read(wl, wl->mbox_ptr[0], events_vector,
361 sizeof(events_vector), false); 142 sizeof(*events_vector), false);
362 event = events_vector & mask; 143 event = *events_vector & mask;
363 wl1271_read(wl, wl->mbox_ptr[1], &events_vector, 144 wl1271_read(wl, wl->mbox_ptr[1], events_vector,
364 sizeof(events_vector), false); 145 sizeof(*events_vector), false);
365 event |= events_vector & mask; 146 event |= *events_vector & mask;
366 } while (!event); 147 } while (!event);
367 148
368 return 0; 149out:
150 kfree(events_vector);
151 return ret;
369} 152}
370 153
371static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) 154static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
@@ -522,7 +305,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
522 305
523 cmd->role_id = wlvif->dev_role_id; 306 cmd->role_id = wlvif->dev_role_id;
524 if (wlvif->band == IEEE80211_BAND_5GHZ) 307 if (wlvif->band == IEEE80211_BAND_5GHZ)
525 cmd->band = WL12XX_BAND_5GHZ; 308 cmd->band = WLCORE_BAND_5GHZ;
526 cmd->channel = wlvif->channel; 309 cmd->channel = wlvif->channel;
527 310
528 if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) { 311 if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
@@ -613,7 +396,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
613 396
614 cmd->role_id = wlvif->role_id; 397 cmd->role_id = wlvif->role_id;
615 if (wlvif->band == IEEE80211_BAND_5GHZ) 398 if (wlvif->band == IEEE80211_BAND_5GHZ)
616 cmd->band = WL12XX_BAND_5GHZ; 399 cmd->band = WLCORE_BAND_5GHZ;
617 cmd->channel = wlvif->channel; 400 cmd->channel = wlvif->channel;
618 cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); 401 cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
619 cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int); 402 cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
@@ -750,14 +533,14 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
750 533
751 switch (wlvif->band) { 534 switch (wlvif->band) {
752 case IEEE80211_BAND_2GHZ: 535 case IEEE80211_BAND_2GHZ:
753 cmd->band = RADIO_BAND_2_4GHZ; 536 cmd->band = WLCORE_BAND_2_4GHZ;
754 break; 537 break;
755 case IEEE80211_BAND_5GHZ: 538 case IEEE80211_BAND_5GHZ:
756 cmd->band = RADIO_BAND_5GHZ; 539 cmd->band = WLCORE_BAND_5GHZ;
757 break; 540 break;
758 default: 541 default:
759 wl1271_warning("ap start - unknown band: %d", (int)wlvif->band); 542 wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
760 cmd->band = RADIO_BAND_2_4GHZ; 543 cmd->band = WLCORE_BAND_2_4GHZ;
761 break; 544 break;
762 } 545 }
763 546
@@ -830,7 +613,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
830 613
831 cmd->role_id = wlvif->role_id; 614 cmd->role_id = wlvif->role_id;
832 if (wlvif->band == IEEE80211_BAND_5GHZ) 615 if (wlvif->band == IEEE80211_BAND_5GHZ)
833 cmd->band = WL12XX_BAND_5GHZ; 616 cmd->band = WLCORE_BAND_5GHZ;
834 cmd->channel = wlvif->channel; 617 cmd->channel = wlvif->channel;
835 cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); 618 cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
836 cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int); 619 cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
@@ -904,6 +687,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
904 687
905 return ret; 688 return ret;
906} 689}
690EXPORT_SYMBOL_GPL(wl1271_cmd_test);
907 691
908/** 692/**
909 * read acx from firmware 693 * read acx from firmware
@@ -960,6 +744,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
960 744
961 return 0; 745 return 0;
962} 746}
747EXPORT_SYMBOL_GPL(wl1271_cmd_configure);
963 748
964int wl1271_cmd_data_path(struct wl1271 *wl, bool enable) 749int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
965{ 750{
@@ -1730,10 +1515,10 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1730 cmd->channel = wlvif->channel; 1515 cmd->channel = wlvif->channel;
1731 switch (wlvif->band) { 1516 switch (wlvif->band) {
1732 case IEEE80211_BAND_2GHZ: 1517 case IEEE80211_BAND_2GHZ:
1733 cmd->band = RADIO_BAND_2_4GHZ; 1518 cmd->band = WLCORE_BAND_2_4GHZ;
1734 break; 1519 break;
1735 case IEEE80211_BAND_5GHZ: 1520 case IEEE80211_BAND_5GHZ:
1736 cmd->band = RADIO_BAND_5GHZ; 1521 cmd->band = WLCORE_BAND_5GHZ;
1737 break; 1522 break;
1738 default: 1523 default:
1739 wl1271_error("roc - unknown band: %d", (int)wlvif->band); 1524 wl1271_error("roc - unknown band: %d", (int)wlvif->band);
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index de217d92516b..a46ae07cb77e 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -25,17 +25,12 @@
25#ifndef __CMD_H__ 25#ifndef __CMD_H__
26#define __CMD_H__ 26#define __CMD_H__
27 27
28#include "wl12xx.h" 28#include "wlcore.h"
29 29
30struct acx_header; 30struct acx_header;
31 31
32int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, 32int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
33 size_t res_len); 33 size_t res_len);
34int wl1271_cmd_general_parms(struct wl1271 *wl);
35int wl128x_cmd_general_parms(struct wl1271 *wl);
36int wl1271_cmd_radio_parms(struct wl1271 *wl);
37int wl128x_cmd_radio_parms(struct wl1271 *wl);
38int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
39int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, 34int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
40 u8 *role_id); 35 u8 *role_id);
41int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id); 36int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
@@ -262,13 +257,13 @@ struct wl12xx_cmd_role_disable {
262 u8 padding[3]; 257 u8 padding[3];
263} __packed; 258} __packed;
264 259
265enum wl12xx_band { 260enum wlcore_band {
266 WL12XX_BAND_2_4GHZ = 0, 261 WLCORE_BAND_2_4GHZ = 0,
267 WL12XX_BAND_5GHZ = 1, 262 WLCORE_BAND_5GHZ = 1,
268 WL12XX_BAND_JAPAN_4_9_GHZ = 2, 263 WLCORE_BAND_JAPAN_4_9_GHZ = 2,
269 WL12XX_BAND_DEFAULT = WL12XX_BAND_2_4GHZ, 264 WLCORE_BAND_DEFAULT = WLCORE_BAND_2_4GHZ,
270 WL12XX_BAND_INVALID = 0x7E, 265 WLCORE_BAND_INVALID = 0x7E,
271 WL12XX_BAND_MAX_RADIO = 0x7F, 266 WLCORE_BAND_MAX_RADIO = 0x7F,
272}; 267};
273 268
274struct wl12xx_cmd_role_start { 269struct wl12xx_cmd_role_start {
@@ -494,83 +489,6 @@ enum wl1271_channel_tune_bands {
494 489
495#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0 490#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0
496 491
497#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19
498#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E
499#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26
500
501struct wl1271_general_parms_cmd {
502 struct wl1271_cmd_header header;
503
504 struct wl1271_cmd_test_header test;
505
506 struct wl1271_ini_general_params general_params;
507
508 u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
509 u8 sr_sen_n_p;
510 u8 sr_sen_n_p_gain;
511 u8 sr_sen_nrn;
512 u8 sr_sen_prn;
513 u8 padding[3];
514} __packed;
515
516struct wl128x_general_parms_cmd {
517 struct wl1271_cmd_header header;
518
519 struct wl1271_cmd_test_header test;
520
521 struct wl128x_ini_general_params general_params;
522
523 u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
524 u8 sr_sen_n_p;
525 u8 sr_sen_n_p_gain;
526 u8 sr_sen_nrn;
527 u8 sr_sen_prn;
528 u8 padding[3];
529} __packed;
530
531struct wl1271_radio_parms_cmd {
532 struct wl1271_cmd_header header;
533
534 struct wl1271_cmd_test_header test;
535
536 /* Static radio parameters */
537 struct wl1271_ini_band_params_2 static_params_2;
538 struct wl1271_ini_band_params_5 static_params_5;
539
540 /* Dynamic radio parameters */
541 struct wl1271_ini_fem_params_2 dyn_params_2;
542 u8 padding2;
543 struct wl1271_ini_fem_params_5 dyn_params_5;
544 u8 padding3[2];
545} __packed;
546
547struct wl128x_radio_parms_cmd {
548 struct wl1271_cmd_header header;
549
550 struct wl1271_cmd_test_header test;
551
552 /* Static radio parameters */
553 struct wl128x_ini_band_params_2 static_params_2;
554 struct wl128x_ini_band_params_5 static_params_5;
555
556 u8 fem_vendor_and_options;
557
558 /* Dynamic radio parameters */
559 struct wl128x_ini_fem_params_2 dyn_params_2;
560 u8 padding2;
561 struct wl128x_ini_fem_params_5 dyn_params_5;
562} __packed;
563
564struct wl1271_ext_radio_parms_cmd {
565 struct wl1271_cmd_header header;
566
567 struct wl1271_cmd_test_header test;
568
569 u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
570 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
571 u8 padding[3];
572} __packed;
573
574/* 492/*
575 * There are three types of disconnections: 493 * There are three types of disconnections:
576 * 494 *
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 3e581e19424c..fef0db4213bc 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -65,36 +65,7 @@ enum {
65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS, 65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
66}; 66};
67 67
68enum { 68#define CONF_HW_RXTX_RATE_UNSUPPORTED 0xff
69 CONF_HW_RXTX_RATE_MCS7_SGI = 0,
70 CONF_HW_RXTX_RATE_MCS7,
71 CONF_HW_RXTX_RATE_MCS6,
72 CONF_HW_RXTX_RATE_MCS5,
73 CONF_HW_RXTX_RATE_MCS4,
74 CONF_HW_RXTX_RATE_MCS3,
75 CONF_HW_RXTX_RATE_MCS2,
76 CONF_HW_RXTX_RATE_MCS1,
77 CONF_HW_RXTX_RATE_MCS0,
78 CONF_HW_RXTX_RATE_54,
79 CONF_HW_RXTX_RATE_48,
80 CONF_HW_RXTX_RATE_36,
81 CONF_HW_RXTX_RATE_24,
82 CONF_HW_RXTX_RATE_22,
83 CONF_HW_RXTX_RATE_18,
84 CONF_HW_RXTX_RATE_12,
85 CONF_HW_RXTX_RATE_11,
86 CONF_HW_RXTX_RATE_9,
87 CONF_HW_RXTX_RATE_6,
88 CONF_HW_RXTX_RATE_5_5,
89 CONF_HW_RXTX_RATE_2,
90 CONF_HW_RXTX_RATE_1,
91 CONF_HW_RXTX_RATE_MAX,
92 CONF_HW_RXTX_RATE_UNSUPPORTED = 0xff
93};
94
95/* Rates between and including these are MCS rates */
96#define CONF_HW_RXTX_RATE_MCS_MIN CONF_HW_RXTX_RATE_MCS7_SGI
97#define CONF_HW_RXTX_RATE_MCS_MAX CONF_HW_RXTX_RATE_MCS0
98 69
99enum { 70enum {
100 CONF_SG_DISABLE = 0, 71 CONF_SG_DISABLE = 0,
@@ -1096,16 +1067,31 @@ struct conf_scan_settings {
1096}; 1067};
1097 1068
1098struct conf_sched_scan_settings { 1069struct conf_sched_scan_settings {
1099 /* minimum time to wait on the channel for active scans (in TUs) */ 1070 /*
1100 u16 min_dwell_time_active; 1071 * The base time to wait on the channel for active scans (in TU/1000).
1072 * The minimum dwell time is calculated according to this:
1073 * min_dwell_time = base + num_of_probes_to_be_sent * delta_per_probe
1074 * The maximum dwell time is calculated according to this:
1075 * max_dwell_time = min_dwell_time + max_dwell_time_delta
1076 */
1077 u32 base_dwell_time;
1101 1078
1102 /* maximum time to wait on the channel for active scans (in TUs) */ 1079 /* The delta between the min dwell time and max dwell time for
1103 u16 max_dwell_time_active; 1080 * active scans (in TU/1000s). The max dwell time is used by the FW once
1081 * traffic is detected on the channel.
1082 */
1083 u32 max_dwell_time_delta;
1084
1085 /* Delta added to min dwell time per each probe in 2.4 GHz (TU/1000) */
1086 u32 dwell_time_delta_per_probe;
1104 1087
1105 /* time to wait on the channel for passive scans (in TUs) */ 1088 /* Delta added to min dwell time per each probe in 5 GHz (TU/1000) */
1089 u32 dwell_time_delta_per_probe_5;
1090
1091 /* time to wait on the channel for passive scans (in TU/1000) */
1106 u32 dwell_time_passive; 1092 u32 dwell_time_passive;
1107 1093
1108 /* time to wait on the channel for DFS scans (in TUs) */ 1094 /* time to wait on the channel for DFS scans (in TU/1000) */
1109 u32 dwell_time_dfs; 1095 u32 dwell_time_dfs;
1110 1096
1111 /* number of probe requests to send on each channel in active scans */ 1097 /* number of probe requests to send on each channel in active scans */
@@ -1118,26 +1104,6 @@ struct conf_sched_scan_settings {
1118 s8 snr_threshold; 1104 s8 snr_threshold;
1119}; 1105};
1120 1106
1121/* these are number of channels on the band divided by two, rounded up */
1122#define CONF_TX_PWR_COMPENSATION_LEN_2 7
1123#define CONF_TX_PWR_COMPENSATION_LEN_5 18
1124
1125struct conf_rf_settings {
1126 /*
1127 * Per channel power compensation for 2.4GHz
1128 *
1129 * Range: s8
1130 */
1131 u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
1132
1133 /*
1134 * Per channel power compensation for 5GHz
1135 *
1136 * Range: s8
1137 */
1138 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
1139};
1140
1141struct conf_ht_setting { 1107struct conf_ht_setting {
1142 u8 rx_ba_win_size; 1108 u8 rx_ba_win_size;
1143 u8 tx_ba_win_size; 1109 u8 tx_ba_win_size;
@@ -1286,7 +1252,7 @@ struct conf_hangover_settings {
1286 u8 window_size; 1252 u8 window_size;
1287}; 1253};
1288 1254
1289struct conf_drv_settings { 1255struct wlcore_conf {
1290 struct conf_sg_settings sg; 1256 struct conf_sg_settings sg;
1291 struct conf_rx_settings rx; 1257 struct conf_rx_settings rx;
1292 struct conf_tx_settings tx; 1258 struct conf_tx_settings tx;
@@ -1296,16 +1262,13 @@ struct conf_drv_settings {
1296 struct conf_roam_trigger_settings roam_trigger; 1262 struct conf_roam_trigger_settings roam_trigger;
1297 struct conf_scan_settings scan; 1263 struct conf_scan_settings scan;
1298 struct conf_sched_scan_settings sched_scan; 1264 struct conf_sched_scan_settings sched_scan;
1299 struct conf_rf_settings rf;
1300 struct conf_ht_setting ht; 1265 struct conf_ht_setting ht;
1301 struct conf_memory_settings mem_wl127x; 1266 struct conf_memory_settings mem;
1302 struct conf_memory_settings mem_wl128x;
1303 struct conf_fm_coex fm_coex; 1267 struct conf_fm_coex fm_coex;
1304 struct conf_rx_streaming_settings rx_streaming; 1268 struct conf_rx_streaming_settings rx_streaming;
1305 struct conf_fwlog fwlog; 1269 struct conf_fwlog fwlog;
1306 struct conf_rate_policy_settings rate; 1270 struct conf_rate_policy_settings rate;
1307 struct conf_hangover_settings hangover; 1271 struct conf_hangover_settings hangover;
1308 u8 hci_io_ds;
1309}; 1272};
1310 1273
1311#endif 1274#endif
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/ti/wlcore/debug.h
index ec0fdc25b280..6b800b3cbea5 100644
--- a/drivers/net/wireless/wl12xx/debug.h
+++ b/drivers/net/wireless/ti/wlcore/debug.h
@@ -52,6 +52,7 @@ enum {
52 DEBUG_ADHOC = BIT(16), 52 DEBUG_ADHOC = BIT(16),
53 DEBUG_AP = BIT(17), 53 DEBUG_AP = BIT(17),
54 DEBUG_PROBE = BIT(18), 54 DEBUG_PROBE = BIT(18),
55 DEBUG_IO = BIT(19),
55 DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP), 56 DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
56 DEBUG_ALL = ~0, 57 DEBUG_ALL = ~0,
57}; 58};
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 564d49575c94..d5aea1ff5ad1 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -26,7 +26,7 @@
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29#include "wl12xx.h" 29#include "wlcore.h"
30#include "debug.h" 30#include "debug.h"
31#include "acx.h" 31#include "acx.h"
32#include "ps.h" 32#include "ps.h"
@@ -647,6 +647,7 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
647 VIF_STATE_PRINT_INT(last_rssi_event); 647 VIF_STATE_PRINT_INT(last_rssi_event);
648 VIF_STATE_PRINT_INT(ba_support); 648 VIF_STATE_PRINT_INT(ba_support);
649 VIF_STATE_PRINT_INT(ba_allowed); 649 VIF_STATE_PRINT_INT(ba_allowed);
650 VIF_STATE_PRINT_INT(is_gem);
650 VIF_STATE_PRINT_LLHEX(tx_security_seq); 651 VIF_STATE_PRINT_LLHEX(tx_security_seq);
651 VIF_STATE_PRINT_INT(tx_security_last_seq_lsb); 652 VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
652 } 653 }
diff --git a/drivers/net/wireless/wl12xx/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index 254c5b292cf6..a8d3aef011ff 100644
--- a/drivers/net/wireless/wl12xx/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -24,7 +24,7 @@
24#ifndef __DEBUGFS_H__ 24#ifndef __DEBUGFS_H__
25#define __DEBUGFS_H__ 25#define __DEBUGFS_H__
26 26
27#include "wl12xx.h" 27#include "wlcore.h"
28 28
29int wl1271_debugfs_init(struct wl1271 *wl); 29int wl1271_debugfs_init(struct wl1271 *wl);
30void wl1271_debugfs_exit(struct wl1271 *wl); 30void wl1271_debugfs_exit(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/ti/wlcore/event.c
index c953717f38eb..292632ddf890 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -21,9 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#include "wl12xx.h" 24#include "wlcore.h"
25#include "debug.h" 25#include "debug.h"
26#include "reg.h"
27#include "io.h" 26#include "io.h"
28#include "event.h" 27#include "event.h"
29#include "ps.h" 28#include "ps.h"
@@ -98,8 +97,9 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
98 wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask); 97 wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask);
99} 98}
100 99
101static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) 100static int wl1271_event_process(struct wl1271 *wl)
102{ 101{
102 struct event_mailbox *mbox = wl->mbox;
103 struct ieee80211_vif *vif; 103 struct ieee80211_vif *vif;
104 struct wl12xx_vif *wlvif; 104 struct wl12xx_vif *wlvif;
105 u32 vector; 105 u32 vector;
@@ -196,7 +196,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
196 bool success; 196 bool success;
197 197
198 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, 198 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
199 &wl->flags)) 199 &wlvif->flags))
200 continue; 200 continue;
201 201
202 success = mbox->channel_switch_status ? false : true; 202 success = mbox->channel_switch_status ? false : true;
@@ -278,18 +278,8 @@ int wl1271_event_unmask(struct wl1271 *wl)
278 return 0; 278 return 0;
279} 279}
280 280
281void wl1271_event_mbox_config(struct wl1271 *wl)
282{
283 wl->mbox_ptr[0] = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
284 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
285
286 wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
287 wl->mbox_ptr[0], wl->mbox_ptr[1]);
288}
289
290int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) 281int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
291{ 282{
292 struct event_mailbox mbox;
293 int ret; 283 int ret;
294 284
295 wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num); 285 wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);
@@ -298,16 +288,19 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
298 return -EINVAL; 288 return -EINVAL;
299 289
300 /* first we read the mbox descriptor */ 290 /* first we read the mbox descriptor */
301 wl1271_read(wl, wl->mbox_ptr[mbox_num], &mbox, 291 wl1271_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
302 sizeof(struct event_mailbox), false); 292 sizeof(*wl->mbox), false);
303 293
304 /* process the descriptor */ 294 /* process the descriptor */
305 ret = wl1271_event_process(wl, &mbox); 295 ret = wl1271_event_process(wl);
306 if (ret < 0) 296 if (ret < 0)
307 return ret; 297 return ret;
308 298
309 /* then we let the firmware know it can go on...*/ 299 /*
310 wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK); 300 * TODO: we just need this because one bit is in a different
301 * place. Is there any better way?
302 */
303 wl->ops->ack_event(wl);
311 304
312 return 0; 305 return 0;
313} 306}
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/ti/wlcore/event.h
index 057d193d3525..8adf18d6c58f 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/ti/wlcore/event.h
@@ -132,8 +132,9 @@ struct event_mailbox {
132 u8 reserved_8[9]; 132 u8 reserved_8[9];
133} __packed; 133} __packed;
134 134
135struct wl1271;
136
135int wl1271_event_unmask(struct wl1271 *wl); 137int wl1271_event_unmask(struct wl1271 *wl);
136void wl1271_event_mbox_config(struct wl1271 *wl);
137int wl1271_event_handle(struct wl1271 *wl, u8 mbox); 138int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
138 139
139#endif 140#endif
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
new file mode 100644
index 000000000000..9384b4d56c24
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -0,0 +1,122 @@
1/*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WLCORE_HW_OPS_H__
23#define __WLCORE_HW_OPS_H__
24
25#include "wlcore.h"
26#include "rx.h"
27
28static inline u32
29wlcore_hw_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
30{
31 if (!wl->ops->calc_tx_blocks)
32 BUG_ON(1);
33
34 return wl->ops->calc_tx_blocks(wl, len, spare_blks);
35}
36
37static inline void
38wlcore_hw_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
39 u32 blks, u32 spare_blks)
40{
41 if (!wl->ops->set_tx_desc_blocks)
42 BUG_ON(1);
43
44 return wl->ops->set_tx_desc_blocks(wl, desc, blks, spare_blks);
45}
46
47static inline void
48wlcore_hw_set_tx_desc_data_len(struct wl1271 *wl,
49 struct wl1271_tx_hw_descr *desc,
50 struct sk_buff *skb)
51{
52 if (!wl->ops->set_tx_desc_data_len)
53 BUG_ON(1);
54
55 wl->ops->set_tx_desc_data_len(wl, desc, skb);
56}
57
58static inline enum wl_rx_buf_align
59wlcore_hw_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
60{
61
62 if (!wl->ops->get_rx_buf_align)
63 BUG_ON(1);
64
65 return wl->ops->get_rx_buf_align(wl, rx_desc);
66}
67
68static inline void
69wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
70{
71 if (wl->ops->prepare_read)
72 wl->ops->prepare_read(wl, rx_desc, len);
73}
74
75static inline u32
76wlcore_hw_get_rx_packet_len(struct wl1271 *wl, void *rx_data, u32 data_len)
77{
78 if (!wl->ops->get_rx_packet_len)
79 BUG_ON(1);
80
81 return wl->ops->get_rx_packet_len(wl, rx_data, data_len);
82}
83
84static inline void wlcore_hw_tx_delayed_compl(struct wl1271 *wl)
85{
86 if (wl->ops->tx_delayed_compl)
87 wl->ops->tx_delayed_compl(wl);
88}
89
90static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl)
91{
92 if (wl->ops->tx_immediate_compl)
93 wl->ops->tx_immediate_compl(wl);
94}
95
96static inline int
97wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
98{
99 if (wl->ops->init_vif)
100 return wl->ops->init_vif(wl, wlvif);
101
102 return 0;
103}
104
105static inline u32
106wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif)
107{
108 if (!wl->ops->sta_get_ap_rate_mask)
109 BUG_ON(1);
110
111 return wl->ops->sta_get_ap_rate_mask(wl, wlvif);
112}
113
114static inline int wlcore_identify_fw(struct wl1271 *wl)
115{
116 if (wl->ops->identify_fw)
117 return wl->ops->identify_fw(wl);
118
119 return 0;
120}
121
122#endif
diff --git a/drivers/net/wireless/wl12xx/ini.h b/drivers/net/wireless/ti/wlcore/ini.h
index 4cf9ecc56212..4cf9ecc56212 100644
--- a/drivers/net/wireless/wl12xx/ini.h
+++ b/drivers/net/wireless/ti/wlcore/ini.h
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 203fbebf09eb..9f89255eb6e6 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -30,9 +30,9 @@
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "acx.h" 31#include "acx.h"
32#include "cmd.h" 32#include "cmd.h"
33#include "reg.h"
34#include "tx.h" 33#include "tx.h"
35#include "io.h" 34#include "io.h"
35#include "hw_ops.h"
36 36
37int wl1271_init_templates_config(struct wl1271 *wl) 37int wl1271_init_templates_config(struct wl1271 *wl)
38{ 38{
@@ -319,7 +319,7 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
319{ 319{
320 int ret; 320 int ret;
321 321
322 if (wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) 322 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
323 return 0; 323 return 0;
324 324
325 ret = wl12xx_cmd_config_fwlog(wl); 325 ret = wl12xx_cmd_config_fwlog(wl);
@@ -494,26 +494,6 @@ static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
494 return wl12xx_acx_set_ba_initiator_policy(wl, wlvif); 494 return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
495} 495}
496 496
497int wl1271_chip_specific_init(struct wl1271 *wl)
498{
499 int ret = 0;
500
501 if (wl->chip.id == CHIP_ID_1283_PG20) {
502 u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
503
504 if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
505 /* Enable SDIO padding */
506 host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
507
508 /* Must be before wl1271_acx_init_mem_config() */
509 ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap);
510 if (ret < 0)
511 goto out;
512 }
513out:
514 return ret;
515}
516
517/* vif-specifc initialization */ 497/* vif-specifc initialization */
518static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) 498static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
519{ 499{
@@ -582,10 +562,17 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
582 if (ret < 0) 562 if (ret < 0)
583 return ret; 563 return ret;
584 } else if (!wl->sta_count) { 564 } else if (!wl->sta_count) {
585 /* Configure for ELP power saving */ 565 if (wl->quirks & WLCORE_QUIRK_NO_ELP) {
586 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); 566 /* Configure for power always on */
587 if (ret < 0) 567 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
588 return ret; 568 if (ret < 0)
569 return ret;
570 } else {
571 /* Configure for ELP power saving */
572 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
573 if (ret < 0)
574 return ret;
575 }
589 } 576 }
590 } 577 }
591 578
@@ -652,6 +639,10 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
652 if (ret < 0) 639 if (ret < 0)
653 return ret; 640 return ret;
654 641
642 ret = wlcore_hw_init_vif(wl, wlvif);
643 if (ret < 0)
644 return ret;
645
655 return 0; 646 return 0;
656} 647}
657 648
@@ -659,27 +650,8 @@ int wl1271_hw_init(struct wl1271 *wl)
659{ 650{
660 int ret; 651 int ret;
661 652
662 if (wl->chip.id == CHIP_ID_1283_PG20) { 653 /* Chip-specific hw init */
663 ret = wl128x_cmd_general_parms(wl); 654 ret = wl->ops->hw_init(wl);
664 if (ret < 0)
665 return ret;
666 ret = wl128x_cmd_radio_parms(wl);
667 if (ret < 0)
668 return ret;
669 } else {
670 ret = wl1271_cmd_general_parms(wl);
671 if (ret < 0)
672 return ret;
673 ret = wl1271_cmd_radio_parms(wl);
674 if (ret < 0)
675 return ret;
676 ret = wl1271_cmd_ext_radio_parms(wl);
677 if (ret < 0)
678 return ret;
679 }
680
681 /* Chip-specific init */
682 ret = wl1271_chip_specific_init(wl);
683 if (ret < 0) 655 if (ret < 0)
684 return ret; 656 return ret;
685 657
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/ti/wlcore/init.h
index 2da0f404ef6e..a45fbfddec19 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/ti/wlcore/init.h
@@ -24,7 +24,7 @@
24#ifndef __INIT_H__ 24#ifndef __INIT_H__
25#define __INIT_H__ 25#define __INIT_H__
26 26
27#include "wl12xx.h" 27#include "wlcore.h"
28 28
29int wl1271_hw_init_power_auth(struct wl1271 *wl); 29int wl1271_hw_init_power_auth(struct wl1271 *wl);
30int wl1271_init_templates_config(struct wl1271 *wl); 30int wl1271_init_templates_config(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/ti/wlcore/io.c
index c574a3b31e31..7cd0081aede5 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -26,84 +26,12 @@
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28 28
29#include "wl12xx.h" 29#include "wlcore.h"
30#include "debug.h" 30#include "debug.h"
31#include "wl12xx_80211.h" 31#include "wl12xx_80211.h"
32#include "io.h" 32#include "io.h"
33#include "tx.h" 33#include "tx.h"
34 34
35#define OCP_CMD_LOOP 32
36
37#define OCP_CMD_WRITE 0x1
38#define OCP_CMD_READ 0x2
39
40#define OCP_READY_MASK BIT(18)
41#define OCP_STATUS_MASK (BIT(16) | BIT(17))
42
43#define OCP_STATUS_NO_RESP 0x00000
44#define OCP_STATUS_OK 0x10000
45#define OCP_STATUS_REQ_FAILED 0x20000
46#define OCP_STATUS_RESP_ERROR 0x30000
47
48struct wl1271_partition_set wl12xx_part_table[PART_TABLE_LEN] = {
49 [PART_DOWN] = {
50 .mem = {
51 .start = 0x00000000,
52 .size = 0x000177c0
53 },
54 .reg = {
55 .start = REGISTERS_BASE,
56 .size = 0x00008800
57 },
58 .mem2 = {
59 .start = 0x00000000,
60 .size = 0x00000000
61 },
62 .mem3 = {
63 .start = 0x00000000,
64 .size = 0x00000000
65 },
66 },
67
68 [PART_WORK] = {
69 .mem = {
70 .start = 0x00040000,
71 .size = 0x00014fc0
72 },
73 .reg = {
74 .start = REGISTERS_BASE,
75 .size = 0x0000a000
76 },
77 .mem2 = {
78 .start = 0x003004f8,
79 .size = 0x00000004
80 },
81 .mem3 = {
82 .start = 0x00040404,
83 .size = 0x00000000
84 },
85 },
86
87 [PART_DRPW] = {
88 .mem = {
89 .start = 0x00040000,
90 .size = 0x00014fc0
91 },
92 .reg = {
93 .start = DRPW_BASE,
94 .size = 0x00006000
95 },
96 .mem2 = {
97 .start = 0x00000000,
98 .size = 0x00000000
99 },
100 .mem3 = {
101 .start = 0x00000000,
102 .size = 0x00000000
103 }
104 }
105};
106
107bool wl1271_set_block_size(struct wl1271 *wl) 35bool wl1271_set_block_size(struct wl1271 *wl)
108{ 36{
109 if (wl->if_ops->set_block_size) { 37 if (wl->if_ops->set_block_size) {
@@ -114,17 +42,53 @@ bool wl1271_set_block_size(struct wl1271 *wl)
114 return false; 42 return false;
115} 43}
116 44
117void wl1271_disable_interrupts(struct wl1271 *wl) 45void wlcore_disable_interrupts(struct wl1271 *wl)
118{ 46{
119 disable_irq(wl->irq); 47 disable_irq(wl->irq);
120} 48}
49EXPORT_SYMBOL_GPL(wlcore_disable_interrupts);
121 50
122void wl1271_enable_interrupts(struct wl1271 *wl) 51void wlcore_enable_interrupts(struct wl1271 *wl)
123{ 52{
124 enable_irq(wl->irq); 53 enable_irq(wl->irq);
125} 54}
55EXPORT_SYMBOL_GPL(wlcore_enable_interrupts);
126 56
127/* Set the SPI partitions to access the chip addresses 57int wlcore_translate_addr(struct wl1271 *wl, int addr)
58{
59 struct wlcore_partition_set *part = &wl->curr_part;
60
61 /*
62 * To translate, first check to which window of addresses the
63 * particular address belongs. Then subtract the starting address
64 * of that window from the address. Then, add offset of the
65 * translated region.
66 *
67 * The translated regions occur next to each other in physical device
68 * memory, so just add the sizes of the preceding address regions to
69 * get the offset to the new region.
70 */
71 if ((addr >= part->mem.start) &&
72 (addr < part->mem.start + part->mem.size))
73 return addr - part->mem.start;
74 else if ((addr >= part->reg.start) &&
75 (addr < part->reg.start + part->reg.size))
76 return addr - part->reg.start + part->mem.size;
77 else if ((addr >= part->mem2.start) &&
78 (addr < part->mem2.start + part->mem2.size))
79 return addr - part->mem2.start + part->mem.size +
80 part->reg.size;
81 else if ((addr >= part->mem3.start) &&
82 (addr < part->mem3.start + part->mem3.size))
83 return addr - part->mem3.start + part->mem.size +
84 part->reg.size + part->mem2.size;
85
86 WARN(1, "HW address 0x%x out of range", addr);
87 return 0;
88}
89EXPORT_SYMBOL_GPL(wlcore_translate_addr);
90
91/* Set the partitions to access the chip addresses
128 * 92 *
129 * To simplify driver code, a fixed (virtual) memory map is defined for 93 * To simplify driver code, a fixed (virtual) memory map is defined for
130 * register and memory addresses. Because in the chipset, in different stages 94 * register and memory addresses. Because in the chipset, in different stages
@@ -158,33 +122,43 @@ void wl1271_enable_interrupts(struct wl1271 *wl)
158 * | | 122 * | |
159 * 123 *
160 */ 124 */
161int wl1271_set_partition(struct wl1271 *wl, 125void wlcore_set_partition(struct wl1271 *wl,
162 struct wl1271_partition_set *p) 126 const struct wlcore_partition_set *p)
163{ 127{
164 /* copy partition info */ 128 /* copy partition info */
165 memcpy(&wl->part, p, sizeof(*p)); 129 memcpy(&wl->curr_part, p, sizeof(*p));
166 130
167 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", 131 wl1271_debug(DEBUG_IO, "mem_start %08X mem_size %08X",
168 p->mem.start, p->mem.size); 132 p->mem.start, p->mem.size);
169 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", 133 wl1271_debug(DEBUG_IO, "reg_start %08X reg_size %08X",
170 p->reg.start, p->reg.size); 134 p->reg.start, p->reg.size);
171 wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X", 135 wl1271_debug(DEBUG_IO, "mem2_start %08X mem2_size %08X",
172 p->mem2.start, p->mem2.size); 136 p->mem2.start, p->mem2.size);
173 wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X", 137 wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X",
174 p->mem3.start, p->mem3.size); 138 p->mem3.start, p->mem3.size);
175 139
176 /* write partition info to the chipset */
177 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start); 140 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
178 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size); 141 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
179 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start); 142 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
180 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size); 143 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
181 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start); 144 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
182 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size); 145 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
146 /*
147 * We don't need the size of the last partition, as it is
148 * automatically calculated based on the total memory size and
149 * the sizes of the previous partitions.
150 */
183 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); 151 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
152}
153EXPORT_SYMBOL_GPL(wlcore_set_partition);
184 154
185 return 0; 155void wlcore_select_partition(struct wl1271 *wl, u8 part)
156{
157 wl1271_debug(DEBUG_IO, "setting partition %d", part);
158
159 wlcore_set_partition(wl, &wl->ptable[part]);
186} 160}
187EXPORT_SYMBOL_GPL(wl1271_set_partition); 161EXPORT_SYMBOL_GPL(wlcore_select_partition);
188 162
189void wl1271_io_reset(struct wl1271 *wl) 163void wl1271_io_reset(struct wl1271 *wl)
190{ 164{
@@ -197,48 +171,3 @@ void wl1271_io_init(struct wl1271 *wl)
197 if (wl->if_ops->init) 171 if (wl->if_ops->init)
198 wl->if_ops->init(wl->dev); 172 wl->if_ops->init(wl->dev);
199} 173}
200
201void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
202{
203 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
204 addr = (addr >> 1) + 0x30000;
205 wl1271_write32(wl, OCP_POR_CTR, addr);
206
207 /* write value to OCP_POR_WDATA */
208 wl1271_write32(wl, OCP_DATA_WRITE, val);
209
210 /* write 1 to OCP_CMD */
211 wl1271_write32(wl, OCP_CMD, OCP_CMD_WRITE);
212}
213
214u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
215{
216 u32 val;
217 int timeout = OCP_CMD_LOOP;
218
219 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
220 addr = (addr >> 1) + 0x30000;
221 wl1271_write32(wl, OCP_POR_CTR, addr);
222
223 /* write 2 to OCP_CMD */
224 wl1271_write32(wl, OCP_CMD, OCP_CMD_READ);
225
226 /* poll for data ready */
227 do {
228 val = wl1271_read32(wl, OCP_DATA_READ);
229 } while (!(val & OCP_READY_MASK) && --timeout);
230
231 if (!timeout) {
232 wl1271_warning("Top register access timed out.");
233 return 0xffff;
234 }
235
236 /* check data status and return if OK */
237 if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
238 return val & 0xffff;
239 else {
240 wl1271_warning("Top register access returned error.");
241 return 0xffff;
242 }
243}
244
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 4fb3dab8c3b2..8942954b56a0 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -26,7 +26,6 @@
26#define __IO_H__ 26#define __IO_H__
27 27
28#include <linux/irqreturn.h> 28#include <linux/irqreturn.h>
29#include "reg.h"
30 29
31#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
32 31
@@ -43,15 +42,14 @@
43 42
44#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
45 44
46extern struct wl1271_partition_set wl12xx_part_table[PART_TABLE_LEN];
47
48struct wl1271; 45struct wl1271;
49 46
50void wl1271_disable_interrupts(struct wl1271 *wl); 47void wlcore_disable_interrupts(struct wl1271 *wl);
51void wl1271_enable_interrupts(struct wl1271 *wl); 48void wlcore_enable_interrupts(struct wl1271 *wl);
52 49
53void wl1271_io_reset(struct wl1271 *wl); 50void wl1271_io_reset(struct wl1271 *wl);
54void wl1271_io_init(struct wl1271 *wl); 51void wl1271_io_init(struct wl1271 *wl);
52int wlcore_translate_addr(struct wl1271 *wl, int addr);
55 53
56/* Raw target IO, address is not translated */ 54/* Raw target IO, address is not translated */
57static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, 55static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
@@ -66,6 +64,18 @@ static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
66 wl->if_ops->read(wl->dev, addr, buf, len, fixed); 64 wl->if_ops->read(wl->dev, addr, buf, len, fixed);
67} 65}
68 66
67static inline void wlcore_raw_read_data(struct wl1271 *wl, int reg, void *buf,
68 size_t len, bool fixed)
69{
70 wl1271_raw_read(wl, wl->rtable[reg], buf, len, fixed);
71}
72
73static inline void wlcore_raw_write_data(struct wl1271 *wl, int reg, void *buf,
74 size_t len, bool fixed)
75{
76 wl1271_raw_write(wl, wl->rtable[reg], buf, len, fixed);
77}
78
69static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) 79static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
70{ 80{
71 wl1271_raw_read(wl, addr, &wl->buffer_32, 81 wl1271_raw_read(wl, addr, &wl->buffer_32,
@@ -81,36 +91,12 @@ static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
81 sizeof(wl->buffer_32), false); 91 sizeof(wl->buffer_32), false);
82} 92}
83 93
84/* Translated target IO */
85static inline int wl1271_translate_addr(struct wl1271 *wl, int addr)
86{
87 /*
88 * To translate, first check to which window of addresses the
89 * particular address belongs. Then subtract the starting address
90 * of that window from the address. Then, add offset of the
91 * translated region.
92 *
93 * The translated regions occur next to each other in physical device
94 * memory, so just add the sizes of the preceding address regions to
95 * get the offset to the new region.
96 *
97 * Currently, only the two first regions are addressed, and the
98 * assumption is that all addresses will fall into either of those
99 * two.
100 */
101 if ((addr >= wl->part.reg.start) &&
102 (addr < wl->part.reg.start + wl->part.reg.size))
103 return addr - wl->part.reg.start + wl->part.mem.size;
104 else
105 return addr - wl->part.mem.start;
106}
107
108static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf, 94static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf,
109 size_t len, bool fixed) 95 size_t len, bool fixed)
110{ 96{
111 int physical; 97 int physical;
112 98
113 physical = wl1271_translate_addr(wl, addr); 99 physical = wlcore_translate_addr(wl, addr);
114 100
115 wl1271_raw_read(wl, physical, buf, len, fixed); 101 wl1271_raw_read(wl, physical, buf, len, fixed);
116} 102}
@@ -120,11 +106,23 @@ static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf,
120{ 106{
121 int physical; 107 int physical;
122 108
123 physical = wl1271_translate_addr(wl, addr); 109 physical = wlcore_translate_addr(wl, addr);
124 110
125 wl1271_raw_write(wl, physical, buf, len, fixed); 111 wl1271_raw_write(wl, physical, buf, len, fixed);
126} 112}
127 113
114static inline void wlcore_write_data(struct wl1271 *wl, int reg, void *buf,
115 size_t len, bool fixed)
116{
117 wl1271_write(wl, wl->rtable[reg], buf, len, fixed);
118}
119
120static inline void wlcore_read_data(struct wl1271 *wl, int reg, void *buf,
121 size_t len, bool fixed)
122{
123 wl1271_read(wl, wl->rtable[reg], buf, len, fixed);
124}
125
128static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr, 126static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
129 void *buf, size_t len, bool fixed) 127 void *buf, size_t len, bool fixed)
130{ 128{
@@ -134,19 +132,30 @@ static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
134 /* Addresses are stored internally as addresses to 32 bytes blocks */ 132 /* Addresses are stored internally as addresses to 32 bytes blocks */
135 addr = hwaddr << 5; 133 addr = hwaddr << 5;
136 134
137 physical = wl1271_translate_addr(wl, addr); 135 physical = wlcore_translate_addr(wl, addr);
138 136
139 wl1271_raw_read(wl, physical, buf, len, fixed); 137 wl1271_raw_read(wl, physical, buf, len, fixed);
140} 138}
141 139
142static inline u32 wl1271_read32(struct wl1271 *wl, int addr) 140static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
143{ 141{
144 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr)); 142 return wl1271_raw_read32(wl, wlcore_translate_addr(wl, addr));
145} 143}
146 144
147static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) 145static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
148{ 146{
149 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val); 147 wl1271_raw_write32(wl, wlcore_translate_addr(wl, addr), val);
148}
149
150static inline u32 wlcore_read_reg(struct wl1271 *wl, int reg)
151{
152 return wl1271_raw_read32(wl,
153 wlcore_translate_addr(wl, wl->rtable[reg]));
154}
155
156static inline void wlcore_write_reg(struct wl1271 *wl, int reg, u32 val)
157{
158 wl1271_raw_write32(wl, wlcore_translate_addr(wl, wl->rtable[reg]), val);
150} 159}
151 160
152static inline void wl1271_power_off(struct wl1271 *wl) 161static inline void wl1271_power_off(struct wl1271 *wl)
@@ -164,13 +173,8 @@ static inline int wl1271_power_on(struct wl1271 *wl)
164 return ret; 173 return ret;
165} 174}
166 175
167 176void wlcore_set_partition(struct wl1271 *wl,
168/* Top Register IO */ 177 const struct wlcore_partition_set *p);
169void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
170u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
171
172int wl1271_set_partition(struct wl1271 *wl,
173 struct wl1271_partition_set *p);
174 178
175bool wl1271_set_block_size(struct wl1271 *wl); 179bool wl1271_set_block_size(struct wl1271 *wl);
176 180
@@ -178,4 +182,6 @@ bool wl1271_set_block_size(struct wl1271 *wl);
178 182
179int wl1271_tx_dummy_packet(struct wl1271 *wl); 183int wl1271_tx_dummy_packet(struct wl1271 *wl);
180 184
185void wlcore_select_partition(struct wl1271 *wl, u8 part);
186
181#endif 187#endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 39002363611e..2b0f987660c6 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -35,10 +35,9 @@
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37 37
38#include "wl12xx.h" 38#include "wlcore.h"
39#include "debug.h" 39#include "debug.h"
40#include "wl12xx_80211.h" 40#include "wl12xx_80211.h"
41#include "reg.h"
42#include "io.h" 41#include "io.h"
43#include "event.h" 42#include "event.h"
44#include "tx.h" 43#include "tx.h"
@@ -50,342 +49,15 @@
50#include "boot.h" 49#include "boot.h"
51#include "testmode.h" 50#include "testmode.h"
52#include "scan.h" 51#include "scan.h"
52#include "hw_ops.h"
53 53
54#define WL1271_BOOT_RETRIES 3 54#define WL1271_BOOT_RETRIES 3
55 55
56static struct conf_drv_settings default_conf = { 56#define WL1271_BOOT_RETRIES 3
57 .sg = {
58 .params = {
59 [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
60 [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
61 [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
62 [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
63 [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
64 [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
65 [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
66 [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
67 [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
68 [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
69 [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
70 [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
71 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
72 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
73 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
74 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
75 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
76 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
77 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
78 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
79 [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
80 [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
81 [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
82 [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
83 [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
84 [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
85 /* active scan params */
86 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
87 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
88 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
89 /* passive scan params */
90 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
91 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
92 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
93 /* passive scan in dual antenna params */
94 [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
95 [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
96 [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
97 /* general params */
98 [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
99 [CONF_SG_ANTENNA_CONFIGURATION] = 0,
100 [CONF_SG_BEACON_MISS_PERCENT] = 60,
101 [CONF_SG_DHCP_TIME] = 5000,
102 [CONF_SG_RXT] = 1200,
103 [CONF_SG_TXT] = 1000,
104 [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
105 [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
106 [CONF_SG_HV3_MAX_SERVED] = 6,
107 [CONF_SG_PS_POLL_TIMEOUT] = 10,
108 [CONF_SG_UPSD_TIMEOUT] = 10,
109 [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
110 [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
111 [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
112 /* AP params */
113 [CONF_AP_BEACON_MISS_TX] = 3,
114 [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
115 [CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
116 [CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
117 [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
118 [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
119 /* CTS Diluting params */
120 [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
121 [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
122 },
123 .state = CONF_SG_PROTECTIVE,
124 },
125 .rx = {
126 .rx_msdu_life_time = 512000,
127 .packet_detection_threshold = 0,
128 .ps_poll_timeout = 15,
129 .upsd_timeout = 15,
130 .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
131 .rx_cca_threshold = 0,
132 .irq_blk_threshold = 0xFFFF,
133 .irq_pkt_threshold = 0,
134 .irq_timeout = 600,
135 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
136 },
137 .tx = {
138 .tx_energy_detection = 0,
139 .sta_rc_conf = {
140 .enabled_rates = 0,
141 .short_retry_limit = 10,
142 .long_retry_limit = 10,
143 .aflags = 0,
144 },
145 .ac_conf_count = 4,
146 .ac_conf = {
147 [CONF_TX_AC_BE] = {
148 .ac = CONF_TX_AC_BE,
149 .cw_min = 15,
150 .cw_max = 63,
151 .aifsn = 3,
152 .tx_op_limit = 0,
153 },
154 [CONF_TX_AC_BK] = {
155 .ac = CONF_TX_AC_BK,
156 .cw_min = 15,
157 .cw_max = 63,
158 .aifsn = 7,
159 .tx_op_limit = 0,
160 },
161 [CONF_TX_AC_VI] = {
162 .ac = CONF_TX_AC_VI,
163 .cw_min = 15,
164 .cw_max = 63,
165 .aifsn = CONF_TX_AIFS_PIFS,
166 .tx_op_limit = 3008,
167 },
168 [CONF_TX_AC_VO] = {
169 .ac = CONF_TX_AC_VO,
170 .cw_min = 15,
171 .cw_max = 63,
172 .aifsn = CONF_TX_AIFS_PIFS,
173 .tx_op_limit = 1504,
174 },
175 },
176 .max_tx_retries = 100,
177 .ap_aging_period = 300,
178 .tid_conf_count = 4,
179 .tid_conf = {
180 [CONF_TX_AC_BE] = {
181 .queue_id = CONF_TX_AC_BE,
182 .channel_type = CONF_CHANNEL_TYPE_EDCF,
183 .tsid = CONF_TX_AC_BE,
184 .ps_scheme = CONF_PS_SCHEME_LEGACY,
185 .ack_policy = CONF_ACK_POLICY_LEGACY,
186 .apsd_conf = {0, 0},
187 },
188 [CONF_TX_AC_BK] = {
189 .queue_id = CONF_TX_AC_BK,
190 .channel_type = CONF_CHANNEL_TYPE_EDCF,
191 .tsid = CONF_TX_AC_BK,
192 .ps_scheme = CONF_PS_SCHEME_LEGACY,
193 .ack_policy = CONF_ACK_POLICY_LEGACY,
194 .apsd_conf = {0, 0},
195 },
196 [CONF_TX_AC_VI] = {
197 .queue_id = CONF_TX_AC_VI,
198 .channel_type = CONF_CHANNEL_TYPE_EDCF,
199 .tsid = CONF_TX_AC_VI,
200 .ps_scheme = CONF_PS_SCHEME_LEGACY,
201 .ack_policy = CONF_ACK_POLICY_LEGACY,
202 .apsd_conf = {0, 0},
203 },
204 [CONF_TX_AC_VO] = {
205 .queue_id = CONF_TX_AC_VO,
206 .channel_type = CONF_CHANNEL_TYPE_EDCF,
207 .tsid = CONF_TX_AC_VO,
208 .ps_scheme = CONF_PS_SCHEME_LEGACY,
209 .ack_policy = CONF_ACK_POLICY_LEGACY,
210 .apsd_conf = {0, 0},
211 },
212 },
213 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
214 .tx_compl_timeout = 700,
215 .tx_compl_threshold = 4,
216 .basic_rate = CONF_HW_BIT_RATE_1MBPS,
217 .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
218 .tmpl_short_retry_limit = 10,
219 .tmpl_long_retry_limit = 10,
220 .tx_watchdog_timeout = 5000,
221 },
222 .conn = {
223 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
224 .listen_interval = 1,
225 .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
226 .suspend_listen_interval = 3,
227 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
228 .bcn_filt_ie_count = 2,
229 .bcn_filt_ie = {
230 [0] = {
231 .ie = WLAN_EID_CHANNEL_SWITCH,
232 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
233 },
234 [1] = {
235 .ie = WLAN_EID_HT_INFORMATION,
236 .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
237 },
238 },
239 .synch_fail_thold = 10,
240 .bss_lose_timeout = 100,
241 .beacon_rx_timeout = 10000,
242 .broadcast_timeout = 20000,
243 .rx_broadcast_in_ps = 1,
244 .ps_poll_threshold = 10,
245 .bet_enable = CONF_BET_MODE_ENABLE,
246 .bet_max_consecutive = 50,
247 .psm_entry_retries = 8,
248 .psm_exit_retries = 16,
249 .psm_entry_nullfunc_retries = 3,
250 .dynamic_ps_timeout = 200,
251 .forced_ps = false,
252 .keep_alive_interval = 55000,
253 .max_listen_interval = 20,
254 },
255 .itrim = {
256 .enable = false,
257 .timeout = 50000,
258 },
259 .pm_config = {
260 .host_clk_settling_time = 5000,
261 .host_fast_wakeup_support = false
262 },
263 .roam_trigger = {
264 .trigger_pacing = 1,
265 .avg_weight_rssi_beacon = 20,
266 .avg_weight_rssi_data = 10,
267 .avg_weight_snr_beacon = 20,
268 .avg_weight_snr_data = 10,
269 },
270 .scan = {
271 .min_dwell_time_active = 7500,
272 .max_dwell_time_active = 30000,
273 .min_dwell_time_passive = 100000,
274 .max_dwell_time_passive = 100000,
275 .num_probe_reqs = 2,
276 .split_scan_timeout = 50000,
277 },
278 .sched_scan = {
279 /* sched_scan requires dwell times in TU instead of TU/1000 */
280 .min_dwell_time_active = 30,
281 .max_dwell_time_active = 60,
282 .dwell_time_passive = 100,
283 .dwell_time_dfs = 150,
284 .num_probe_reqs = 2,
285 .rssi_threshold = -90,
286 .snr_threshold = 0,
287 },
288 .rf = {
289 .tx_per_channel_power_compensation_2 = {
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
291 },
292 .tx_per_channel_power_compensation_5 = {
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
296 },
297 },
298 .ht = {
299 .rx_ba_win_size = 8,
300 .tx_ba_win_size = 64,
301 .inactivity_timeout = 10000,
302 .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
303 },
304 .mem_wl127x = {
305 .num_stations = 1,
306 .ssid_profiles = 1,
307 .rx_block_num = 70,
308 .tx_min_block_num = 40,
309 .dynamic_memory = 1,
310 .min_req_tx_blocks = 100,
311 .min_req_rx_blocks = 22,
312 .tx_min = 27,
313 },
314 .mem_wl128x = {
315 .num_stations = 1,
316 .ssid_profiles = 1,
317 .rx_block_num = 40,
318 .tx_min_block_num = 40,
319 .dynamic_memory = 1,
320 .min_req_tx_blocks = 45,
321 .min_req_rx_blocks = 22,
322 .tx_min = 27,
323 },
324 .fm_coex = {
325 .enable = true,
326 .swallow_period = 5,
327 .n_divider_fref_set_1 = 0xff, /* default */
328 .n_divider_fref_set_2 = 12,
329 .m_divider_fref_set_1 = 148,
330 .m_divider_fref_set_2 = 0xffff, /* default */
331 .coex_pll_stabilization_time = 0xffffffff, /* default */
332 .ldo_stabilization_time = 0xffff, /* default */
333 .fm_disturbed_band_margin = 0xff, /* default */
334 .swallow_clk_diff = 0xff, /* default */
335 },
336 .rx_streaming = {
337 .duration = 150,
338 .queues = 0x1,
339 .interval = 20,
340 .always = 0,
341 },
342 .fwlog = {
343 .mode = WL12XX_FWLOG_ON_DEMAND,
344 .mem_blocks = 2,
345 .severity = 0,
346 .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
347 .output = WL12XX_FWLOG_OUTPUT_HOST,
348 .threshold = 0,
349 },
350 .hci_io_ds = HCI_IO_DS_6MA,
351 .rate = {
352 .rate_retry_score = 32000,
353 .per_add = 8192,
354 .per_th1 = 2048,
355 .per_th2 = 4096,
356 .max_per = 8100,
357 .inverse_curiosity_factor = 5,
358 .tx_fail_low_th = 4,
359 .tx_fail_high_th = 10,
360 .per_alpha_shift = 4,
361 .per_add_shift = 13,
362 .per_beta1_shift = 10,
363 .per_beta2_shift = 8,
364 .rate_check_up = 2,
365 .rate_check_down = 12,
366 .rate_retry_policy = {
367 0x00, 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00,
370 },
371 },
372 .hangover = {
373 .recover_time = 0,
374 .hangover_period = 20,
375 .dynamic_mode = 1,
376 .early_termination_mode = 1,
377 .max_period = 20,
378 .min_period = 1,
379 .increase_delta = 1,
380 .decrease_delta = 2,
381 .quiet_time = 4,
382 .increase_time = 1,
383 .window_size = 16,
384 },
385};
386 57
387static char *fwlog_param; 58static char *fwlog_param;
388static bool bug_on_recovery; 59static bool bug_on_recovery;
60static bool no_recovery;
389 61
390static void __wl1271_op_remove_interface(struct wl1271 *wl, 62static void __wl1271_op_remove_interface(struct wl1271 *wl,
391 struct ieee80211_vif *vif, 63 struct ieee80211_vif *vif,
@@ -628,22 +300,8 @@ out:
628 mutex_unlock(&wl->mutex); 300 mutex_unlock(&wl->mutex);
629} 301}
630 302
631static void wl1271_conf_init(struct wl1271 *wl) 303static void wlcore_adjust_conf(struct wl1271 *wl)
632{ 304{
633
634 /*
635 * This function applies the default configuration to the driver. This
636 * function is invoked upon driver load (spi probe.)
637 *
638 * The configuration is stored in a run-time structure in order to
639 * facilitate for run-time adjustment of any of the parameters. Making
640 * changes to the configuration structure will apply the new values on
641 * the next interface up (wl1271_op_start.)
642 */
643
644 /* apply driver default configuration */
645 memcpy(&wl->conf, &default_conf, sizeof(default_conf));
646
647 /* Adjust settings according to optional module parameters */ 305 /* Adjust settings according to optional module parameters */
648 if (fwlog_param) { 306 if (fwlog_param) {
649 if (!strcmp(fwlog_param, "continuous")) { 307 if (!strcmp(fwlog_param, "continuous")) {
@@ -666,28 +324,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
666{ 324{
667 int ret; 325 int ret;
668 326
669 if (wl->chip.id == CHIP_ID_1283_PG20) 327 ret = wl->ops->hw_init(wl);
670 ret = wl128x_cmd_general_parms(wl);
671 else
672 ret = wl1271_cmd_general_parms(wl);
673 if (ret < 0)
674 return ret;
675
676 if (wl->chip.id == CHIP_ID_1283_PG20)
677 ret = wl128x_cmd_radio_parms(wl);
678 else
679 ret = wl1271_cmd_radio_parms(wl);
680 if (ret < 0)
681 return ret;
682
683 if (wl->chip.id != CHIP_ID_1283_PG20) {
684 ret = wl1271_cmd_ext_radio_parms(wl);
685 if (ret < 0)
686 return ret;
687 }
688
689 /* Chip-specific initializations */
690 ret = wl1271_chip_specific_init(wl);
691 if (ret < 0) 328 if (ret < 0)
692 return ret; 329 return ret;
693 330
@@ -750,7 +387,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
750 387
751static void wl12xx_irq_update_links_status(struct wl1271 *wl, 388static void wl12xx_irq_update_links_status(struct wl1271 *wl,
752 struct wl12xx_vif *wlvif, 389 struct wl12xx_vif *wlvif,
753 struct wl12xx_fw_status *status) 390 struct wl_fw_status *status)
754{ 391{
755 struct wl1271_link *lnk; 392 struct wl1271_link *lnk;
756 u32 cur_fw_ps_map; 393 u32 cur_fw_ps_map;
@@ -770,9 +407,10 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
770 407
771 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) { 408 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
772 lnk = &wl->links[hlid]; 409 lnk = &wl->links[hlid];
773 cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts; 410 cnt = status->counters.tx_lnk_free_pkts[hlid] -
411 lnk->prev_freed_pkts;
774 412
775 lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid]; 413 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
776 lnk->allocated_pkts -= cnt; 414 lnk->allocated_pkts -= cnt;
777 415
778 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, 416 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
@@ -781,15 +419,19 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
781} 419}
782 420
783static void wl12xx_fw_status(struct wl1271 *wl, 421static void wl12xx_fw_status(struct wl1271 *wl,
784 struct wl12xx_fw_status *status) 422 struct wl_fw_status *status)
785{ 423{
786 struct wl12xx_vif *wlvif; 424 struct wl12xx_vif *wlvif;
787 struct timespec ts; 425 struct timespec ts;
788 u32 old_tx_blk_count = wl->tx_blocks_available; 426 u32 old_tx_blk_count = wl->tx_blocks_available;
789 int avail, freed_blocks; 427 int avail, freed_blocks;
790 int i; 428 int i;
429 size_t status_len;
430
431 status_len = sizeof(*status) + wl->fw_status_priv_len;
791 432
792 wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false); 433 wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status,
434 status_len, false);
793 435
794 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 436 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
795 "drv_rx_counter = %d, tx_results_counter = %d)", 437 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -801,10 +443,10 @@ static void wl12xx_fw_status(struct wl1271 *wl,
801 for (i = 0; i < NUM_TX_QUEUES; i++) { 443 for (i = 0; i < NUM_TX_QUEUES; i++) {
802 /* prevent wrap-around in freed-packets counter */ 444 /* prevent wrap-around in freed-packets counter */
803 wl->tx_allocated_pkts[i] -= 445 wl->tx_allocated_pkts[i] -=
804 (status->tx_released_pkts[i] - 446 (status->counters.tx_released_pkts[i] -
805 wl->tx_pkts_freed[i]) & 0xff; 447 wl->tx_pkts_freed[i]) & 0xff;
806 448
807 wl->tx_pkts_freed[i] = status->tx_released_pkts[i]; 449 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
808 } 450 }
809 451
810 /* prevent wrap-around in total blocks counter */ 452 /* prevent wrap-around in total blocks counter */
@@ -927,6 +569,9 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
927 smp_mb__after_clear_bit(); 569 smp_mb__after_clear_bit();
928 570
929 wl12xx_fw_status(wl, wl->fw_status); 571 wl12xx_fw_status(wl, wl->fw_status);
572
573 wlcore_hw_tx_immediate_compl(wl);
574
930 intr = le32_to_cpu(wl->fw_status->intr); 575 intr = le32_to_cpu(wl->fw_status->intr);
931 intr &= WL1271_INTR_MASK; 576 intr &= WL1271_INTR_MASK;
932 if (!intr) { 577 if (!intr) {
@@ -963,9 +608,7 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
963 } 608 }
964 609
965 /* check for tx results */ 610 /* check for tx results */
966 if (wl->fw_status->tx_results_counter != 611 wlcore_hw_tx_delayed_compl(wl);
967 (wl->tx_results_count & 0xff))
968 wl1271_tx_complete(wl);
969 612
970 /* Make sure the deferred queues don't get too long */ 613 /* Make sure the deferred queues don't get too long */
971 defer_count = skb_queue_len(&wl->deferred_tx_queue) + 614 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
@@ -1046,10 +689,7 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
1046 689
1047 if (plt) { 690 if (plt) {
1048 fw_type = WL12XX_FW_TYPE_PLT; 691 fw_type = WL12XX_FW_TYPE_PLT;
1049 if (wl->chip.id == CHIP_ID_1283_PG20) 692 fw_name = wl->plt_fw_name;
1050 fw_name = WL128X_PLT_FW_NAME;
1051 else
1052 fw_name = WL127X_PLT_FW_NAME;
1053 } else { 693 } else {
1054 /* 694 /*
1055 * we can't call wl12xx_get_vif_count() here because 695 * we can't call wl12xx_get_vif_count() here because
@@ -1057,16 +697,10 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
1057 */ 697 */
1058 if (wl->last_vif_count > 1) { 698 if (wl->last_vif_count > 1) {
1059 fw_type = WL12XX_FW_TYPE_MULTI; 699 fw_type = WL12XX_FW_TYPE_MULTI;
1060 if (wl->chip.id == CHIP_ID_1283_PG20) 700 fw_name = wl->mr_fw_name;
1061 fw_name = WL128X_FW_NAME_MULTI;
1062 else
1063 fw_name = WL127X_FW_NAME_MULTI;
1064 } else { 701 } else {
1065 fw_type = WL12XX_FW_TYPE_NORMAL; 702 fw_type = WL12XX_FW_TYPE_NORMAL;
1066 if (wl->chip.id == CHIP_ID_1283_PG20) 703 fw_name = wl->sr_fw_name;
1067 fw_name = WL128X_FW_NAME_SINGLE;
1068 else
1069 fw_name = WL127X_FW_NAME_SINGLE;
1070 } 704 }
1071 } 705 }
1072 706
@@ -1173,7 +807,7 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
1173 u32 first_addr; 807 u32 first_addr;
1174 u8 *block; 808 u8 *block;
1175 809
1176 if ((wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) || 810 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
1177 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) || 811 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
1178 (wl->conf.fwlog.mem_blocks == 0)) 812 (wl->conf.fwlog.mem_blocks == 0))
1179 return; 813 return;
@@ -1239,11 +873,20 @@ static void wl1271_recovery_work(struct work_struct *work)
1239 wl12xx_read_fwlog_panic(wl); 873 wl12xx_read_fwlog_panic(wl);
1240 874
1241 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x", 875 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
1242 wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4)); 876 wl->chip.fw_ver_str,
877 wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
1243 878
1244 BUG_ON(bug_on_recovery && 879 BUG_ON(bug_on_recovery &&
1245 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); 880 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1246 881
882 if (no_recovery) {
883 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
884 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
885 goto out_unlock;
886 }
887
888 BUG_ON(bug_on_recovery);
889
1247 /* 890 /*
1248 * Advance security sequence number to overcome potential progress 891 * Advance security sequence number to overcome potential progress
1249 * in the firmware during recovery. This doens't hurt if the network is 892 * in the firmware during recovery. This doens't hurt if the network is
@@ -1290,10 +933,7 @@ out_unlock:
1290 933
1291static void wl1271_fw_wakeup(struct wl1271 *wl) 934static void wl1271_fw_wakeup(struct wl1271 *wl)
1292{ 935{
1293 u32 elp_reg; 936 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1294
1295 elp_reg = ELPCTRL_WAKE_UP;
1296 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
1297} 937}
1298 938
1299static int wl1271_setup(struct wl1271 *wl) 939static int wl1271_setup(struct wl1271 *wl)
@@ -1323,7 +963,7 @@ static int wl12xx_set_power_on(struct wl1271 *wl)
1323 wl1271_io_reset(wl); 963 wl1271_io_reset(wl);
1324 wl1271_io_init(wl); 964 wl1271_io_init(wl);
1325 965
1326 wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]); 966 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1327 967
1328 /* ELP module wake up */ 968 /* ELP module wake up */
1329 wl1271_fw_wakeup(wl); 969 wl1271_fw_wakeup(wl);
@@ -1348,44 +988,18 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1348 * negligible, we use the same block size for all different 988 * negligible, we use the same block size for all different
1349 * chip types. 989 * chip types.
1350 */ 990 */
1351 if (!wl1271_set_block_size(wl)) 991 if (wl1271_set_block_size(wl))
1352 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; 992 wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1353
1354 switch (wl->chip.id) {
1355 case CHIP_ID_1271_PG10:
1356 wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
1357 wl->chip.id);
1358 993
1359 ret = wl1271_setup(wl); 994 ret = wl->ops->identify_chip(wl);
1360 if (ret < 0) 995 if (ret < 0)
1361 goto out; 996 goto out;
1362 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
1363 break;
1364
1365 case CHIP_ID_1271_PG20:
1366 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
1367 wl->chip.id);
1368
1369 ret = wl1271_setup(wl);
1370 if (ret < 0)
1371 goto out;
1372 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
1373 break;
1374 997
1375 case CHIP_ID_1283_PG20: 998 /* TODO: make sure the lower driver has set things up correctly */
1376 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
1377 wl->chip.id);
1378 999
1379 ret = wl1271_setup(wl); 1000 ret = wl1271_setup(wl);
1380 if (ret < 0) 1001 if (ret < 0)
1381 goto out;
1382 break;
1383 case CHIP_ID_1283_PG10:
1384 default:
1385 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
1386 ret = -ENODEV;
1387 goto out; 1002 goto out;
1388 }
1389 1003
1390 ret = wl12xx_fetch_firmware(wl, plt); 1004 ret = wl12xx_fetch_firmware(wl, plt);
1391 if (ret < 0) 1005 if (ret < 0)
@@ -1425,7 +1039,7 @@ int wl1271_plt_start(struct wl1271 *wl)
1425 if (ret < 0) 1039 if (ret < 0)
1426 goto power_off; 1040 goto power_off;
1427 1041
1428 ret = wl1271_boot(wl); 1042 ret = wl->ops->boot(wl);
1429 if (ret < 0) 1043 if (ret < 0)
1430 goto power_off; 1044 goto power_off;
1431 1045
@@ -1454,7 +1068,7 @@ irq_disable:
1454 work function will not do anything.) Also, any other 1068 work function will not do anything.) Also, any other
1455 possible concurrent operations will fail due to the 1069 possible concurrent operations will fail due to the
1456 current state, hence the wl1271 struct should be safe. */ 1070 current state, hence the wl1271 struct should be safe. */
1457 wl1271_disable_interrupts(wl); 1071 wlcore_disable_interrupts(wl);
1458 wl1271_flush_deferred_work(wl); 1072 wl1271_flush_deferred_work(wl);
1459 cancel_work_sync(&wl->netstack_work); 1073 cancel_work_sync(&wl->netstack_work);
1460 mutex_lock(&wl->mutex); 1074 mutex_lock(&wl->mutex);
@@ -1481,7 +1095,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
1481 * Otherwise, the interrupt handler might be called and exit without 1095 * Otherwise, the interrupt handler might be called and exit without
1482 * reading the interrupt status. 1096 * reading the interrupt status.
1483 */ 1097 */
1484 wl1271_disable_interrupts(wl); 1098 wlcore_disable_interrupts(wl);
1485 mutex_lock(&wl->mutex); 1099 mutex_lock(&wl->mutex);
1486 if (!wl->plt) { 1100 if (!wl->plt) {
1487 mutex_unlock(&wl->mutex); 1101 mutex_unlock(&wl->mutex);
@@ -1491,7 +1105,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
1491 * may have been disabled when op_stop was called. It will, 1105 * may have been disabled when op_stop was called. It will,
1492 * however, balance the above call to disable_interrupts(). 1106 * however, balance the above call to disable_interrupts().
1493 */ 1107 */
1494 wl1271_enable_interrupts(wl); 1108 wlcore_enable_interrupts(wl);
1495 1109
1496 wl1271_error("cannot power down because not in PLT " 1110 wl1271_error("cannot power down because not in PLT "
1497 "state: %d", wl->state); 1111 "state: %d", wl->state);
@@ -1652,14 +1266,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1652{ 1266{
1653 int ret = 0; 1267 int ret = 0;
1654 1268
1655 mutex_lock(&wl->mutex);
1656
1657 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 1269 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1658 goto out_unlock; 1270 goto out;
1659 1271
1660 ret = wl1271_ps_elp_wakeup(wl); 1272 ret = wl1271_ps_elp_wakeup(wl);
1661 if (ret < 0) 1273 if (ret < 0)
1662 goto out_unlock; 1274 goto out;
1663 1275
1664 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1276 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1665 wl->conf.conn.suspend_wake_up_event, 1277 wl->conf.conn.suspend_wake_up_event,
@@ -1668,11 +1280,9 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1668 if (ret < 0) 1280 if (ret < 0)
1669 wl1271_error("suspend: set wake up conditions failed: %d", ret); 1281 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1670 1282
1671
1672 wl1271_ps_elp_sleep(wl); 1283 wl1271_ps_elp_sleep(wl);
1673 1284
1674out_unlock: 1285out:
1675 mutex_unlock(&wl->mutex);
1676 return ret; 1286 return ret;
1677 1287
1678} 1288}
@@ -1682,20 +1292,17 @@ static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1682{ 1292{
1683 int ret = 0; 1293 int ret = 0;
1684 1294
1685 mutex_lock(&wl->mutex);
1686
1687 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) 1295 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1688 goto out_unlock; 1296 goto out;
1689 1297
1690 ret = wl1271_ps_elp_wakeup(wl); 1298 ret = wl1271_ps_elp_wakeup(wl);
1691 if (ret < 0) 1299 if (ret < 0)
1692 goto out_unlock; 1300 goto out;
1693 1301
1694 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); 1302 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1695 1303
1696 wl1271_ps_elp_sleep(wl); 1304 wl1271_ps_elp_sleep(wl);
1697out_unlock: 1305out:
1698 mutex_unlock(&wl->mutex);
1699 return ret; 1306 return ret;
1700 1307
1701} 1308}
@@ -1720,10 +1327,9 @@ static void wl1271_configure_resume(struct wl1271 *wl,
1720 if ((!is_ap) && (!is_sta)) 1327 if ((!is_ap) && (!is_sta))
1721 return; 1328 return;
1722 1329
1723 mutex_lock(&wl->mutex);
1724 ret = wl1271_ps_elp_wakeup(wl); 1330 ret = wl1271_ps_elp_wakeup(wl);
1725 if (ret < 0) 1331 if (ret < 0)
1726 goto out; 1332 return;
1727 1333
1728 if (is_sta) { 1334 if (is_sta) {
1729 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1335 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
@@ -1739,8 +1345,6 @@ static void wl1271_configure_resume(struct wl1271 *wl,
1739 } 1345 }
1740 1346
1741 wl1271_ps_elp_sleep(wl); 1347 wl1271_ps_elp_sleep(wl);
1742out:
1743 mutex_unlock(&wl->mutex);
1744} 1348}
1745 1349
1746static int wl1271_op_suspend(struct ieee80211_hw *hw, 1350static int wl1271_op_suspend(struct ieee80211_hw *hw,
@@ -1755,6 +1359,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1755 1359
1756 wl1271_tx_flush(wl); 1360 wl1271_tx_flush(wl);
1757 1361
1362 mutex_lock(&wl->mutex);
1758 wl->wow_enabled = true; 1363 wl->wow_enabled = true;
1759 wl12xx_for_each_wlvif(wl, wlvif) { 1364 wl12xx_for_each_wlvif(wl, wlvif) {
1760 ret = wl1271_configure_suspend(wl, wlvif); 1365 ret = wl1271_configure_suspend(wl, wlvif);
@@ -1763,6 +1368,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1763 return ret; 1368 return ret;
1764 } 1369 }
1765 } 1370 }
1371 mutex_unlock(&wl->mutex);
1766 /* flush any remaining work */ 1372 /* flush any remaining work */
1767 wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); 1373 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1768 1374
@@ -1770,7 +1376,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1770 * disable and re-enable interrupts in order to flush 1376 * disable and re-enable interrupts in order to flush
1771 * the threaded_irq 1377 * the threaded_irq
1772 */ 1378 */
1773 wl1271_disable_interrupts(wl); 1379 wlcore_disable_interrupts(wl);
1774 1380
1775 /* 1381 /*
1776 * set suspended flag to avoid triggering a new threaded_irq 1382 * set suspended flag to avoid triggering a new threaded_irq
@@ -1778,7 +1384,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1778 */ 1384 */
1779 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); 1385 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1780 1386
1781 wl1271_enable_interrupts(wl); 1387 wlcore_enable_interrupts(wl);
1782 flush_work(&wl->tx_work); 1388 flush_work(&wl->tx_work);
1783 flush_delayed_work(&wl->elp_work); 1389 flush_delayed_work(&wl->elp_work);
1784 1390
@@ -1810,12 +1416,15 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
1810 wl1271_debug(DEBUG_MAC80211, 1416 wl1271_debug(DEBUG_MAC80211,
1811 "run postponed irq_work directly"); 1417 "run postponed irq_work directly");
1812 wl1271_irq(0, wl); 1418 wl1271_irq(0, wl);
1813 wl1271_enable_interrupts(wl); 1419 wlcore_enable_interrupts(wl);
1814 } 1420 }
1421
1422 mutex_lock(&wl->mutex);
1815 wl12xx_for_each_wlvif(wl, wlvif) { 1423 wl12xx_for_each_wlvif(wl, wlvif) {
1816 wl1271_configure_resume(wl, wlvif); 1424 wl1271_configure_resume(wl, wlvif);
1817 } 1425 }
1818 wl->wow_enabled = false; 1426 wl->wow_enabled = false;
1427 mutex_unlock(&wl->mutex);
1819 1428
1820 return 0; 1429 return 0;
1821} 1430}
@@ -1851,7 +1460,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1851 * Otherwise, the interrupt handler might be called and exit without 1460 * Otherwise, the interrupt handler might be called and exit without
1852 * reading the interrupt status. 1461 * reading the interrupt status.
1853 */ 1462 */
1854 wl1271_disable_interrupts(wl); 1463 wlcore_disable_interrupts(wl);
1855 mutex_lock(&wl->mutex); 1464 mutex_lock(&wl->mutex);
1856 if (wl->state == WL1271_STATE_OFF) { 1465 if (wl->state == WL1271_STATE_OFF) {
1857 mutex_unlock(&wl->mutex); 1466 mutex_unlock(&wl->mutex);
@@ -1861,7 +1470,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1861 * may have been disabled when op_stop was called. It will, 1470 * may have been disabled when op_stop was called. It will,
1862 * however, balance the above call to disable_interrupts(). 1471 * however, balance the above call to disable_interrupts().
1863 */ 1472 */
1864 wl1271_enable_interrupts(wl); 1473 wlcore_enable_interrupts(wl);
1865 return; 1474 return;
1866 } 1475 }
1867 1476
@@ -1894,7 +1503,6 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1894 wl->tx_results_count = 0; 1503 wl->tx_results_count = 0;
1895 wl->tx_packets_count = 0; 1504 wl->tx_packets_count = 0;
1896 wl->time_offset = 0; 1505 wl->time_offset = 0;
1897 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
1898 wl->ap_fw_ps_map = 0; 1506 wl->ap_fw_ps_map = 0;
1899 wl->ap_ps_map = 0; 1507 wl->ap_ps_map = 0;
1900 wl->sched_scanning = false; 1508 wl->sched_scanning = false;
@@ -2067,7 +1675,7 @@ static bool wl12xx_init_fw(struct wl1271 *wl)
2067 if (ret < 0) 1675 if (ret < 0)
2068 goto power_off; 1676 goto power_off;
2069 1677
2070 ret = wl1271_boot(wl); 1678 ret = wl->ops->boot(wl);
2071 if (ret < 0) 1679 if (ret < 0)
2072 goto power_off; 1680 goto power_off;
2073 1681
@@ -2087,7 +1695,7 @@ irq_disable:
2087 work function will not do anything.) Also, any other 1695 work function will not do anything.) Also, any other
2088 possible concurrent operations will fail due to the 1696 possible concurrent operations will fail due to the
2089 current state, hence the wl1271 struct should be safe. */ 1697 current state, hence the wl1271 struct should be safe. */
2090 wl1271_disable_interrupts(wl); 1698 wlcore_disable_interrupts(wl);
2091 wl1271_flush_deferred_work(wl); 1699 wl1271_flush_deferred_work(wl);
2092 cancel_work_sync(&wl->netstack_work); 1700 cancel_work_sync(&wl->netstack_work);
2093 mutex_lock(&wl->mutex); 1701 mutex_lock(&wl->mutex);
@@ -2360,10 +1968,12 @@ deinit:
2360 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) 1968 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2361 wl12xx_free_rate_policy(wl, 1969 wl12xx_free_rate_policy(wl,
2362 &wlvif->ap.ucast_rate_idx[i]); 1970 &wlvif->ap.ucast_rate_idx[i]);
1971 wl1271_free_ap_keys(wl, wlvif);
2363 } 1972 }
2364 1973
1974 dev_kfree_skb(wlvif->probereq);
1975 wlvif->probereq = NULL;
2365 wl12xx_tx_reset_wlvif(wl, wlvif); 1976 wl12xx_tx_reset_wlvif(wl, wlvif);
2366 wl1271_free_ap_keys(wl, wlvif);
2367 if (wl->last_wlvif == wlvif) 1977 if (wl->last_wlvif == wlvif)
2368 wl->last_wlvif = NULL; 1978 wl->last_wlvif = NULL;
2369 list_del(&wlvif->list); 1979 list_del(&wlvif->list);
@@ -2946,6 +2556,17 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2946 int ret; 2556 int ret;
2947 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 2557 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2948 2558
2559 /*
2560 * A role set to GEM cipher requires different Tx settings (namely
2561 * spare blocks). Note when we are in this mode so the HW can adjust.
2562 */
2563 if (key_type == KEY_GEM) {
2564 if (action == KEY_ADD_OR_REPLACE)
2565 wlvif->is_gem = true;
2566 else if (action == KEY_REMOVE)
2567 wlvif->is_gem = false;
2568 }
2569
2949 if (is_ap) { 2570 if (is_ap) {
2950 struct wl1271_station *wl_sta; 2571 struct wl1271_station *wl_sta;
2951 u8 hlid; 2572 u8 hlid;
@@ -2984,17 +2605,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2984 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 2605 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2985 }; 2606 };
2986 2607
2987 /*
2988 * A STA set to GEM cipher requires 2 tx spare blocks.
2989 * Return to default value when GEM cipher key is removed
2990 */
2991 if (key_type == KEY_GEM) {
2992 if (action == KEY_ADD_OR_REPLACE)
2993 wl->tx_spare_blocks = 2;
2994 else if (action == KEY_REMOVE)
2995 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
2996 }
2997
2998 addr = sta ? sta->addr : bcast_addr; 2608 addr = sta ? sta->addr : bcast_addr;
2999 2609
3000 if (is_zero_ether_addr(addr)) { 2610 if (is_zero_ether_addr(addr)) {
@@ -3791,8 +3401,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3791 wlvif->rssi_thold = bss_conf->cqm_rssi_thold; 3401 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3792 } 3402 }
3793 3403
3794 if (changed & BSS_CHANGED_BSSID && 3404 if (changed & BSS_CHANGED_BSSID)
3795 (is_ibss || bss_conf->assoc))
3796 if (!is_zero_ether_addr(bss_conf->bssid)) { 3405 if (!is_zero_ether_addr(bss_conf->bssid)) {
3797 ret = wl12xx_cmd_build_null_data(wl, wlvif); 3406 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3798 if (ret < 0) 3407 if (ret < 0)
@@ -3801,9 +3410,6 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3801 ret = wl1271_build_qos_null_data(wl, vif); 3410 ret = wl1271_build_qos_null_data(wl, vif);
3802 if (ret < 0) 3411 if (ret < 0)
3803 goto out; 3412 goto out;
3804
3805 /* Need to update the BSSID (for filtering etc) */
3806 do_join = true;
3807 } 3413 }
3808 3414
3809 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) { 3415 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
@@ -3830,6 +3436,7 @@ sta_not_found:
3830 int ieoffset; 3436 int ieoffset;
3831 wlvif->aid = bss_conf->aid; 3437 wlvif->aid = bss_conf->aid;
3832 wlvif->beacon_int = bss_conf->beacon_int; 3438 wlvif->beacon_int = bss_conf->beacon_int;
3439 do_join = true;
3833 set_assoc = true; 3440 set_assoc = true;
3834 3441
3835 /* 3442 /*
@@ -4662,60 +4269,12 @@ static struct ieee80211_channel wl1271_channels[] = {
4662 { .hw_value = 14, .center_freq = 2484, .max_power = 25 }, 4269 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
4663}; 4270};
4664 4271
4665/* mapping to indexes for wl1271_rates */
4666static const u8 wl1271_rate_to_idx_2ghz[] = {
4667 /* MCS rates are used only with 11n */
4668 7, /* CONF_HW_RXTX_RATE_MCS7_SGI */
4669 7, /* CONF_HW_RXTX_RATE_MCS7 */
4670 6, /* CONF_HW_RXTX_RATE_MCS6 */
4671 5, /* CONF_HW_RXTX_RATE_MCS5 */
4672 4, /* CONF_HW_RXTX_RATE_MCS4 */
4673 3, /* CONF_HW_RXTX_RATE_MCS3 */
4674 2, /* CONF_HW_RXTX_RATE_MCS2 */
4675 1, /* CONF_HW_RXTX_RATE_MCS1 */
4676 0, /* CONF_HW_RXTX_RATE_MCS0 */
4677
4678 11, /* CONF_HW_RXTX_RATE_54 */
4679 10, /* CONF_HW_RXTX_RATE_48 */
4680 9, /* CONF_HW_RXTX_RATE_36 */
4681 8, /* CONF_HW_RXTX_RATE_24 */
4682
4683 /* TI-specific rate */
4684 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
4685
4686 7, /* CONF_HW_RXTX_RATE_18 */
4687 6, /* CONF_HW_RXTX_RATE_12 */
4688 3, /* CONF_HW_RXTX_RATE_11 */
4689 5, /* CONF_HW_RXTX_RATE_9 */
4690 4, /* CONF_HW_RXTX_RATE_6 */
4691 2, /* CONF_HW_RXTX_RATE_5_5 */
4692 1, /* CONF_HW_RXTX_RATE_2 */
4693 0 /* CONF_HW_RXTX_RATE_1 */
4694};
4695
4696/* 11n STA capabilities */
4697#define HW_RX_HIGHEST_RATE 72
4698
4699#define WL12XX_HT_CAP { \
4700 .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | \
4701 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), \
4702 .ht_supported = true, \
4703 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
4704 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
4705 .mcs = { \
4706 .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
4707 .rx_highest = cpu_to_le16(HW_RX_HIGHEST_RATE), \
4708 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
4709 }, \
4710}
4711
4712/* can't be const, mac80211 writes to this */ 4272/* can't be const, mac80211 writes to this */
4713static struct ieee80211_supported_band wl1271_band_2ghz = { 4273static struct ieee80211_supported_band wl1271_band_2ghz = {
4714 .channels = wl1271_channels, 4274 .channels = wl1271_channels,
4715 .n_channels = ARRAY_SIZE(wl1271_channels), 4275 .n_channels = ARRAY_SIZE(wl1271_channels),
4716 .bitrates = wl1271_rates, 4276 .bitrates = wl1271_rates,
4717 .n_bitrates = ARRAY_SIZE(wl1271_rates), 4277 .n_bitrates = ARRAY_SIZE(wl1271_rates),
4718 .ht_cap = WL12XX_HT_CAP,
4719}; 4278};
4720 4279
4721/* 5 GHz data rates for WL1273 */ 4280/* 5 GHz data rates for WL1273 */
@@ -4784,48 +4343,11 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
4784 { .hw_value = 165, .center_freq = 5825, .max_power = 25 }, 4343 { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
4785}; 4344};
4786 4345
4787/* mapping to indexes for wl1271_rates_5ghz */
4788static const u8 wl1271_rate_to_idx_5ghz[] = {
4789 /* MCS rates are used only with 11n */
4790 7, /* CONF_HW_RXTX_RATE_MCS7_SGI */
4791 7, /* CONF_HW_RXTX_RATE_MCS7 */
4792 6, /* CONF_HW_RXTX_RATE_MCS6 */
4793 5, /* CONF_HW_RXTX_RATE_MCS5 */
4794 4, /* CONF_HW_RXTX_RATE_MCS4 */
4795 3, /* CONF_HW_RXTX_RATE_MCS3 */
4796 2, /* CONF_HW_RXTX_RATE_MCS2 */
4797 1, /* CONF_HW_RXTX_RATE_MCS1 */
4798 0, /* CONF_HW_RXTX_RATE_MCS0 */
4799
4800 7, /* CONF_HW_RXTX_RATE_54 */
4801 6, /* CONF_HW_RXTX_RATE_48 */
4802 5, /* CONF_HW_RXTX_RATE_36 */
4803 4, /* CONF_HW_RXTX_RATE_24 */
4804
4805 /* TI-specific rate */
4806 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
4807
4808 3, /* CONF_HW_RXTX_RATE_18 */
4809 2, /* CONF_HW_RXTX_RATE_12 */
4810 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11 */
4811 1, /* CONF_HW_RXTX_RATE_9 */
4812 0, /* CONF_HW_RXTX_RATE_6 */
4813 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5 */
4814 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2 */
4815 CONF_HW_RXTX_RATE_UNSUPPORTED /* CONF_HW_RXTX_RATE_1 */
4816};
4817
4818static struct ieee80211_supported_band wl1271_band_5ghz = { 4346static struct ieee80211_supported_band wl1271_band_5ghz = {
4819 .channels = wl1271_channels_5ghz, 4347 .channels = wl1271_channels_5ghz,
4820 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz), 4348 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
4821 .bitrates = wl1271_rates_5ghz, 4349 .bitrates = wl1271_rates_5ghz,
4822 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), 4350 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
4823 .ht_cap = WL12XX_HT_CAP,
4824};
4825
4826static const u8 *wl1271_band_rate_to_idx[] = {
4827 [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
4828 [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
4829}; 4351};
4830 4352
4831static const struct ieee80211_ops wl1271_ops = { 4353static const struct ieee80211_ops wl1271_ops = {
@@ -4862,18 +4384,18 @@ static const struct ieee80211_ops wl1271_ops = {
4862}; 4384};
4863 4385
4864 4386
4865u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band) 4387u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
4866{ 4388{
4867 u8 idx; 4389 u8 idx;
4868 4390
4869 BUG_ON(band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *)); 4391 BUG_ON(band >= 2);
4870 4392
4871 if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) { 4393 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
4872 wl1271_error("Illegal RX rate from HW: %d", rate); 4394 wl1271_error("Illegal RX rate from HW: %d", rate);
4873 return 0; 4395 return 0;
4874 } 4396 }
4875 4397
4876 idx = wl1271_band_rate_to_idx[band][rate]; 4398 idx = wl->band_rate_to_idx[band][rate];
4877 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) { 4399 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
4878 wl1271_error("Unsupported RX rate from HW: %d", rate); 4400 wl1271_error("Unsupported RX rate from HW: %d", rate);
4879 return 0; 4401 return 0;
@@ -5027,34 +4549,6 @@ static struct bin_attribute fwlog_attr = {
5027 .read = wl1271_sysfs_read_fwlog, 4549 .read = wl1271_sysfs_read_fwlog,
5028}; 4550};
5029 4551
5030static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
5031{
5032 bool supported = false;
5033 u8 major, minor;
5034
5035 if (wl->chip.id == CHIP_ID_1283_PG20) {
5036 major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
5037 minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
5038
5039 /* in wl128x we have the MAC address if the PG is >= (2, 1) */
5040 if (major > 2 || (major == 2 && minor >= 1))
5041 supported = true;
5042 } else {
5043 major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver);
5044 minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver);
5045
5046 /* in wl127x we have the MAC address if the PG is >= (3, 1) */
5047 if (major == 3 && minor >= 1)
5048 supported = true;
5049 }
5050
5051 wl1271_debug(DEBUG_PROBE,
5052 "PG Ver major = %d minor = %d, MAC %s present",
5053 major, minor, supported ? "is" : "is not");
5054
5055 return supported;
5056}
5057
5058static void wl12xx_derive_mac_addresses(struct wl1271 *wl, 4552static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
5059 u32 oui, u32 nic, int n) 4553 u32 oui, u32 nic, int n)
5060{ 4554{
@@ -5080,47 +4574,23 @@ static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
5080 wl->hw->wiphy->addresses = wl->addresses; 4574 wl->hw->wiphy->addresses = wl->addresses;
5081} 4575}
5082 4576
5083static void wl12xx_get_fuse_mac(struct wl1271 *wl)
5084{
5085 u32 mac1, mac2;
5086
5087 wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]);
5088
5089 mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1);
5090 mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2);
5091
5092 /* these are the two parts of the BD_ADDR */
5093 wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
5094 ((mac1 & 0xff000000) >> 24);
5095 wl->fuse_nic_addr = mac1 & 0xffffff;
5096
5097 wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]);
5098}
5099
5100static int wl12xx_get_hw_info(struct wl1271 *wl) 4577static int wl12xx_get_hw_info(struct wl1271 *wl)
5101{ 4578{
5102 int ret; 4579 int ret;
5103 u32 die_info;
5104 4580
5105 ret = wl12xx_set_power_on(wl); 4581 ret = wl12xx_set_power_on(wl);
5106 if (ret < 0) 4582 if (ret < 0)
5107 goto out; 4583 goto out;
5108 4584
5109 wl->chip.id = wl1271_read32(wl, CHIP_ID_B); 4585 wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B);
5110 4586
5111 if (wl->chip.id == CHIP_ID_1283_PG20) 4587 wl->fuse_oui_addr = 0;
5112 die_info = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1); 4588 wl->fuse_nic_addr = 0;
5113 else
5114 die_info = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
5115 4589
5116 wl->hw_pg_ver = (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET; 4590 wl->hw_pg_ver = wl->ops->get_pg_ver(wl);
5117 4591
5118 if (!wl12xx_mac_in_fuse(wl)) { 4592 if (wl->ops->get_mac)
5119 wl->fuse_oui_addr = 0; 4593 wl->ops->get_mac(wl);
5120 wl->fuse_nic_addr = 0;
5121 } else {
5122 wl12xx_get_fuse_mac(wl);
5123 }
5124 4594
5125 wl1271_power_off(wl); 4595 wl1271_power_off(wl);
5126out: 4596out:
@@ -5242,7 +4712,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5242 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - 4712 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5243 sizeof(struct ieee80211_header); 4713 sizeof(struct ieee80211_header);
5244 4714
5245 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 4715 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
4716 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5246 4717
5247 /* make sure all our channels fit in the scanned_ch bitmask */ 4718 /* make sure all our channels fit in the scanned_ch bitmask */
5248 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) + 4719 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
@@ -5254,8 +4725,12 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5254 */ 4725 */
5255 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz, 4726 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5256 sizeof(wl1271_band_2ghz)); 4727 sizeof(wl1271_band_2ghz));
4728 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap,
4729 sizeof(wl->ht_cap));
5257 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz, 4730 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5258 sizeof(wl1271_band_5ghz)); 4731 sizeof(wl1271_band_5ghz));
4732 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap,
4733 sizeof(wl->ht_cap));
5259 4734
5260 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 4735 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5261 &wl->bands[IEEE80211_BAND_2GHZ]; 4736 &wl->bands[IEEE80211_BAND_2GHZ];
@@ -5279,14 +4754,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5279 wl->hw->sta_data_size = sizeof(struct wl1271_station); 4754 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5280 wl->hw->vif_data_size = sizeof(struct wl12xx_vif); 4755 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5281 4756
5282 wl->hw->max_rx_aggregation_subframes = 8; 4757 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5283 4758
5284 return 0; 4759 return 0;
5285} 4760}
5286 4761
5287#define WL1271_DEFAULT_CHANNEL 0 4762#define WL1271_DEFAULT_CHANNEL 0
5288 4763
5289static struct ieee80211_hw *wl1271_alloc_hw(void) 4764struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5290{ 4765{
5291 struct ieee80211_hw *hw; 4766 struct ieee80211_hw *hw;
5292 struct wl1271 *wl; 4767 struct wl1271 *wl;
@@ -5305,6 +4780,13 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
5305 wl = hw->priv; 4780 wl = hw->priv;
5306 memset(wl, 0, sizeof(*wl)); 4781 memset(wl, 0, sizeof(*wl));
5307 4782
4783 wl->priv = kzalloc(priv_size, GFP_KERNEL);
4784 if (!wl->priv) {
4785 wl1271_error("could not alloc wl priv");
4786 ret = -ENOMEM;
4787 goto err_priv_alloc;
4788 }
4789
5308 INIT_LIST_HEAD(&wl->wlvif_list); 4790 INIT_LIST_HEAD(&wl->wlvif_list);
5309 4791
5310 wl->hw = hw; 4792 wl->hw = hw;
@@ -5341,7 +4823,6 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
5341 wl->quirks = 0; 4823 wl->quirks = 0;
5342 wl->platform_quirks = 0; 4824 wl->platform_quirks = 0;
5343 wl->sched_scanning = false; 4825 wl->sched_scanning = false;
5344 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
5345 wl->system_hlid = WL12XX_SYSTEM_HLID; 4826 wl->system_hlid = WL12XX_SYSTEM_HLID;
5346 wl->active_sta_count = 0; 4827 wl->active_sta_count = 0;
5347 wl->fwlog_size = 0; 4828 wl->fwlog_size = 0;
@@ -5351,7 +4832,7 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
5351 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); 4832 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5352 4833
5353 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 4834 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5354 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 4835 for (i = 0; i < wl->num_tx_desc; i++)
5355 wl->tx_frames[i] = NULL; 4836 wl->tx_frames[i] = NULL;
5356 4837
5357 spin_lock_init(&wl->wl_lock); 4838 spin_lock_init(&wl->wl_lock);
@@ -5360,9 +4841,6 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
5360 wl->fw_type = WL12XX_FW_TYPE_NONE; 4841 wl->fw_type = WL12XX_FW_TYPE_NONE;
5361 mutex_init(&wl->mutex); 4842 mutex_init(&wl->mutex);
5362 4843
5363 /* Apply default driver configuration. */
5364 wl1271_conf_init(wl);
5365
5366 order = get_order(WL1271_AGGR_BUFFER_SIZE); 4844 order = get_order(WL1271_AGGR_BUFFER_SIZE);
5367 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 4845 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5368 if (!wl->aggr_buf) { 4846 if (!wl->aggr_buf) {
@@ -5383,8 +4861,17 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
5383 goto err_dummy_packet; 4861 goto err_dummy_packet;
5384 } 4862 }
5385 4863
4864 wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_DMA);
4865 if (!wl->mbox) {
4866 ret = -ENOMEM;
4867 goto err_fwlog;
4868 }
4869
5386 return hw; 4870 return hw;
5387 4871
4872err_fwlog:
4873 free_page((unsigned long)wl->fwlog);
4874
5388err_dummy_packet: 4875err_dummy_packet:
5389 dev_kfree_skb(wl->dummy_packet); 4876 dev_kfree_skb(wl->dummy_packet);
5390 4877
@@ -5396,14 +4883,18 @@ err_wq:
5396 4883
5397err_hw: 4884err_hw:
5398 wl1271_debugfs_exit(wl); 4885 wl1271_debugfs_exit(wl);
4886 kfree(wl->priv);
4887
4888err_priv_alloc:
5399 ieee80211_free_hw(hw); 4889 ieee80211_free_hw(hw);
5400 4890
5401err_hw_alloc: 4891err_hw_alloc:
5402 4892
5403 return ERR_PTR(ret); 4893 return ERR_PTR(ret);
5404} 4894}
4895EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5405 4896
5406static int wl1271_free_hw(struct wl1271 *wl) 4897int wlcore_free_hw(struct wl1271 *wl)
5407{ 4898{
5408 /* Unblock any fwlog readers */ 4899 /* Unblock any fwlog readers */
5409 mutex_lock(&wl->mutex); 4900 mutex_lock(&wl->mutex);
@@ -5433,10 +4924,12 @@ static int wl1271_free_hw(struct wl1271 *wl)
5433 kfree(wl->tx_res_if); 4924 kfree(wl->tx_res_if);
5434 destroy_workqueue(wl->freezable_wq); 4925 destroy_workqueue(wl->freezable_wq);
5435 4926
4927 kfree(wl->priv);
5436 ieee80211_free_hw(wl->hw); 4928 ieee80211_free_hw(wl->hw);
5437 4929
5438 return 0; 4930 return 0;
5439} 4931}
4932EXPORT_SYMBOL_GPL(wlcore_free_hw);
5440 4933
5441static irqreturn_t wl12xx_hardirq(int irq, void *cookie) 4934static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5442{ 4935{
@@ -5467,22 +4960,22 @@ static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5467 return IRQ_WAKE_THREAD; 4960 return IRQ_WAKE_THREAD;
5468} 4961}
5469 4962
5470static int __devinit wl12xx_probe(struct platform_device *pdev) 4963int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5471{ 4964{
5472 struct wl12xx_platform_data *pdata = pdev->dev.platform_data; 4965 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5473 struct ieee80211_hw *hw;
5474 struct wl1271 *wl;
5475 unsigned long irqflags; 4966 unsigned long irqflags;
5476 int ret = -ENODEV; 4967 int ret;
5477 4968
5478 hw = wl1271_alloc_hw(); 4969 if (!wl->ops || !wl->ptable) {
5479 if (IS_ERR(hw)) { 4970 ret = -EINVAL;
5480 wl1271_error("can't allocate hw"); 4971 goto out_free_hw;
5481 ret = PTR_ERR(hw);
5482 goto out;
5483 } 4972 }
5484 4973
5485 wl = hw->priv; 4974 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
4975
4976 /* adjust some runtime configuration parameters */
4977 wlcore_adjust_conf(wl);
4978
5486 wl->irq = platform_get_irq(pdev, 0); 4979 wl->irq = platform_get_irq(pdev, 0);
5487 wl->ref_clock = pdata->board_ref_clock; 4980 wl->ref_clock = pdata->board_ref_clock;
5488 wl->tcxo_clock = pdata->board_tcxo_clock; 4981 wl->tcxo_clock = pdata->board_tcxo_clock;
@@ -5511,7 +5004,7 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
5511 wl->irq_wake_enabled = true; 5004 wl->irq_wake_enabled = true;
5512 device_init_wakeup(wl->dev, 1); 5005 device_init_wakeup(wl->dev, 1);
5513 if (pdata->pwr_in_suspend) 5006 if (pdata->pwr_in_suspend)
5514 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; 5007 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5515 5008
5516 } 5009 }
5517 disable_irq(wl->irq); 5010 disable_irq(wl->irq);
@@ -5545,7 +5038,7 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
5545 goto out_hw_pg_ver; 5038 goto out_hw_pg_ver;
5546 } 5039 }
5547 5040
5548 return 0; 5041 goto out;
5549 5042
5550out_hw_pg_ver: 5043out_hw_pg_ver:
5551 device_remove_file(wl->dev, &dev_attr_hw_pg_ver); 5044 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
@@ -5557,13 +5050,14 @@ out_irq:
5557 free_irq(wl->irq, wl); 5050 free_irq(wl->irq, wl);
5558 5051
5559out_free_hw: 5052out_free_hw:
5560 wl1271_free_hw(wl); 5053 wlcore_free_hw(wl);
5561 5054
5562out: 5055out:
5563 return ret; 5056 return ret;
5564} 5057}
5058EXPORT_SYMBOL_GPL(wlcore_probe);
5565 5059
5566static int __devexit wl12xx_remove(struct platform_device *pdev) 5060int __devexit wlcore_remove(struct platform_device *pdev)
5567{ 5061{
5568 struct wl1271 *wl = platform_get_drvdata(pdev); 5062 struct wl1271 *wl = platform_get_drvdata(pdev);
5569 5063
@@ -5573,38 +5067,11 @@ static int __devexit wl12xx_remove(struct platform_device *pdev)
5573 } 5067 }
5574 wl1271_unregister_hw(wl); 5068 wl1271_unregister_hw(wl);
5575 free_irq(wl->irq, wl); 5069 free_irq(wl->irq, wl);
5576 wl1271_free_hw(wl); 5070 wlcore_free_hw(wl);
5577 5071
5578 return 0; 5072 return 0;
5579} 5073}
5580 5074EXPORT_SYMBOL_GPL(wlcore_remove);
5581static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
5582 { "wl12xx", 0 },
5583 { } /* Terminating Entry */
5584};
5585MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
5586
5587static struct platform_driver wl12xx_driver = {
5588 .probe = wl12xx_probe,
5589 .remove = __devexit_p(wl12xx_remove),
5590 .id_table = wl12xx_id_table,
5591 .driver = {
5592 .name = "wl12xx_driver",
5593 .owner = THIS_MODULE,
5594 }
5595};
5596
5597static int __init wl12xx_init(void)
5598{
5599 return platform_driver_register(&wl12xx_driver);
5600}
5601module_init(wl12xx_init);
5602
5603static void __exit wl12xx_exit(void)
5604{
5605 platform_driver_unregister(&wl12xx_driver);
5606}
5607module_exit(wl12xx_exit);
5608 5075
5609u32 wl12xx_debug_level = DEBUG_NONE; 5076u32 wl12xx_debug_level = DEBUG_NONE;
5610EXPORT_SYMBOL_GPL(wl12xx_debug_level); 5077EXPORT_SYMBOL_GPL(wl12xx_debug_level);
@@ -5618,6 +5085,9 @@ MODULE_PARM_DESC(fwlog,
5618module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR); 5085module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
5619MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery"); 5086MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
5620 5087
5088module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
5089MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5090
5621MODULE_LICENSE("GPL"); 5091MODULE_LICENSE("GPL");
5622MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 5092MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5623MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 5093MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 78f598b4f97b..756eee2257b4 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -21,7 +21,6 @@
21 * 21 *
22 */ 22 */
23 23
24#include "reg.h"
25#include "ps.h" 24#include "ps.h"
26#include "io.h" 25#include "io.h"
27#include "tx.h" 26#include "tx.h"
@@ -62,7 +61,7 @@ void wl1271_elp_work(struct work_struct *work)
62 } 61 }
63 62
64 wl1271_debug(DEBUG_PSM, "chip to elp"); 63 wl1271_debug(DEBUG_PSM, "chip to elp");
65 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); 64 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
66 set_bit(WL1271_FLAG_IN_ELP, &wl->flags); 65 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
67 66
68out: 67out:
@@ -74,6 +73,9 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
74{ 73{
75 struct wl12xx_vif *wlvif; 74 struct wl12xx_vif *wlvif;
76 75
76 if (wl->quirks & WLCORE_QUIRK_NO_ELP)
77 return;
78
77 /* we shouldn't get consecutive sleep requests */ 79 /* we shouldn't get consecutive sleep requests */
78 if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) 80 if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
79 return; 81 return;
@@ -125,7 +127,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
125 wl->elp_compl = &compl; 127 wl->elp_compl = &compl;
126 spin_unlock_irqrestore(&wl->wl_lock, flags); 128 spin_unlock_irqrestore(&wl->wl_lock, flags);
127 129
128 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); 130 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
129 131
130 if (!pending) { 132 if (!pending) {
131 ret = wait_for_completion_timeout( 133 ret = wait_for_completion_timeout(
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/ti/wlcore/ps.h
index 5f19d4fbbf27..de4f9da8ed26 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/ti/wlcore/ps.h
@@ -24,7 +24,7 @@
24#ifndef __PS_H__ 24#ifndef __PS_H__
25#define __PS_H__ 25#define __PS_H__
26 26
27#include "wl12xx.h" 27#include "wlcore.h"
28#include "acx.h" 28#include "acx.h"
29 29
30int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, 30int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index cfa6071704c5..89bd9385e90b 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -24,34 +24,36 @@
24#include <linux/gfp.h> 24#include <linux/gfp.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26 26
27#include "wl12xx.h" 27#include "wlcore.h"
28#include "debug.h" 28#include "debug.h"
29#include "acx.h" 29#include "acx.h"
30#include "reg.h"
31#include "rx.h" 30#include "rx.h"
32#include "tx.h" 31#include "tx.h"
33#include "io.h" 32#include "io.h"
33#include "hw_ops.h"
34 34
35static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status, 35/*
36 u32 drv_rx_counter) 36 * TODO: this is here just for now, it must be removed when the data
37{ 37 * operations are in place.
38 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 38 */
39 RX_MEM_BLOCK_MASK; 39#include "../wl12xx/reg.h"
40}
41 40
42static u32 wl12xx_rx_get_buf_size(struct wl12xx_fw_status *status, 41static u32 wlcore_rx_get_buf_size(struct wl1271 *wl,
43 u32 drv_rx_counter) 42 u32 rx_pkt_desc)
44{ 43{
45 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 44 if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN)
46 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV; 45 return (rx_pkt_desc & ALIGNED_RX_BUF_SIZE_MASK) >>
46 ALIGNED_RX_BUF_SIZE_SHIFT;
47
48 return (rx_pkt_desc & RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
47} 49}
48 50
49static bool wl12xx_rx_get_unaligned(struct wl12xx_fw_status *status, 51static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
50 u32 drv_rx_counter)
51{ 52{
52 /* Convert the value to bool */ 53 if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN)
53 return !!(le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 54 return ALIGN(pkt_len, WL12XX_BUS_BLOCK_SIZE);
54 RX_BUF_UNALIGNED_PAYLOAD); 55
56 return pkt_len;
55} 57}
56 58
57static void wl1271_rx_status(struct wl1271 *wl, 59static void wl1271_rx_status(struct wl1271 *wl,
@@ -66,10 +68,10 @@ static void wl1271_rx_status(struct wl1271 *wl,
66 else 68 else
67 status->band = IEEE80211_BAND_5GHZ; 69 status->band = IEEE80211_BAND_5GHZ;
68 70
69 status->rate_idx = wl1271_rate_to_idx(desc->rate, status->band); 71 status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band);
70 72
71 /* 11n support */ 73 /* 11n support */
72 if (desc->rate <= CONF_HW_RXTX_RATE_MCS0) 74 if (desc->rate <= wl->hw_min_ht_rate)
73 status->flag |= RX_FLAG_HT; 75 status->flag |= RX_FLAG_HT;
74 76
75 status->signal = desc->rssi; 77 status->signal = desc->rssi;
@@ -98,7 +100,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
98} 100}
99 101
100static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, 102static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
101 bool unaligned, u8 *hlid) 103 enum wl_rx_buf_align rx_align, u8 *hlid)
102{ 104{
103 struct wl1271_rx_descriptor *desc; 105 struct wl1271_rx_descriptor *desc;
104 struct sk_buff *skb; 106 struct sk_buff *skb;
@@ -106,8 +108,9 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
106 u8 *buf; 108 u8 *buf;
107 u8 beacon = 0; 109 u8 beacon = 0;
108 u8 is_data = 0; 110 u8 is_data = 0;
109 u8 reserved = unaligned ? NET_IP_ALIGN : 0; 111 u8 reserved = 0;
110 u16 seq_num; 112 u16 seq_num;
113 u32 pkt_data_len;
111 114
112 /* 115 /*
113 * In PLT mode we seem to get frames and mac80211 warns about them, 116 * In PLT mode we seem to get frames and mac80211 warns about them,
@@ -116,6 +119,16 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
116 if (unlikely(wl->plt)) 119 if (unlikely(wl->plt))
117 return -EINVAL; 120 return -EINVAL;
118 121
122 pkt_data_len = wlcore_hw_get_rx_packet_len(wl, data, length);
123 if (!pkt_data_len) {
124 wl1271_error("Invalid packet arrived from HW. length %d",
125 length);
126 return -EINVAL;
127 }
128
129 if (rx_align == WLCORE_RX_BUF_UNALIGNED)
130 reserved = NET_IP_ALIGN;
131
119 /* the data read starts with the descriptor */ 132 /* the data read starts with the descriptor */
120 desc = (struct wl1271_rx_descriptor *) data; 133 desc = (struct wl1271_rx_descriptor *) data;
121 134
@@ -142,8 +155,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
142 return -EINVAL; 155 return -EINVAL;
143 } 156 }
144 157
145 /* skb length not included rx descriptor */ 158 /* skb length not including rx descriptor */
146 skb = __dev_alloc_skb(length + reserved - sizeof(*desc), GFP_KERNEL); 159 skb = __dev_alloc_skb(pkt_data_len + reserved, GFP_KERNEL);
147 if (!skb) { 160 if (!skb) {
148 wl1271_error("Couldn't allocate RX frame"); 161 wl1271_error("Couldn't allocate RX frame");
149 return -ENOMEM; 162 return -ENOMEM;
@@ -152,7 +165,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
152 /* reserve the unaligned payload(if any) */ 165 /* reserve the unaligned payload(if any) */
153 skb_reserve(skb, reserved); 166 skb_reserve(skb, reserved);
154 167
155 buf = skb_put(skb, length - sizeof(*desc)); 168 buf = skb_put(skb, pkt_data_len);
156 169
157 /* 170 /*
158 * Copy packets from aggregation buffer to the skbs without rx 171 * Copy packets from aggregation buffer to the skbs without rx
@@ -160,7 +173,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
160 * packets copy the packets in offset of 2 bytes guarantee IP header 173 * packets copy the packets in offset of 2 bytes guarantee IP header
161 * payload aligned to 4 bytes. 174 * payload aligned to 4 bytes.
162 */ 175 */
163 memcpy(buf, data + sizeof(*desc), length - sizeof(*desc)); 176 memcpy(buf, data + sizeof(*desc), pkt_data_len);
177 if (rx_align == WLCORE_RX_BUF_PADDED)
178 skb_pull(skb, NET_IP_ALIGN);
179
164 *hlid = desc->hlid; 180 *hlid = desc->hlid;
165 181
166 hdr = (struct ieee80211_hdr *)skb->data; 182 hdr = (struct ieee80211_hdr *)skb->data;
@@ -177,36 +193,35 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
177 beacon ? "beacon" : "", 193 beacon ? "beacon" : "",
178 seq_num, *hlid); 194 seq_num, *hlid);
179 195
180 skb_trim(skb, skb->len - desc->pad_len);
181
182 skb_queue_tail(&wl->deferred_rx_queue, skb); 196 skb_queue_tail(&wl->deferred_rx_queue, skb);
183 queue_work(wl->freezable_wq, &wl->netstack_work); 197 queue_work(wl->freezable_wq, &wl->netstack_work);
184 198
185 return is_data; 199 return is_data;
186} 200}
187 201
188void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) 202void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
189{ 203{
190 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
191 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 204 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
192 u32 buf_size; 205 u32 buf_size;
193 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 206 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
194 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 207 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
195 u32 rx_counter; 208 u32 rx_counter;
196 u32 mem_block; 209 u32 pkt_len, align_pkt_len;
197 u32 pkt_length; 210 u32 pkt_offset, des;
198 u32 pkt_offset;
199 u8 hlid; 211 u8 hlid;
200 bool unaligned = false; 212 enum wl_rx_buf_align rx_align;
201 213
202 while (drv_rx_counter != fw_rx_counter) { 214 while (drv_rx_counter != fw_rx_counter) {
203 buf_size = 0; 215 buf_size = 0;
204 rx_counter = drv_rx_counter; 216 rx_counter = drv_rx_counter;
205 while (rx_counter != fw_rx_counter) { 217 while (rx_counter != fw_rx_counter) {
206 pkt_length = wl12xx_rx_get_buf_size(status, rx_counter); 218 des = le32_to_cpu(status->rx_pkt_descs[rx_counter]);
207 if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE) 219 pkt_len = wlcore_rx_get_buf_size(wl, des);
220 align_pkt_len = wlcore_rx_get_align_buf_size(wl,
221 pkt_len);
222 if (buf_size + align_pkt_len > WL1271_AGGR_BUFFER_SIZE)
208 break; 223 break;
209 buf_size += pkt_length; 224 buf_size += align_pkt_len;
210 rx_counter++; 225 rx_counter++;
211 rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; 226 rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
212 } 227 }
@@ -216,38 +231,18 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
216 break; 231 break;
217 } 232 }
218 233
219 if (wl->chip.id != CHIP_ID_1283_PG20) {
220 /*
221 * Choose the block we want to read
222 * For aggregated packets, only the first memory block
223 * should be retrieved. The FW takes care of the rest.
224 */
225 mem_block = wl12xx_rx_get_mem_block(status,
226 drv_rx_counter);
227
228 wl->rx_mem_pool_addr.addr = (mem_block << 8) +
229 le32_to_cpu(wl_mem_map->packet_memory_pool_start);
230
231 wl->rx_mem_pool_addr.addr_extra =
232 wl->rx_mem_pool_addr.addr + 4;
233
234 wl1271_write(wl, WL1271_SLV_REG_DATA,
235 &wl->rx_mem_pool_addr,
236 sizeof(wl->rx_mem_pool_addr), false);
237 }
238
239 /* Read all available packets at once */ 234 /* Read all available packets at once */
240 wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, 235 des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
241 buf_size, true); 236 wlcore_hw_prepare_read(wl, des, buf_size);
237 wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
238 buf_size, true);
242 239
243 /* Split data into separate packets */ 240 /* Split data into separate packets */
244 pkt_offset = 0; 241 pkt_offset = 0;
245 while (pkt_offset < buf_size) { 242 while (pkt_offset < buf_size) {
246 pkt_length = wl12xx_rx_get_buf_size(status, 243 des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
247 drv_rx_counter); 244 pkt_len = wlcore_rx_get_buf_size(wl, des);
248 245 rx_align = wlcore_hw_get_rx_buf_align(wl, des);
249 unaligned = wl12xx_rx_get_unaligned(status,
250 drv_rx_counter);
251 246
252 /* 247 /*
253 * the handle data call can only fail in memory-outage 248 * the handle data call can only fail in memory-outage
@@ -256,7 +251,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
256 */ 251 */
257 if (wl1271_rx_handle_data(wl, 252 if (wl1271_rx_handle_data(wl,
258 wl->aggr_buf + pkt_offset, 253 wl->aggr_buf + pkt_offset,
259 pkt_length, unaligned, 254 pkt_len, rx_align,
260 &hlid) == 1) { 255 &hlid) == 1) {
261 if (hlid < WL12XX_MAX_LINKS) 256 if (hlid < WL12XX_MAX_LINKS)
262 __set_bit(hlid, active_hlids); 257 __set_bit(hlid, active_hlids);
@@ -269,7 +264,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
269 wl->rx_counter++; 264 wl->rx_counter++;
270 drv_rx_counter++; 265 drv_rx_counter++;
271 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; 266 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
272 pkt_offset += pkt_length; 267 pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len);
273 } 268 }
274 } 269 }
275 270
@@ -277,8 +272,9 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
277 * Write the driver's packet counter to the FW. This is only required 272 * Write the driver's packet counter to the FW. This is only required
278 * for older hardware revisions 273 * for older hardware revisions
279 */ 274 */
280 if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) 275 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
281 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); 276 wl1271_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
277 wl->rx_counter);
282 278
283 wl12xx_rearm_rx_streaming(wl, active_hlids); 279 wl12xx_rearm_rx_streaming(wl, active_hlids);
284} 280}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 86ba6b1d0cdc..6e129e2a8546 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -96,9 +96,19 @@
96#define RX_MEM_BLOCK_MASK 0xFF 96#define RX_MEM_BLOCK_MASK 0xFF
97#define RX_BUF_SIZE_MASK 0xFFF00 97#define RX_BUF_SIZE_MASK 0xFFF00
98#define RX_BUF_SIZE_SHIFT_DIV 6 98#define RX_BUF_SIZE_SHIFT_DIV 6
99#define ALIGNED_RX_BUF_SIZE_MASK 0xFFFF00
100#define ALIGNED_RX_BUF_SIZE_SHIFT 8
101
99/* If set, the start of IP payload is not 4 bytes aligned */ 102/* If set, the start of IP payload is not 4 bytes aligned */
100#define RX_BUF_UNALIGNED_PAYLOAD BIT(20) 103#define RX_BUF_UNALIGNED_PAYLOAD BIT(20)
101 104
105/* Describes the alignment state of a Rx buffer */
106enum wl_rx_buf_align {
107 WLCORE_RX_BUF_ALIGNED,
108 WLCORE_RX_BUF_UNALIGNED,
109 WLCORE_RX_BUF_PADDED,
110};
111
102enum { 112enum {
103 WL12XX_RX_CLASS_UNKNOWN, 113 WL12XX_RX_CLASS_UNKNOWN,
104 WL12XX_RX_CLASS_MANAGEMENT, 114 WL12XX_RX_CLASS_MANAGEMENT,
@@ -126,7 +136,7 @@ struct wl1271_rx_descriptor {
126 u8 reserved; 136 u8 reserved;
127} __packed; 137} __packed;
128 138
129void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status); 139void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status);
130u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 140u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
131 141
132#endif 142#endif
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index fcba055ef196..ade21a011c45 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -23,7 +23,7 @@
23 23
24#include <linux/ieee80211.h> 24#include <linux/ieee80211.h>
25 25
26#include "wl12xx.h" 26#include "wlcore.h"
27#include "debug.h" 27#include "debug.h"
28#include "cmd.h" 28#include "cmd.h"
29#include "scan.h" 29#include "scan.h"
@@ -417,6 +417,23 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
417 int i, j; 417 int i, j;
418 u32 flags; 418 u32 flags;
419 bool force_passive = !req->n_ssids; 419 bool force_passive = !req->n_ssids;
420 u32 min_dwell_time_active, max_dwell_time_active, delta_per_probe;
421 u32 dwell_time_passive, dwell_time_dfs;
422
423 if (band == IEEE80211_BAND_5GHZ)
424 delta_per_probe = c->dwell_time_delta_per_probe_5;
425 else
426 delta_per_probe = c->dwell_time_delta_per_probe;
427
428 min_dwell_time_active = c->base_dwell_time +
429 req->n_ssids * c->num_probe_reqs * delta_per_probe;
430
431 max_dwell_time_active = min_dwell_time_active + c->max_dwell_time_delta;
432
433 min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000);
434 max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000);
435 dwell_time_passive = DIV_ROUND_UP(c->dwell_time_passive, 1000);
436 dwell_time_dfs = DIV_ROUND_UP(c->dwell_time_dfs, 1000);
420 437
421 for (i = 0, j = start; 438 for (i = 0, j = start;
422 i < req->n_channels && j < max_channels; 439 i < req->n_channels && j < max_channels;
@@ -440,21 +457,24 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
440 req->channels[i]->flags); 457 req->channels[i]->flags);
441 wl1271_debug(DEBUG_SCAN, "max_power %d", 458 wl1271_debug(DEBUG_SCAN, "max_power %d",
442 req->channels[i]->max_power); 459 req->channels[i]->max_power);
460 wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
461 min_dwell_time_active,
462 max_dwell_time_active);
443 463
444 if (flags & IEEE80211_CHAN_RADAR) { 464 if (flags & IEEE80211_CHAN_RADAR) {
445 channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS; 465 channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
446 466
447 channels[j].passive_duration = 467 channels[j].passive_duration =
448 cpu_to_le16(c->dwell_time_dfs); 468 cpu_to_le16(dwell_time_dfs);
449 } else { 469 } else {
450 channels[j].passive_duration = 470 channels[j].passive_duration =
451 cpu_to_le16(c->dwell_time_passive); 471 cpu_to_le16(dwell_time_passive);
452 } 472 }
453 473
454 channels[j].min_duration = 474 channels[j].min_duration =
455 cpu_to_le16(c->min_dwell_time_active); 475 cpu_to_le16(min_dwell_time_active);
456 channels[j].max_duration = 476 channels[j].max_duration =
457 cpu_to_le16(c->max_dwell_time_active); 477 cpu_to_le16(max_dwell_time_active);
458 478
459 channels[j].tx_power_att = req->channels[i]->max_power; 479 channels[j].tx_power_att = req->channels[i]->max_power;
460 channels[j].channel = req->channels[i]->hw_value; 480 channels[j].channel = req->channels[i]->hw_value;
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 96ff457a3a0b..81ee36ac2078 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -24,7 +24,7 @@
24#ifndef __SCAN_H__ 24#ifndef __SCAN_H__
25#define __SCAN_H__ 25#define __SCAN_H__
26 26
27#include "wl12xx.h" 27#include "wlcore.h"
28 28
29int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, 29int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
30 const u8 *ssid, size_t ssid_len, 30 const u8 *ssid, size_t ssid_len,
@@ -55,7 +55,7 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl);
55#define WL1271_SCAN_BAND_2_4_GHZ 0 55#define WL1271_SCAN_BAND_2_4_GHZ 0
56#define WL1271_SCAN_BAND_5_GHZ 1 56#define WL1271_SCAN_BAND_5_GHZ 1
57 57
58#define WL1271_SCAN_TIMEOUT 10000 /* msec */ 58#define WL1271_SCAN_TIMEOUT 30000 /* msec */
59 59
60enum { 60enum {
61 WL1271_SCAN_STATE_IDLE, 61 WL1271_SCAN_STATE_IDLE,
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 4b3c32774bae..0a72347cfc4c 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -33,7 +33,7 @@
33#include <linux/wl12xx.h> 33#include <linux/wl12xx.h>
34#include <linux/pm_runtime.h> 34#include <linux/pm_runtime.h>
35 35
36#include "wl12xx.h" 36#include "wlcore.h"
37#include "wl12xx_80211.h" 37#include "wl12xx_80211.h"
38#include "io.h" 38#include "io.h"
39 39
@@ -76,7 +76,7 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
76 76
77 sdio_claim_host(func); 77 sdio_claim_host(func);
78 78
79 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 79 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
80 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); 80 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
81 dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", 81 dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
82 addr, ((u8 *)buf)[0]); 82 addr, ((u8 *)buf)[0]);
@@ -105,7 +105,7 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
105 105
106 sdio_claim_host(func); 106 sdio_claim_host(func);
107 107
108 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 108 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
109 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); 109 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
110 dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n", 110 dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
111 addr, ((u8 *)buf)[0]); 111 addr, ((u8 *)buf)[0]);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 2fc18a8dcce8..553cd3cbb98c 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -30,12 +30,10 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32 32
33#include "wl12xx.h" 33#include "wlcore.h"
34#include "wl12xx_80211.h" 34#include "wl12xx_80211.h"
35#include "io.h" 35#include "io.h"
36 36
37#include "reg.h"
38
39#define WSPI_CMD_READ 0x40000000 37#define WSPI_CMD_READ 0x40000000
40#define WSPI_CMD_WRITE 0x00000000 38#define WSPI_CMD_WRITE 0x00000000
41#define WSPI_CMD_FIXED 0x20000000 39#define WSPI_CMD_FIXED 0x20000000
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 1e93bb9c0246..0e59ea2cdd39 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -25,10 +25,9 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <net/genetlink.h> 26#include <net/genetlink.h>
27 27
28#include "wl12xx.h" 28#include "wlcore.h"
29#include "debug.h" 29#include "debug.h"
30#include "acx.h" 30#include "acx.h"
31#include "reg.h"
32#include "ps.h" 31#include "ps.h"
33#include "io.h" 32#include "io.h"
34 33
@@ -116,7 +115,8 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
116 goto out_sleep; 115 goto out_sleep;
117 } 116 }
118 117
119 NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf); 118 if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf))
119 goto nla_put_failure;
120 ret = cfg80211_testmode_reply(skb); 120 ret = cfg80211_testmode_reply(skb);
121 if (ret < 0) 121 if (ret < 0)
122 goto out_sleep; 122 goto out_sleep;
@@ -178,7 +178,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
178 goto out_free; 178 goto out_free;
179 } 179 }
180 180
181 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); 181 if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd))
182 goto nla_put_failure;
182 ret = cfg80211_testmode_reply(skb); 183 ret = cfg80211_testmode_reply(skb);
183 if (ret < 0) 184 if (ret < 0)
184 goto out_free; 185 goto out_free;
@@ -297,7 +298,8 @@ static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
297 goto out; 298 goto out;
298 } 299 }
299 300
300 NLA_PUT(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr); 301 if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr))
302 goto nla_put_failure;
301 ret = cfg80211_testmode_reply(skb); 303 ret = cfg80211_testmode_reply(skb);
302 if (ret < 0) 304 if (ret < 0)
303 goto out; 305 goto out;
diff --git a/drivers/net/wireless/wl12xx/testmode.h b/drivers/net/wireless/ti/wlcore/testmode.h
index 8071654259ea..8071654259ea 100644
--- a/drivers/net/wireless/wl12xx/testmode.h
+++ b/drivers/net/wireless/ti/wlcore/testmode.h
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 43ae49143d68..6893bc207994 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -25,13 +25,19 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
27 27
28#include "wl12xx.h" 28#include "wlcore.h"
29#include "debug.h" 29#include "debug.h"
30#include "io.h" 30#include "io.h"
31#include "reg.h"
32#include "ps.h" 31#include "ps.h"
33#include "tx.h" 32#include "tx.h"
34#include "event.h" 33#include "event.h"
34#include "hw_ops.h"
35
36/*
37 * TODO: this is here just for now, it must be removed when the data
38 * operations are in place.
39 */
40#include "../wl12xx/reg.h"
35 41
36static int wl1271_set_default_wep_key(struct wl1271 *wl, 42static int wl1271_set_default_wep_key(struct wl1271 *wl,
37 struct wl12xx_vif *wlvif, u8 id) 43 struct wl12xx_vif *wlvif, u8 id)
@@ -56,8 +62,8 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
56{ 62{
57 int id; 63 int id;
58 64
59 id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS); 65 id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
60 if (id >= ACX_TX_DESCRIPTORS) 66 if (id >= wl->num_tx_desc)
61 return -EBUSY; 67 return -EBUSY;
62 68
63 __set_bit(id, wl->tx_frames_map); 69 __set_bit(id, wl->tx_frames_map);
@@ -69,7 +75,7 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
69static void wl1271_free_tx_id(struct wl1271 *wl, int id) 75static void wl1271_free_tx_id(struct wl1271 *wl, int id)
70{ 76{
71 if (__test_and_clear_bit(id, wl->tx_frames_map)) { 77 if (__test_and_clear_bit(id, wl->tx_frames_map)) {
72 if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS)) 78 if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
73 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 79 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
74 80
75 wl->tx_frames[id] = NULL; 81 wl->tx_frames[id] = NULL;
@@ -167,14 +173,15 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
167 return wlvif->dev_hlid; 173 return wlvif->dev_hlid;
168} 174}
169 175
170static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl, 176unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
171 unsigned int packet_length) 177 unsigned int packet_length)
172{ 178{
173 if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT) 179 if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
174 return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
175 else
176 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); 180 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
181 else
182 return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
177} 183}
184EXPORT_SYMBOL(wlcore_calc_packet_alignment);
178 185
179static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, 186static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
180 struct sk_buff *skb, u32 extra, u32 buf_offset, 187 struct sk_buff *skb, u32 extra, u32 buf_offset,
@@ -182,10 +189,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
182{ 189{
183 struct wl1271_tx_hw_descr *desc; 190 struct wl1271_tx_hw_descr *desc;
184 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 191 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
185 u32 len;
186 u32 total_blocks; 192 u32 total_blocks;
187 int id, ret = -EBUSY, ac; 193 int id, ret = -EBUSY, ac;
188 u32 spare_blocks = wl->tx_spare_blocks; 194 u32 spare_blocks = wl->normal_tx_spare;
189 bool is_dummy = false; 195 bool is_dummy = false;
190 196
191 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 197 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
@@ -196,30 +202,19 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
196 if (id < 0) 202 if (id < 0)
197 return id; 203 return id;
198 204
199 /* approximate the number of blocks required for this packet 205 if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
200 in the firmware */
201 len = wl12xx_calc_packet_alignment(wl, total_len);
202
203 /* in case of a dummy packet, use default amount of spare mem blocks */
204 if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
205 is_dummy = true; 206 is_dummy = true;
206 spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; 207 else if (wlvif->is_gem)
207 } 208 spare_blocks = wl->gem_tx_spare;
208 209
209 total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE + 210 total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
210 spare_blocks;
211 211
212 if (total_blocks <= wl->tx_blocks_available) { 212 if (total_blocks <= wl->tx_blocks_available) {
213 desc = (struct wl1271_tx_hw_descr *)skb_push( 213 desc = (struct wl1271_tx_hw_descr *)skb_push(
214 skb, total_len - skb->len); 214 skb, total_len - skb->len);
215 215
216 /* HW descriptor fields change between wl127x and wl128x */ 216 wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
217 if (wl->chip.id == CHIP_ID_1283_PG20) { 217 spare_blocks);
218 desc->wl128x_mem.total_mem_blocks = total_blocks;
219 } else {
220 desc->wl127x_mem.extra_blocks = spare_blocks;
221 desc->wl127x_mem.total_mem_blocks = total_blocks;
222 }
223 218
224 desc->id = id; 219 desc->id = id;
225 220
@@ -256,7 +251,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
256{ 251{
257 struct timespec ts; 252 struct timespec ts;
258 struct wl1271_tx_hw_descr *desc; 253 struct wl1271_tx_hw_descr *desc;
259 int aligned_len, ac, rate_idx; 254 int ac, rate_idx;
260 s64 hosttime; 255 s64 hosttime;
261 u16 tx_attr = 0; 256 u16 tx_attr = 0;
262 __le16 frame_control; 257 __le16 frame_control;
@@ -329,44 +324,16 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
329 } 324 }
330 325
331 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; 326 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
332 desc->reserved = 0;
333
334 aligned_len = wl12xx_calc_packet_alignment(wl, skb->len);
335
336 if (wl->chip.id == CHIP_ID_1283_PG20) {
337 desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
338 desc->length = cpu_to_le16(aligned_len >> 2);
339
340 wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
341 "tx_attr: 0x%x len: %d life: %d mem: %d",
342 desc->hlid, tx_attr,
343 le16_to_cpu(desc->length),
344 le16_to_cpu(desc->life_time),
345 desc->wl128x_mem.total_mem_blocks);
346 } else {
347 int pad;
348
349 /* Store the aligned length in terms of words */
350 desc->length = cpu_to_le16(aligned_len >> 2);
351
352 /* calculate number of padding bytes */
353 pad = aligned_len - skb->len;
354 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
355
356 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
357 "tx_attr: 0x%x len: %d life: %d mem: %d", pad,
358 desc->hlid, tx_attr,
359 le16_to_cpu(desc->length),
360 le16_to_cpu(desc->life_time),
361 desc->wl127x_mem.total_mem_blocks);
362 }
363 327
364 /* for WEP shared auth - no fw encryption is needed */ 328 /* for WEP shared auth - no fw encryption is needed */
365 if (ieee80211_is_auth(frame_control) && 329 if (ieee80211_is_auth(frame_control) &&
366 ieee80211_has_protected(frame_control)) 330 ieee80211_has_protected(frame_control))
367 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT; 331 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
368 332
333 desc->reserved = 0;
369 desc->tx_attr = cpu_to_le16(tx_attr); 334 desc->tx_attr = cpu_to_le16(tx_attr);
335
336 wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
370} 337}
371 338
372/* caller must hold wl->mutex */ 339/* caller must hold wl->mutex */
@@ -432,7 +399,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
432 * In special cases, we want to align to a specific block size 399 * In special cases, we want to align to a specific block size
433 * (eg. for wl128x with SDIO we align to 256). 400 * (eg. for wl128x with SDIO we align to 256).
434 */ 401 */
435 total_len = wl12xx_calc_packet_alignment(wl, skb->len); 402 total_len = wlcore_calc_packet_alignment(wl, skb->len);
436 403
437 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); 404 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
438 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); 405 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
@@ -718,8 +685,8 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
718 * Flush buffer and try again. 685 * Flush buffer and try again.
719 */ 686 */
720 wl1271_skb_queue_head(wl, wlvif, skb); 687 wl1271_skb_queue_head(wl, wlvif, skb);
721 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, 688 wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
722 buf_offset, true); 689 buf_offset, true);
723 sent_packets = true; 690 sent_packets = true;
724 buf_offset = 0; 691 buf_offset = 0;
725 continue; 692 continue;
@@ -753,8 +720,8 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
753 720
754out_ack: 721out_ack:
755 if (buf_offset) { 722 if (buf_offset) {
756 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, 723 wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
757 buf_offset, true); 724 buf_offset, true);
758 sent_packets = true; 725 sent_packets = true;
759 } 726 }
760 if (sent_packets) { 727 if (sent_packets) {
@@ -762,8 +729,8 @@ out_ack:
762 * Interrupt the firmware with the new packets. This is only 729 * Interrupt the firmware with the new packets. This is only
763 * required for older hardware revisions 730 * required for older hardware revisions
764 */ 731 */
765 if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) 732 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
766 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, 733 wl1271_write32(wl, WL12XX_HOST_WR_ACCESS,
767 wl->tx_packets_count); 734 wl->tx_packets_count);
768 735
769 wl1271_handle_tx_low_watermark(wl); 736 wl1271_handle_tx_low_watermark(wl);
@@ -792,11 +759,20 @@ static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
792{ 759{
793 u8 flags = 0; 760 u8 flags = 0;
794 761
795 if (rate_class_index >= CONF_HW_RXTX_RATE_MCS_MIN && 762 /*
796 rate_class_index <= CONF_HW_RXTX_RATE_MCS_MAX) 763 * TODO: use wl12xx constants when this code is moved to wl12xx, as
764 * only it uses Tx-completion.
765 */
766 if (rate_class_index <= 8)
797 flags |= IEEE80211_TX_RC_MCS; 767 flags |= IEEE80211_TX_RC_MCS;
798 if (rate_class_index == CONF_HW_RXTX_RATE_MCS7_SGI) 768
769 /*
770 * TODO: use wl12xx constants when this code is moved to wl12xx, as
771 * only it uses Tx-completion.
772 */
773 if (rate_class_index == 0)
799 flags |= IEEE80211_TX_RC_SHORT_GI; 774 flags |= IEEE80211_TX_RC_SHORT_GI;
775
800 return flags; 776 return flags;
801} 777}
802 778
@@ -813,7 +789,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
813 u8 retries = 0; 789 u8 retries = 0;
814 790
815 /* check for id legality */ 791 /* check for id legality */
816 if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) { 792 if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
817 wl1271_warning("TX result illegal id: %d", id); 793 wl1271_warning("TX result illegal id: %d", id);
818 return; 794 return;
819 } 795 }
@@ -834,7 +810,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
834 if (result->status == TX_SUCCESS) { 810 if (result->status == TX_SUCCESS) {
835 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 811 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
836 info->flags |= IEEE80211_TX_STAT_ACK; 812 info->flags |= IEEE80211_TX_STAT_ACK;
837 rate = wl1271_rate_to_idx(result->rate_class_index, 813 rate = wlcore_rate_to_idx(wl, result->rate_class_index,
838 wlvif->band); 814 wlvif->band);
839 rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index); 815 rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
840 retries = result->ack_failures; 816 retries = result->ack_failures;
@@ -929,6 +905,7 @@ void wl1271_tx_complete(struct wl1271 *wl)
929 wl->tx_results_count++; 905 wl->tx_results_count++;
930 } 906 }
931} 907}
908EXPORT_SYMBOL(wl1271_tx_complete);
932 909
933void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) 910void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
934{ 911{
@@ -1006,7 +983,7 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
1006 if (reset_tx_queues) 983 if (reset_tx_queues)
1007 wl1271_handle_tx_low_watermark(wl); 984 wl1271_handle_tx_low_watermark(wl);
1008 985
1009 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) { 986 for (i = 0; i < wl->num_tx_desc; i++) {
1010 if (wl->tx_frames[i] == NULL) 987 if (wl->tx_frames[i] == NULL)
1011 continue; 988 continue;
1012 989
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 5cf8c32d40d1..2fd6e5dc6f75 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -25,9 +25,6 @@
25#ifndef __TX_H__ 25#ifndef __TX_H__
26#define __TX_H__ 26#define __TX_H__
27 27
28#define TX_HW_BLOCK_SPARE_DEFAULT 1
29#define TX_HW_BLOCK_SIZE 252
30
31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 28#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
32#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000 29#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
33 30
@@ -212,7 +209,7 @@ void wl1271_tx_complete(struct wl1271 *wl);
212void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif); 209void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
213void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues); 210void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
214void wl1271_tx_flush(struct wl1271 *wl); 211void wl1271_tx_flush(struct wl1271 *wl);
215u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 212u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
216u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, 213u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
217 enum ieee80211_band rate_band); 214 enum ieee80211_band rate_band);
218u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); 215u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
@@ -224,6 +221,8 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
224void wl1271_handle_tx_low_watermark(struct wl1271 *wl); 221void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
225bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb); 222bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
226void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids); 223void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
224unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
225 unsigned int packet_length);
227 226
228/* from main.c */ 227/* from main.c */
229void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); 228void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wlcore/wl12xx.h
index 749a15a75d38..a9b220c43e54 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx.h
@@ -89,8 +89,6 @@
89#define WL1271_AP_BSS_INDEX 0 89#define WL1271_AP_BSS_INDEX 0
90#define WL1271_AP_DEF_BEACON_EXP 20 90#define WL1271_AP_DEF_BEACON_EXP 20
91 91
92#define ACX_TX_DESCRIPTORS 16
93
94#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) 92#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
95 93
96enum wl1271_state { 94enum wl1271_state {
@@ -105,26 +103,6 @@ enum wl12xx_fw_type {
105 WL12XX_FW_TYPE_PLT, 103 WL12XX_FW_TYPE_PLT,
106}; 104};
107 105
108enum wl1271_partition_type {
109 PART_DOWN,
110 PART_WORK,
111 PART_DRPW,
112
113 PART_TABLE_LEN
114};
115
116struct wl1271_partition {
117 u32 size;
118 u32 start;
119};
120
121struct wl1271_partition_set {
122 struct wl1271_partition mem;
123 struct wl1271_partition reg;
124 struct wl1271_partition mem2;
125 struct wl1271_partition mem3;
126};
127
128struct wl1271; 106struct wl1271;
129 107
130enum { 108enum {
@@ -167,8 +145,21 @@ struct wl1271_stats {
167 145
168#define AP_MAX_STATIONS 8 146#define AP_MAX_STATIONS 8
169 147
148struct wl_fw_packet_counters {
149 /* Cumulative counter of released packets per AC */
150 u8 tx_released_pkts[NUM_TX_QUEUES];
151
152 /* Cumulative counter of freed packets per HLID */
153 u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
154
155 /* Cumulative counter of released Voice memory blocks */
156 u8 tx_voice_released_blks;
157
158 u8 padding[3];
159} __packed;
160
170/* FW status registers */ 161/* FW status registers */
171struct wl12xx_fw_status { 162struct wl_fw_status {
172 __le32 intr; 163 __le32 intr;
173 u8 fw_rx_counter; 164 u8 fw_rx_counter;
174 u8 drv_rx_counter; 165 u8 drv_rx_counter;
@@ -195,16 +186,12 @@ struct wl12xx_fw_status {
195 /* Size (in Memory Blocks) of TX pool */ 186 /* Size (in Memory Blocks) of TX pool */
196 __le32 tx_total; 187 __le32 tx_total;
197 188
198 /* Cumulative counter of released packets per AC */ 189 struct wl_fw_packet_counters counters;
199 u8 tx_released_pkts[NUM_TX_QUEUES];
200 190
201 /* Cumulative counter of freed packets per HLID */
202 u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
203
204 /* Cumulative counter of released Voice memory blocks */
205 u8 tx_voice_released_blks;
206 u8 padding_1[3];
207 __le32 log_start_addr; 191 __le32 log_start_addr;
192
193 /* Private status to be used by the lower drivers */
194 u8 priv[0];
208} __packed; 195} __packed;
209 196
210struct wl1271_rx_mem_pool_addr { 197struct wl1271_rx_mem_pool_addr {
@@ -292,214 +279,6 @@ struct wl1271_link {
292 u8 ba_bitmap; 279 u8 ba_bitmap;
293}; 280};
294 281
295struct wl1271 {
296 struct ieee80211_hw *hw;
297 bool mac80211_registered;
298
299 struct device *dev;
300
301 void *if_priv;
302
303 struct wl1271_if_operations *if_ops;
304
305 void (*set_power)(bool enable);
306 int irq;
307 int ref_clock;
308
309 spinlock_t wl_lock;
310
311 enum wl1271_state state;
312 enum wl12xx_fw_type fw_type;
313 bool plt;
314 u8 last_vif_count;
315 struct mutex mutex;
316
317 unsigned long flags;
318
319 struct wl1271_partition_set part;
320
321 struct wl1271_chip chip;
322
323 int cmd_box_addr;
324 int event_box_addr;
325
326 u8 *fw;
327 size_t fw_len;
328 void *nvs;
329 size_t nvs_len;
330
331 s8 hw_pg_ver;
332
333 /* address read from the fuse ROM */
334 u32 fuse_oui_addr;
335 u32 fuse_nic_addr;
336
337 /* we have up to 2 MAC addresses */
338 struct mac_address addresses[2];
339 int channel;
340 u8 system_hlid;
341
342 unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
343 unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
344 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
345 unsigned long rate_policies_map[
346 BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
347
348 struct list_head wlvif_list;
349
350 u8 sta_count;
351 u8 ap_count;
352
353 struct wl1271_acx_mem_map *target_mem_map;
354
355 /* Accounting for allocated / available TX blocks on HW */
356 u32 tx_blocks_freed;
357 u32 tx_blocks_available;
358 u32 tx_allocated_blocks;
359 u32 tx_results_count;
360
361 /* amount of spare TX blocks to use */
362 u32 tx_spare_blocks;
363
364 /* Accounting for allocated / available Tx packets in HW */
365 u32 tx_pkts_freed[NUM_TX_QUEUES];
366 u32 tx_allocated_pkts[NUM_TX_QUEUES];
367
368 /* Transmitted TX packets counter for chipset interface */
369 u32 tx_packets_count;
370
371 /* Time-offset between host and chipset clocks */
372 s64 time_offset;
373
374 /* Frames scheduled for transmission, not handled yet */
375 int tx_queue_count[NUM_TX_QUEUES];
376 long stopped_queues_map;
377
378 /* Frames received, not handled yet by mac80211 */
379 struct sk_buff_head deferred_rx_queue;
380
381 /* Frames sent, not returned yet to mac80211 */
382 struct sk_buff_head deferred_tx_queue;
383
384 struct work_struct tx_work;
385 struct workqueue_struct *freezable_wq;
386
387 /* Pending TX frames */
388 unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)];
389 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
390 int tx_frames_cnt;
391
392 /* FW Rx counter */
393 u32 rx_counter;
394
395 /* Rx memory pool address */
396 struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
397
398 /* Intermediate buffer, used for packet aggregation */
399 u8 *aggr_buf;
400
401 /* Reusable dummy packet template */
402 struct sk_buff *dummy_packet;
403
404 /* Network stack work */
405 struct work_struct netstack_work;
406
407 /* FW log buffer */
408 u8 *fwlog;
409
410 /* Number of valid bytes in the FW log buffer */
411 ssize_t fwlog_size;
412
413 /* Sysfs FW log entry readers wait queue */
414 wait_queue_head_t fwlog_waitq;
415
416 /* Hardware recovery work */
417 struct work_struct recovery_work;
418
419 /* The mbox event mask */
420 u32 event_mask;
421
422 /* Mailbox pointers */
423 u32 mbox_ptr[2];
424
425 /* Are we currently scanning */
426 struct ieee80211_vif *scan_vif;
427 struct wl1271_scan scan;
428 struct delayed_work scan_complete_work;
429
430 bool sched_scanning;
431
432 /* The current band */
433 enum ieee80211_band band;
434
435 struct completion *elp_compl;
436 struct delayed_work elp_work;
437
438 /* in dBm */
439 int power_level;
440
441 struct wl1271_stats stats;
442
443 __le32 buffer_32;
444 u32 buffer_cmd;
445 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
446
447 struct wl12xx_fw_status *fw_status;
448 struct wl1271_tx_hw_res_if *tx_res_if;
449
450 /* Current chipset configuration */
451 struct conf_drv_settings conf;
452
453 bool sg_enabled;
454
455 bool enable_11a;
456
457 /* Most recently reported noise in dBm */
458 s8 noise;
459
460 /* bands supported by this instance of wl12xx */
461 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
462
463 int tcxo_clock;
464
465 /*
466 * wowlan trigger was configured during suspend.
467 * (currently, only "ANY" trigger is supported)
468 */
469 bool wow_enabled;
470 bool irq_wake_enabled;
471
472 /*
473 * AP-mode - links indexed by HLID. The global and broadcast links
474 * are always active.
475 */
476 struct wl1271_link links[WL12XX_MAX_LINKS];
477
478 /* AP-mode - a bitmap of links currently in PS mode according to FW */
479 u32 ap_fw_ps_map;
480
481 /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
482 unsigned long ap_ps_map;
483
484 /* Quirks of specific hardware revisions */
485 unsigned int quirks;
486
487 /* Platform limitations */
488 unsigned int platform_quirks;
489
490 /* number of currently active RX BA sessions */
491 int ba_rx_session_count;
492
493 /* AP-mode - number of currently connected stations */
494 int active_sta_count;
495
496 /* last wlvif we transmitted from */
497 struct wl12xx_vif *last_wlvif;
498
499 /* work to fire when Tx is stuck */
500 struct delayed_work tx_watchdog_work;
501};
502
503struct wl1271_station { 282struct wl1271_station {
504 u8 hlid; 283 u8 hlid;
505}; 284};
@@ -605,6 +384,9 @@ struct wl12xx_vif {
605 struct work_struct rx_streaming_disable_work; 384 struct work_struct rx_streaming_disable_work;
606 struct timer_list rx_streaming_timer; 385 struct timer_list rx_streaming_timer;
607 386
387 /* does the current role use GEM for encryption (AP or STA) */
388 bool is_gem;
389
608 /* 390 /*
609 * This struct must be last! 391 * This struct must be last!
610 * data that has to be saved acrossed reconfigs (e.g. recovery) 392 * data that has to be saved acrossed reconfigs (e.g. recovery)
@@ -679,17 +461,6 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
679#define HW_BG_RATES_MASK 0xffff 461#define HW_BG_RATES_MASK 0xffff
680#define HW_HT_RATES_OFFSET 16 462#define HW_HT_RATES_OFFSET 16
681 463
682/* Quirks */
683
684/* Each RX/TX transaction requires an end-of-transaction transfer */
685#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
686
687/* wl127x and SPI don't support SDIO block size alignment */
688#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT BIT(2)
689
690/* Older firmwares did not implement the FW logger over bus feature */
691#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
692
693#define WL12XX_HW_BLOCK_SIZE 256 464#define WL12XX_HW_BLOCK_SIZE 256
694 465
695#endif 466#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
index 22b0bc98d7b5..22b0bc98d7b5 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c
index 998e95895f9d..998e95895f9d 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
+++ b/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
new file mode 100644
index 000000000000..39f9fadfebd9
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -0,0 +1,448 @@
1/*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WLCORE_H__
23#define __WLCORE_H__
24
25#include <linux/platform_device.h>
26
27#include "wl12xx.h"
28#include "event.h"
29
30/* The maximum number of Tx descriptors in all chip families */
31#define WLCORE_MAX_TX_DESCRIPTORS 32
32
33/* forward declaration */
34struct wl1271_tx_hw_descr;
35enum wl_rx_buf_align;
36
37struct wlcore_ops {
38 int (*identify_chip)(struct wl1271 *wl);
39 int (*identify_fw)(struct wl1271 *wl);
40 int (*boot)(struct wl1271 *wl);
41 void (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
42 void *buf, size_t len);
43 void (*ack_event)(struct wl1271 *wl);
44 u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks);
45 void (*set_tx_desc_blocks)(struct wl1271 *wl,
46 struct wl1271_tx_hw_descr *desc,
47 u32 blks, u32 spare_blks);
48 void (*set_tx_desc_data_len)(struct wl1271 *wl,
49 struct wl1271_tx_hw_descr *desc,
50 struct sk_buff *skb);
51 enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl,
52 u32 rx_desc);
53 void (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len);
54 u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data,
55 u32 data_len);
56 void (*tx_delayed_compl)(struct wl1271 *wl);
57 void (*tx_immediate_compl)(struct wl1271 *wl);
58 int (*hw_init)(struct wl1271 *wl);
59 int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
60 u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
61 struct wl12xx_vif *wlvif);
62 s8 (*get_pg_ver)(struct wl1271 *wl);
63 void (*get_mac)(struct wl1271 *wl);
64};
65
66enum wlcore_partitions {
67 PART_DOWN,
68 PART_WORK,
69 PART_BOOT,
70 PART_DRPW,
71 PART_TOP_PRCM_ELP_SOC,
72 PART_PHY_INIT,
73
74 PART_TABLE_LEN,
75};
76
77struct wlcore_partition {
78 u32 size;
79 u32 start;
80};
81
82struct wlcore_partition_set {
83 struct wlcore_partition mem;
84 struct wlcore_partition reg;
85 struct wlcore_partition mem2;
86 struct wlcore_partition mem3;
87};
88
89enum wlcore_registers {
90 /* register addresses, used with partition translation */
91 REG_ECPU_CONTROL,
92 REG_INTERRUPT_NO_CLEAR,
93 REG_INTERRUPT_ACK,
94 REG_COMMAND_MAILBOX_PTR,
95 REG_EVENT_MAILBOX_PTR,
96 REG_INTERRUPT_TRIG,
97 REG_INTERRUPT_MASK,
98 REG_PC_ON_RECOVERY,
99 REG_CHIP_ID_B,
100 REG_CMD_MBOX_ADDRESS,
101
102 /* data access memory addresses, used with partition translation */
103 REG_SLV_MEM_DATA,
104 REG_SLV_REG_DATA,
105
106 /* raw data access memory addresses */
107 REG_RAW_FW_STATUS_ADDR,
108
109 REG_TABLE_LEN,
110};
111
112struct wl1271 {
113 struct ieee80211_hw *hw;
114 bool mac80211_registered;
115
116 struct device *dev;
117
118 void *if_priv;
119
120 struct wl1271_if_operations *if_ops;
121
122 void (*set_power)(bool enable);
123 int irq;
124 int ref_clock;
125
126 spinlock_t wl_lock;
127
128 enum wl1271_state state;
129 enum wl12xx_fw_type fw_type;
130 bool plt;
131 u8 last_vif_count;
132 struct mutex mutex;
133
134 unsigned long flags;
135
136 struct wlcore_partition_set curr_part;
137
138 struct wl1271_chip chip;
139
140 int cmd_box_addr;
141
142 u8 *fw;
143 size_t fw_len;
144 void *nvs;
145 size_t nvs_len;
146
147 s8 hw_pg_ver;
148
149 /* address read from the fuse ROM */
150 u32 fuse_oui_addr;
151 u32 fuse_nic_addr;
152
153 /* we have up to 2 MAC addresses */
154 struct mac_address addresses[2];
155 int channel;
156 u8 system_hlid;
157
158 unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
159 unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
160 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
161 unsigned long rate_policies_map[
162 BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
163
164 struct list_head wlvif_list;
165
166 u8 sta_count;
167 u8 ap_count;
168
169 struct wl1271_acx_mem_map *target_mem_map;
170
171 /* Accounting for allocated / available TX blocks on HW */
172 u32 tx_blocks_freed;
173 u32 tx_blocks_available;
174 u32 tx_allocated_blocks;
175 u32 tx_results_count;
176
177 /* Accounting for allocated / available Tx packets in HW */
178 u32 tx_pkts_freed[NUM_TX_QUEUES];
179 u32 tx_allocated_pkts[NUM_TX_QUEUES];
180
181 /* Transmitted TX packets counter for chipset interface */
182 u32 tx_packets_count;
183
184 /* Time-offset between host and chipset clocks */
185 s64 time_offset;
186
187 /* Frames scheduled for transmission, not handled yet */
188 int tx_queue_count[NUM_TX_QUEUES];
189 long stopped_queues_map;
190
191 /* Frames received, not handled yet by mac80211 */
192 struct sk_buff_head deferred_rx_queue;
193
194 /* Frames sent, not returned yet to mac80211 */
195 struct sk_buff_head deferred_tx_queue;
196
197 struct work_struct tx_work;
198 struct workqueue_struct *freezable_wq;
199
200 /* Pending TX frames */
201 unsigned long tx_frames_map[BITS_TO_LONGS(WLCORE_MAX_TX_DESCRIPTORS)];
202 struct sk_buff *tx_frames[WLCORE_MAX_TX_DESCRIPTORS];
203 int tx_frames_cnt;
204
205 /* FW Rx counter */
206 u32 rx_counter;
207
208 /* Rx memory pool address */
209 struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
210
211 /* Intermediate buffer, used for packet aggregation */
212 u8 *aggr_buf;
213
214 /* Reusable dummy packet template */
215 struct sk_buff *dummy_packet;
216
217 /* Network stack work */
218 struct work_struct netstack_work;
219
220 /* FW log buffer */
221 u8 *fwlog;
222
223 /* Number of valid bytes in the FW log buffer */
224 ssize_t fwlog_size;
225
226 /* Sysfs FW log entry readers wait queue */
227 wait_queue_head_t fwlog_waitq;
228
229 /* Hardware recovery work */
230 struct work_struct recovery_work;
231
232 /* Pointer that holds DMA-friendly block for the mailbox */
233 struct event_mailbox *mbox;
234
235 /* The mbox event mask */
236 u32 event_mask;
237
238 /* Mailbox pointers */
239 u32 mbox_ptr[2];
240
241 /* Are we currently scanning */
242 struct ieee80211_vif *scan_vif;
243 struct wl1271_scan scan;
244 struct delayed_work scan_complete_work;
245
246 bool sched_scanning;
247
248 /* The current band */
249 enum ieee80211_band band;
250
251 struct completion *elp_compl;
252 struct delayed_work elp_work;
253
254 /* in dBm */
255 int power_level;
256
257 struct wl1271_stats stats;
258
259 __le32 buffer_32;
260 u32 buffer_cmd;
261 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
262
263 struct wl_fw_status *fw_status;
264 struct wl1271_tx_hw_res_if *tx_res_if;
265
266 /* Current chipset configuration */
267 struct wlcore_conf conf;
268
269 bool sg_enabled;
270
271 bool enable_11a;
272
273 /* Most recently reported noise in dBm */
274 s8 noise;
275
276 /* bands supported by this instance of wl12xx */
277 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
278
279 int tcxo_clock;
280
281 /*
282 * wowlan trigger was configured during suspend.
283 * (currently, only "ANY" trigger is supported)
284 */
285 bool wow_enabled;
286 bool irq_wake_enabled;
287
288 /*
289 * AP-mode - links indexed by HLID. The global and broadcast links
290 * are always active.
291 */
292 struct wl1271_link links[WL12XX_MAX_LINKS];
293
294 /* AP-mode - a bitmap of links currently in PS mode according to FW */
295 u32 ap_fw_ps_map;
296
297 /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
298 unsigned long ap_ps_map;
299
300 /* Quirks of specific hardware revisions */
301 unsigned int quirks;
302
303 /* Platform limitations */
304 unsigned int platform_quirks;
305
306 /* number of currently active RX BA sessions */
307 int ba_rx_session_count;
308
309 /* AP-mode - number of currently connected stations */
310 int active_sta_count;
311
312 /* last wlvif we transmitted from */
313 struct wl12xx_vif *last_wlvif;
314
315 /* work to fire when Tx is stuck */
316 struct delayed_work tx_watchdog_work;
317
318 struct wlcore_ops *ops;
319 /* pointer to the lower driver partition table */
320 const struct wlcore_partition_set *ptable;
321 /* pointer to the lower driver register table */
322 const int *rtable;
323 /* name of the firmwares to load - for PLT, single role, multi-role */
324 const char *plt_fw_name;
325 const char *sr_fw_name;
326 const char *mr_fw_name;
327
328 /* per-chip-family private structure */
329 void *priv;
330
331 /* number of TX descriptors the HW supports. */
332 u32 num_tx_desc;
333
334 /* spare Tx blocks for normal/GEM operating modes */
335 u32 normal_tx_spare;
336 u32 gem_tx_spare;
337
338 /* translate HW Tx rates to standard rate-indices */
339 const u8 **band_rate_to_idx;
340
341 /* size of table for HW rates that can be received from chip */
342 u8 hw_tx_rate_tbl_size;
343
344 /* this HW rate and below are considered HT rates for this chip */
345 u8 hw_min_ht_rate;
346
347 /* HW HT (11n) capabilities */
348 struct ieee80211_sta_ht_cap ht_cap;
349
350 /* size of the private FW status data */
351 size_t fw_status_priv_len;
352};
353
354int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
355int __devexit wlcore_remove(struct platform_device *pdev);
356struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size);
357int wlcore_free_hw(struct wl1271 *wl);
358
359/* Firmware image load chunk size */
360#define CHUNK_SIZE 16384
361
362/* Quirks */
363
364/* Each RX/TX transaction requires an end-of-transaction transfer */
365#define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0)
366
367/* wl127x and SPI don't support SDIO block size alignment */
368#define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2)
369
370/* means aggregated Rx packets are aligned to a SDIO block */
371#define WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN BIT(3)
372
373/* Older firmwares did not implement the FW logger over bus feature */
374#define WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
375
376/* Older firmwares use an old NVS format */
377#define WLCORE_QUIRK_LEGACY_NVS BIT(5)
378
379/* Some firmwares may not support ELP */
380#define WLCORE_QUIRK_NO_ELP BIT(6)
381
382/* TODO: move to the lower drivers when all usages are abstracted */
383#define CHIP_ID_1271_PG10 (0x4030101)
384#define CHIP_ID_1271_PG20 (0x4030111)
385#define CHIP_ID_1283_PG10 (0x05030101)
386#define CHIP_ID_1283_PG20 (0x05030111)
387
388/* TODO: move all these common registers and values elsewhere */
389#define HW_ACCESS_ELP_CTRL_REG 0x1FFFC
390
391/* ELP register commands */
392#define ELPCTRL_WAKE_UP 0x1
393#define ELPCTRL_WAKE_UP_WLAN_READY 0x5
394#define ELPCTRL_SLEEP 0x0
395/* ELP WLAN_READY bit */
396#define ELPCTRL_WLAN_READY 0x2
397
398/*************************************************************************
399
400 Interrupt Trigger Register (Host -> WiLink)
401
402**************************************************************************/
403
404/* Hardware to Embedded CPU Interrupts - first 32-bit register set */
405
406/*
407 * The host sets this bit to inform the Wlan
408 * FW that a TX packet is in the XFER
409 * Buffer #0.
410 */
411#define INTR_TRIG_TX_PROC0 BIT(2)
412
413/*
414 * The host sets this bit to inform the FW
415 * that it read a packet from RX XFER
416 * Buffer #0.
417 */
418#define INTR_TRIG_RX_PROC0 BIT(3)
419
420#define INTR_TRIG_DEBUG_ACK BIT(4)
421
422#define INTR_TRIG_STATE_CHANGED BIT(5)
423
424/* Hardware to Embedded CPU Interrupts - second 32-bit register set */
425
426/*
427 * The host sets this bit to inform the FW
428 * that it read a packet from RX XFER
429 * Buffer #1.
430 */
431#define INTR_TRIG_RX_PROC1 BIT(17)
432
433/*
434 * The host sets this bit to inform the Wlan
435 * hardware that a TX packet is in the XFER
436 * Buffer #1.
437 */
438#define INTR_TRIG_TX_PROC1 BIT(18)
439
440#define ACX_SLV_SOFT_RESET_BIT BIT(1)
441#define SOFT_RESET_MAX_TIME 1000000
442#define SOFT_RESET_STALL_TIME 1000
443
444#define ECPU_CONTROL_HALT 0x00000101
445
446#define WELP_ARM_COMMAND_VAL 0x4
447
448#endif /* __WLCORE_H__ */
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
deleted file mode 100644
index af08c8609c63..000000000000
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ /dev/null
@@ -1,48 +0,0 @@
1menuconfig WL12XX_MENU
2 tristate "TI wl12xx driver support"
3 depends on MAC80211 && EXPERIMENTAL
4 ---help---
5 This will enable TI wl12xx driver support for the following chips:
6 wl1271, wl1273, wl1281 and wl1283.
7 The drivers make use of the mac80211 stack.
8
9config WL12XX
10 tristate "TI wl12xx support"
11 depends on WL12XX_MENU && GENERIC_HARDIRQS
12 depends on INET
13 select FW_LOADER
14 ---help---
15 This module adds support for wireless adapters based on TI wl1271 and
16 TI wl1273 chipsets. This module does *not* include support for wl1251.
17 For wl1251 support, use the separate homonymous driver instead.
18
19 If you choose to build a module, it will be called wl12xx. Say N if
20 unsure.
21
22config WL12XX_SPI
23 tristate "TI wl12xx SPI support"
24 depends on WL12XX && SPI_MASTER
25 select CRC7
26 ---help---
27 This module adds support for the SPI interface of adapters using
28 TI wl12xx chipsets. Select this if your platform is using
29 the SPI bus.
30
31 If you choose to build a module, it'll be called wl12xx_spi.
32 Say N if unsure.
33
34config WL12XX_SDIO
35 tristate "TI wl12xx SDIO support"
36 depends on WL12XX && MMC
37 ---help---
38 This module adds support for the SDIO interface of adapters using
39 TI wl12xx chipsets. Select this if your platform is using
40 the SDIO bus.
41
42 If you choose to build a module, it'll be called wl12xx_sdio.
43 Say N if unsure.
44
45config WL12XX_PLATFORM_DATA
46 bool
47 depends on WL12XX_SDIO != n || WL1251_SDIO != n
48 default y
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
deleted file mode 100644
index 98f289c907a9..000000000000
--- a/drivers/net/wireless/wl12xx/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
1wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
2 boot.o init.o debugfs.o scan.o
3
4wl12xx_spi-objs = spi.o
5wl12xx_sdio-objs = sdio.o
6
7wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
8obj-$(CONFIG_WL12XX) += wl12xx.o
9obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
10obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
11
12# small builtin driver bit
13obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
14
15ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
deleted file mode 100644
index 954101d03f06..000000000000
--- a/drivers/net/wireless/wl12xx/boot.c
+++ /dev/null
@@ -1,786 +0,0 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/slab.h>
25#include <linux/wl12xx.h>
26#include <linux/export.h>
27
28#include "debug.h"
29#include "acx.h"
30#include "reg.h"
31#include "boot.h"
32#include "io.h"
33#include "event.h"
34#include "rx.h"
35
36static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
37{
38 u32 cpu_ctrl;
39
40 /* 10.5.0 run the firmware (I) */
41 cpu_ctrl = wl1271_read32(wl, ACX_REG_ECPU_CONTROL);
42
43 /* 10.5.1 run the firmware (II) */
44 cpu_ctrl |= flag;
45 wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
46}
47
48static unsigned int wl12xx_get_fw_ver_quirks(struct wl1271 *wl)
49{
50 unsigned int quirks = 0;
51 unsigned int *fw_ver = wl->chip.fw_ver;
52
53 /* Only new station firmwares support routing fw logs to the host */
54 if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
55 (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
56 quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED;
57
58 /* This feature is not yet supported for AP mode */
59 if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
60 quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED;
61
62 return quirks;
63}
64
65static void wl1271_parse_fw_ver(struct wl1271 *wl)
66{
67 int ret;
68
69 ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
70 &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
71 &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
72 &wl->chip.fw_ver[4]);
73
74 if (ret != 5) {
75 wl1271_warning("fw version incorrect value");
76 memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
77 return;
78 }
79
80 /* Check if any quirks are needed with older fw versions */
81 wl->quirks |= wl12xx_get_fw_ver_quirks(wl);
82}
83
84static void wl1271_boot_fw_version(struct wl1271 *wl)
85{
86 struct wl1271_static_data static_data;
87
88 wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
89 false);
90
91 strncpy(wl->chip.fw_ver_str, static_data.fw_version,
92 sizeof(wl->chip.fw_ver_str));
93
94 /* make sure the string is NULL-terminated */
95 wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
96
97 wl1271_parse_fw_ver(wl);
98}
99
100static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
101 size_t fw_data_len, u32 dest)
102{
103 struct wl1271_partition_set partition;
104 int addr, chunk_num, partition_limit;
105 u8 *p, *chunk;
106
107 /* whal_FwCtrl_LoadFwImageSm() */
108
109 wl1271_debug(DEBUG_BOOT, "starting firmware upload");
110
111 wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
112 fw_data_len, CHUNK_SIZE);
113
114 if ((fw_data_len % 4) != 0) {
115 wl1271_error("firmware length not multiple of four");
116 return -EIO;
117 }
118
119 chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
120 if (!chunk) {
121 wl1271_error("allocation for firmware upload chunk failed");
122 return -ENOMEM;
123 }
124
125 memcpy(&partition, &wl12xx_part_table[PART_DOWN], sizeof(partition));
126 partition.mem.start = dest;
127 wl1271_set_partition(wl, &partition);
128
129 /* 10.1 set partition limit and chunk num */
130 chunk_num = 0;
131 partition_limit = wl12xx_part_table[PART_DOWN].mem.size;
132
133 while (chunk_num < fw_data_len / CHUNK_SIZE) {
134 /* 10.2 update partition, if needed */
135 addr = dest + (chunk_num + 2) * CHUNK_SIZE;
136 if (addr > partition_limit) {
137 addr = dest + chunk_num * CHUNK_SIZE;
138 partition_limit = chunk_num * CHUNK_SIZE +
139 wl12xx_part_table[PART_DOWN].mem.size;
140 partition.mem.start = addr;
141 wl1271_set_partition(wl, &partition);
142 }
143
144 /* 10.3 upload the chunk */
145 addr = dest + chunk_num * CHUNK_SIZE;
146 p = buf + chunk_num * CHUNK_SIZE;
147 memcpy(chunk, p, CHUNK_SIZE);
148 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
149 p, addr);
150 wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
151
152 chunk_num++;
153 }
154
155 /* 10.4 upload the last chunk */
156 addr = dest + chunk_num * CHUNK_SIZE;
157 p = buf + chunk_num * CHUNK_SIZE;
158 memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
159 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
160 fw_data_len % CHUNK_SIZE, p, addr);
161 wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
162
163 kfree(chunk);
164 return 0;
165}
166
167static int wl1271_boot_upload_firmware(struct wl1271 *wl)
168{
169 u32 chunks, addr, len;
170 int ret = 0;
171 u8 *fw;
172
173 fw = wl->fw;
174 chunks = be32_to_cpup((__be32 *) fw);
175 fw += sizeof(u32);
176
177 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
178
179 while (chunks--) {
180 addr = be32_to_cpup((__be32 *) fw);
181 fw += sizeof(u32);
182 len = be32_to_cpup((__be32 *) fw);
183 fw += sizeof(u32);
184
185 if (len > 300000) {
186 wl1271_info("firmware chunk too long: %u", len);
187 return -EINVAL;
188 }
189 wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
190 chunks, addr, len);
191 ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
192 if (ret != 0)
193 break;
194 fw += len;
195 }
196
197 return ret;
198}
199
200static int wl1271_boot_upload_nvs(struct wl1271 *wl)
201{
202 size_t nvs_len, burst_len;
203 int i;
204 u32 dest_addr, val;
205 u8 *nvs_ptr, *nvs_aligned;
206
207 if (wl->nvs == NULL)
208 return -ENODEV;
209
210 if (wl->chip.id == CHIP_ID_1283_PG20) {
211 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
212
213 if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) {
214 if (nvs->general_params.dual_mode_select)
215 wl->enable_11a = true;
216 } else {
217 wl1271_error("nvs size is not as expected: %zu != %zu",
218 wl->nvs_len,
219 sizeof(struct wl128x_nvs_file));
220 kfree(wl->nvs);
221 wl->nvs = NULL;
222 wl->nvs_len = 0;
223 return -EILSEQ;
224 }
225
226 /* only the first part of the NVS needs to be uploaded */
227 nvs_len = sizeof(nvs->nvs);
228 nvs_ptr = (u8 *)nvs->nvs;
229
230 } else {
231 struct wl1271_nvs_file *nvs =
232 (struct wl1271_nvs_file *)wl->nvs;
233 /*
234 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz
235 * band configurations) can be removed when those NVS files stop
236 * floating around.
237 */
238 if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
239 wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
240 if (nvs->general_params.dual_mode_select)
241 wl->enable_11a = true;
242 }
243
244 if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
245 (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
246 wl->enable_11a)) {
247 wl1271_error("nvs size is not as expected: %zu != %zu",
248 wl->nvs_len, sizeof(struct wl1271_nvs_file));
249 kfree(wl->nvs);
250 wl->nvs = NULL;
251 wl->nvs_len = 0;
252 return -EILSEQ;
253 }
254
255 /* only the first part of the NVS needs to be uploaded */
256 nvs_len = sizeof(nvs->nvs);
257 nvs_ptr = (u8 *) nvs->nvs;
258 }
259
260 /* update current MAC address to NVS */
261 nvs_ptr[11] = wl->addresses[0].addr[0];
262 nvs_ptr[10] = wl->addresses[0].addr[1];
263 nvs_ptr[6] = wl->addresses[0].addr[2];
264 nvs_ptr[5] = wl->addresses[0].addr[3];
265 nvs_ptr[4] = wl->addresses[0].addr[4];
266 nvs_ptr[3] = wl->addresses[0].addr[5];
267
268 /*
269 * Layout before the actual NVS tables:
270 * 1 byte : burst length.
271 * 2 bytes: destination address.
272 * n bytes: data to burst copy.
273 *
274 * This is ended by a 0 length, then the NVS tables.
275 */
276
277 /* FIXME: Do we need to check here whether the LSB is 1? */
278 while (nvs_ptr[0]) {
279 burst_len = nvs_ptr[0];
280 dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));
281
282 /*
283 * Due to our new wl1271_translate_reg_addr function,
284 * we need to add the REGISTER_BASE to the destination
285 */
286 dest_addr += REGISTERS_BASE;
287
288 /* We move our pointer to the data */
289 nvs_ptr += 3;
290
291 for (i = 0; i < burst_len; i++) {
292 if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
293 goto out_badnvs;
294
295 val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
296 | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
297
298 wl1271_debug(DEBUG_BOOT,
299 "nvs burst write 0x%x: 0x%x",
300 dest_addr, val);
301 wl1271_write32(wl, dest_addr, val);
302
303 nvs_ptr += 4;
304 dest_addr += 4;
305 }
306
307 if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
308 goto out_badnvs;
309 }
310
311 /*
312 * We've reached the first zero length, the first NVS table
313 * is located at an aligned offset which is at least 7 bytes further.
314 * NOTE: The wl->nvs->nvs element must be first, in order to
315 * simplify the casting, we assume it is at the beginning of
316 * the wl->nvs structure.
317 */
318 nvs_ptr = (u8 *)wl->nvs +
319 ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
320
321 if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
322 goto out_badnvs;
323
324 nvs_len -= nvs_ptr - (u8 *)wl->nvs;
325
326 /* Now we must set the partition correctly */
327 wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
328
329 /* Copy the NVS tables to a new block to ensure alignment */
330 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
331 if (!nvs_aligned)
332 return -ENOMEM;
333
334 /* And finally we upload the NVS tables */
335 wl1271_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
336
337 kfree(nvs_aligned);
338 return 0;
339
340out_badnvs:
341 wl1271_error("nvs data is malformed");
342 return -EILSEQ;
343}
344
345static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
346{
347 wl1271_enable_interrupts(wl);
348 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
349 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
350 wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
351}
352
353static int wl1271_boot_soft_reset(struct wl1271 *wl)
354{
355 unsigned long timeout;
356 u32 boot_data;
357
358 /* perform soft reset */
359 wl1271_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
360
361 /* SOFT_RESET is self clearing */
362 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
363 while (1) {
364 boot_data = wl1271_read32(wl, ACX_REG_SLV_SOFT_RESET);
365 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
366 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
367 break;
368
369 if (time_after(jiffies, timeout)) {
370 /* 1.2 check pWhalBus->uSelfClearTime if the
371 * timeout was reached */
372 wl1271_error("soft reset timeout");
373 return -1;
374 }
375
376 udelay(SOFT_RESET_STALL_TIME);
377 }
378
379 /* disable Rx/Tx */
380 wl1271_write32(wl, ENABLE, 0x0);
381
382 /* disable auto calibration on start*/
383 wl1271_write32(wl, SPARE_A2, 0xffff);
384
385 return 0;
386}
387
388static int wl1271_boot_run_firmware(struct wl1271 *wl)
389{
390 int loop, ret;
391 u32 chip_id, intr;
392
393 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
394
395 chip_id = wl1271_read32(wl, CHIP_ID_B);
396
397 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
398
399 if (chip_id != wl->chip.id) {
400 wl1271_error("chip id doesn't match after firmware boot");
401 return -EIO;
402 }
403
404 /* wait for init to complete */
405 loop = 0;
406 while (loop++ < INIT_LOOP) {
407 udelay(INIT_LOOP_DELAY);
408 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
409
410 if (intr == 0xffffffff) {
411 wl1271_error("error reading hardware complete "
412 "init indication");
413 return -EIO;
414 }
415 /* check that ACX_INTR_INIT_COMPLETE is enabled */
416 else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
417 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
418 WL1271_ACX_INTR_INIT_COMPLETE);
419 break;
420 }
421 }
422
423 if (loop > INIT_LOOP) {
424 wl1271_error("timeout waiting for the hardware to "
425 "complete initialization");
426 return -EIO;
427 }
428
429 /* get hardware config command mail box */
430 wl->cmd_box_addr = wl1271_read32(wl, REG_COMMAND_MAILBOX_PTR);
431
432 /* get hardware config event mail box */
433 wl->event_box_addr = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
434
435 /* set the working partition to its "running" mode offset */
436 wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
437
438 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
439 wl->cmd_box_addr, wl->event_box_addr);
440
441 wl1271_boot_fw_version(wl);
442
443 /*
444 * in case of full asynchronous mode the firmware event must be
445 * ready to receive event from the command mailbox
446 */
447
448 /* unmask required mbox events */
449 wl->event_mask = BSS_LOSE_EVENT_ID |
450 SCAN_COMPLETE_EVENT_ID |
451 ROLE_STOP_COMPLETE_EVENT_ID |
452 RSSI_SNR_TRIGGER_0_EVENT_ID |
453 PSPOLL_DELIVERY_FAILURE_EVENT_ID |
454 SOFT_GEMINI_SENSE_EVENT_ID |
455 PERIODIC_SCAN_REPORT_EVENT_ID |
456 PERIODIC_SCAN_COMPLETE_EVENT_ID |
457 DUMMY_PACKET_EVENT_ID |
458 PEER_REMOVE_COMPLETE_EVENT_ID |
459 BA_SESSION_RX_CONSTRAINT_EVENT_ID |
460 REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
461 INACTIVE_STA_EVENT_ID |
462 MAX_TX_RETRY_EVENT_ID |
463 CHANNEL_SWITCH_COMPLETE_EVENT_ID;
464
465 ret = wl1271_event_unmask(wl);
466 if (ret < 0) {
467 wl1271_error("EVENT mask setting failed");
468 return ret;
469 }
470
471 wl1271_event_mbox_config(wl);
472
473 /* firmware startup completed */
474 return 0;
475}
476
477static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
478{
479 u32 polarity;
480
481 polarity = wl1271_top_reg_read(wl, OCP_REG_POLARITY);
482
483 /* We use HIGH polarity, so unset the LOW bit */
484 polarity &= ~POLARITY_LOW;
485 wl1271_top_reg_write(wl, OCP_REG_POLARITY, polarity);
486
487 return 0;
488}
489
490static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
491{
492 u16 spare_reg;
493
494 /* Mask bits [2] & [8:4] in the sys_clk_cfg register */
495 spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG);
496 if (spare_reg == 0xFFFF)
497 return -EFAULT;
498 spare_reg |= (BIT(3) | BIT(5) | BIT(6));
499 wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg);
500
501 /* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
502 wl1271_top_reg_write(wl, SYS_CLK_CFG_REG,
503 WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
504
505 /* Delay execution for 15msec, to let the HW settle */
506 mdelay(15);
507
508 return 0;
509}
510
511static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
512{
513 u16 tcxo_detection;
514
515 tcxo_detection = wl1271_top_reg_read(wl, TCXO_CLK_DETECT_REG);
516 if (tcxo_detection & TCXO_DET_FAILED)
517 return false;
518
519 return true;
520}
521
522static bool wl128x_is_fref_valid(struct wl1271 *wl)
523{
524 u16 fref_detection;
525
526 fref_detection = wl1271_top_reg_read(wl, FREF_CLK_DETECT_REG);
527 if (fref_detection & FREF_CLK_DETECT_FAIL)
528 return false;
529
530 return true;
531}
532
533static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
534{
535 wl1271_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
536 wl1271_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
537 wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
538
539 return 0;
540}
541
542static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
543{
544 u16 spare_reg;
545 u16 pll_config;
546 u8 input_freq;
547
548 /* Mask bits [3:1] in the sys_clk_cfg register */
549 spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG);
550 if (spare_reg == 0xFFFF)
551 return -EFAULT;
552 spare_reg |= BIT(2);
553 wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg);
554
555 /* Handle special cases of the TCXO clock */
556 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
557 wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
558 return wl128x_manually_configure_mcs_pll(wl);
559
560 /* Set the input frequency according to the selected clock source */
561 input_freq = (clk & 1) + 1;
562
563 pll_config = wl1271_top_reg_read(wl, MCS_PLL_CONFIG_REG);
564 if (pll_config == 0xFFFF)
565 return -EFAULT;
566 pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
567 pll_config |= MCS_PLL_ENABLE_HP;
568 wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
569
570 return 0;
571}
572
573/*
574 * WL128x has two clocks input - TCXO and FREF.
575 * TCXO is the main clock of the device, while FREF is used to sync
576 * between the GPS and the cellular modem.
577 * In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used
578 * as the WLAN/BT main clock.
579 */
580static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
581{
582 u16 sys_clk_cfg;
583
584 /* For XTAL-only modes, FREF will be used after switching from TCXO */
585 if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
586 wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
587 if (!wl128x_switch_tcxo_to_fref(wl))
588 return -EINVAL;
589 goto fref_clk;
590 }
591
592 /* Query the HW, to determine which clock source we should use */
593 sys_clk_cfg = wl1271_top_reg_read(wl, SYS_CLK_CFG_REG);
594 if (sys_clk_cfg == 0xFFFF)
595 return -EINVAL;
596 if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
597 goto fref_clk;
598
599 /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
600 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
601 wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
602 if (!wl128x_switch_tcxo_to_fref(wl))
603 return -EINVAL;
604 goto fref_clk;
605 }
606
607 /* TCXO clock is selected */
608 if (!wl128x_is_tcxo_valid(wl))
609 return -EINVAL;
610 *selected_clock = wl->tcxo_clock;
611 goto config_mcs_pll;
612
613fref_clk:
614 /* FREF clock is selected */
615 if (!wl128x_is_fref_valid(wl))
616 return -EINVAL;
617 *selected_clock = wl->ref_clock;
618
619config_mcs_pll:
620 return wl128x_configure_mcs_pll(wl, *selected_clock);
621}
622
623static int wl127x_boot_clk(struct wl1271 *wl)
624{
625 u32 pause;
626 u32 clk;
627
628 if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
629 wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
630
631 if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
632 wl->ref_clock == CONF_REF_CLK_38_4_E ||
633 wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
634 /* ref clk: 19.2/38.4/38.4-XTAL */
635 clk = 0x3;
636 else if (wl->ref_clock == CONF_REF_CLK_26_E ||
637 wl->ref_clock == CONF_REF_CLK_52_E)
638 /* ref clk: 26/52 */
639 clk = 0x5;
640 else
641 return -EINVAL;
642
643 if (wl->ref_clock != CONF_REF_CLK_19_2_E) {
644 u16 val;
645 /* Set clock type (open drain) */
646 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
647 val &= FREF_CLK_TYPE_BITS;
648 wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
649
650 /* Set clock pull mode (no pull) */
651 val = wl1271_top_reg_read(wl, OCP_REG_CLK_PULL);
652 val |= NO_PULL;
653 wl1271_top_reg_write(wl, OCP_REG_CLK_PULL, val);
654 } else {
655 u16 val;
656 /* Set clock polarity */
657 val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY);
658 val &= FREF_CLK_POLARITY_BITS;
659 val |= CLK_REQ_OUTN_SEL;
660 wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
661 }
662
663 wl1271_write32(wl, PLL_PARAMETERS, clk);
664
665 pause = wl1271_read32(wl, PLL_PARAMETERS);
666
667 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
668
669 pause &= ~(WU_COUNTER_PAUSE_VAL);
670 pause |= WU_COUNTER_PAUSE_VAL;
671 wl1271_write32(wl, WU_COUNTER_PAUSE, pause);
672
673 return 0;
674}
675
676/* uploads NVS and firmware */
677int wl1271_load_firmware(struct wl1271 *wl)
678{
679 int ret = 0;
680 u32 tmp, clk;
681 int selected_clock = -1;
682
683 if (wl->chip.id == CHIP_ID_1283_PG20) {
684 ret = wl128x_boot_clk(wl, &selected_clock);
685 if (ret < 0)
686 goto out;
687 } else {
688 ret = wl127x_boot_clk(wl);
689 if (ret < 0)
690 goto out;
691 }
692
693 /* Continue the ELP wake up sequence */
694 wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
695 udelay(500);
696
697 wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]);
698
699 /* Read-modify-write DRPW_SCRATCH_START register (see next state)
700 to be used by DRPw FW. The RTRIM value will be added by the FW
701 before taking DRPw out of reset */
702
703 wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
704 clk = wl1271_read32(wl, DRPW_SCRATCH_START);
705
706 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
707
708 if (wl->chip.id == CHIP_ID_1283_PG20) {
709 clk |= ((selected_clock & 0x3) << 1) << 4;
710 } else {
711 clk |= (wl->ref_clock << 1) << 4;
712 }
713
714 wl1271_write32(wl, DRPW_SCRATCH_START, clk);
715
716 wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
717
718 /* Disable interrupts */
719 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
720
721 ret = wl1271_boot_soft_reset(wl);
722 if (ret < 0)
723 goto out;
724
725 /* 2. start processing NVS file */
726 ret = wl1271_boot_upload_nvs(wl);
727 if (ret < 0)
728 goto out;
729
730 /* write firmware's last address (ie. it's length) to
731 * ACX_EEPROMLESS_IND_REG */
732 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
733
734 wl1271_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG);
735
736 tmp = wl1271_read32(wl, CHIP_ID_B);
737
738 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
739
740 /* 6. read the EEPROM parameters */
741 tmp = wl1271_read32(wl, SCR_PAD2);
742
743 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly
744 * to upload_fw) */
745
746 if (wl->chip.id == CHIP_ID_1283_PG20)
747 wl1271_top_reg_write(wl, SDIO_IO_DS, wl->conf.hci_io_ds);
748
749 ret = wl1271_boot_upload_firmware(wl);
750 if (ret < 0)
751 goto out;
752
753out:
754 return ret;
755}
756EXPORT_SYMBOL_GPL(wl1271_load_firmware);
757
758int wl1271_boot(struct wl1271 *wl)
759{
760 int ret;
761
762 /* upload NVS and firmware */
763 ret = wl1271_load_firmware(wl);
764 if (ret)
765 return ret;
766
767 /* 10.5 start firmware */
768 ret = wl1271_boot_run_firmware(wl);
769 if (ret < 0)
770 goto out;
771
772 ret = wl1271_boot_write_irq_polarity(wl);
773 if (ret < 0)
774 goto out;
775
776 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
777 WL1271_ACX_ALL_EVENTS_VECTOR);
778
779 /* Enable firmware interrupts now */
780 wl1271_boot_enable_interrupts(wl);
781
782 wl1271_event_mbox_config(wl);
783
784out:
785 return ret;
786}
diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h
deleted file mode 100644
index c3adc09f403d..000000000000
--- a/drivers/net/wireless/wl12xx/boot.h
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __BOOT_H__
25#define __BOOT_H__
26
27#include "wl12xx.h"
28
29int wl1271_boot(struct wl1271 *wl);
30int wl1271_load_firmware(struct wl1271 *wl);
31
32#define WL1271_NO_SUBBANDS 8
33#define WL1271_NO_POWER_LEVELS 4
34#define WL1271_FW_VERSION_MAX_LEN 20
35
36struct wl1271_static_data {
37 u8 mac_address[ETH_ALEN];
38 u8 padding[2];
39 u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
40 u32 hw_version;
41 u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
42};
43
44/* number of times we try to read the INIT interrupt */
45#define INIT_LOOP 20000
46
47/* delay between retries */
48#define INIT_LOOP_DELAY 50
49
50#define WU_COUNTER_PAUSE_VAL 0x3FF
51#define WELP_ARM_COMMAND_VAL 0x4
52
53#define OCP_REG_POLARITY 0x0064
54#define OCP_REG_CLK_TYPE 0x0448
55#define OCP_REG_CLK_POLARITY 0x0cb2
56#define OCP_REG_CLK_PULL 0x0cb4
57
58#define CMD_MBOX_ADDRESS 0x407B4
59
60#define POLARITY_LOW BIT(1)
61#define NO_PULL (BIT(14) | BIT(15))
62
63#define FREF_CLK_TYPE_BITS 0xfffffe7f
64#define CLK_REQ_PRCM 0x100
65#define FREF_CLK_POLARITY_BITS 0xfffff8ff
66#define CLK_REQ_OUTN_SEL 0x700
67
68/* PLL configuration algorithm for wl128x */
69#define SYS_CLK_CFG_REG 0x2200
70/* Bit[0] - 0-TCXO, 1-FREF */
71#define MCS_PLL_CLK_SEL_FREF BIT(0)
72/* Bit[3:2] - 01-TCXO, 10-FREF */
73#define WL_CLK_REQ_TYPE_FREF BIT(3)
74#define WL_CLK_REQ_TYPE_PG2 (BIT(3) | BIT(2))
75/* Bit[4] - 0-TCXO, 1-FREF */
76#define PRCM_CM_EN_MUX_WLAN_FREF BIT(4)
77
78#define TCXO_ILOAD_INT_REG 0x2264
79#define TCXO_CLK_DETECT_REG 0x2266
80
81#define TCXO_DET_FAILED BIT(4)
82
83#define FREF_ILOAD_INT_REG 0x2084
84#define FREF_CLK_DETECT_REG 0x2086
85#define FREF_CLK_DETECT_FAIL BIT(4)
86
87/* Use this reg for masking during driver access */
88#define WL_SPARE_REG 0x2320
89#define WL_SPARE_VAL BIT(2)
90/* Bit[6:5:3] - mask wl write SYS_CLK_CFG[8:5:2:4] */
91#define WL_SPARE_MASK_8526 (BIT(6) | BIT(5) | BIT(3))
92
93#define PLL_LOCK_COUNTERS_REG 0xD8C
94#define PLL_LOCK_COUNTERS_COEX 0x0F
95#define PLL_LOCK_COUNTERS_MCS 0xF0
96#define MCS_PLL_OVERRIDE_REG 0xD90
97#define MCS_PLL_CONFIG_REG 0xD92
98#define MCS_SEL_IN_FREQ_MASK 0x0070
99#define MCS_SEL_IN_FREQ_SHIFT 4
100#define MCS_PLL_CONFIG_REG_VAL 0x73
101#define MCS_PLL_ENABLE_HP (BIT(0) | BIT(1))
102
103#define MCS_PLL_M_REG 0xD94
104#define MCS_PLL_N_REG 0xD96
105#define MCS_PLL_M_REG_VAL 0xC8
106#define MCS_PLL_N_REG_VAL 0x07
107
108#define SDIO_IO_DS 0xd14
109
110/* SDIO/wSPI DS configuration values */
111enum {
112 HCI_IO_DS_8MA = 0,
113 HCI_IO_DS_4MA = 1, /* default */
114 HCI_IO_DS_6MA = 2,
115 HCI_IO_DS_2MA = 3,
116};
117
118/* end PLL configuration algorithm for wl128x */
119
120#endif
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index cb6204f78300..e6ec16d92e65 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -266,9 +266,13 @@ struct pn533 {
266 int in_maxlen; 266 int in_maxlen;
267 struct pn533_frame *in_frame; 267 struct pn533_frame *in_frame;
268 268
269 struct tasklet_struct tasklet; 269 struct sk_buff_head resp_q;
270 struct pn533_frame *tklt_in_frame; 270
271 int tklt_in_error; 271 struct workqueue_struct *wq;
272 struct work_struct cmd_work;
273 struct work_struct mi_work;
274 struct pn533_frame *wq_in_frame;
275 int wq_in_error;
272 276
273 pn533_cmd_complete_t cmd_complete; 277 pn533_cmd_complete_t cmd_complete;
274 void *cmd_complete_arg; 278 void *cmd_complete_arg;
@@ -383,15 +387,21 @@ static bool pn533_rx_frame_is_cmd_response(struct pn533_frame *frame, u8 cmd)
383 return (PN533_FRAME_CMD(frame) == PN533_CMD_RESPONSE(cmd)); 387 return (PN533_FRAME_CMD(frame) == PN533_CMD_RESPONSE(cmd));
384} 388}
385 389
386static void pn533_tasklet_cmd_complete(unsigned long arg) 390
391static void pn533_wq_cmd_complete(struct work_struct *work)
387{ 392{
388 struct pn533 *dev = (struct pn533 *) arg; 393 struct pn533 *dev = container_of(work, struct pn533, cmd_work);
389 struct pn533_frame *in_frame = dev->tklt_in_frame; 394 struct pn533_frame *in_frame;
390 int rc; 395 int rc;
391 396
392 if (dev->tklt_in_error) 397 if (dev == NULL)
398 return;
399
400 in_frame = dev->wq_in_frame;
401
402 if (dev->wq_in_error)
393 rc = dev->cmd_complete(dev, dev->cmd_complete_arg, NULL, 403 rc = dev->cmd_complete(dev, dev->cmd_complete_arg, NULL,
394 dev->tklt_in_error); 404 dev->wq_in_error);
395 else 405 else
396 rc = dev->cmd_complete(dev, dev->cmd_complete_arg, 406 rc = dev->cmd_complete(dev, dev->cmd_complete_arg,
397 PN533_FRAME_CMD_PARAMS_PTR(in_frame), 407 PN533_FRAME_CMD_PARAMS_PTR(in_frame),
@@ -406,7 +416,7 @@ static void pn533_recv_response(struct urb *urb)
406 struct pn533 *dev = urb->context; 416 struct pn533 *dev = urb->context;
407 struct pn533_frame *in_frame; 417 struct pn533_frame *in_frame;
408 418
409 dev->tklt_in_frame = NULL; 419 dev->wq_in_frame = NULL;
410 420
411 switch (urb->status) { 421 switch (urb->status) {
412 case 0: 422 case 0:
@@ -417,36 +427,36 @@ static void pn533_recv_response(struct urb *urb)
417 case -ESHUTDOWN: 427 case -ESHUTDOWN:
418 nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with" 428 nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
419 " status: %d", urb->status); 429 " status: %d", urb->status);
420 dev->tklt_in_error = urb->status; 430 dev->wq_in_error = urb->status;
421 goto sched_tasklet; 431 goto sched_wq;
422 default: 432 default:
423 nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:" 433 nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:"
424 " %d", urb->status); 434 " %d", urb->status);
425 dev->tklt_in_error = urb->status; 435 dev->wq_in_error = urb->status;
426 goto sched_tasklet; 436 goto sched_wq;
427 } 437 }
428 438
429 in_frame = dev->in_urb->transfer_buffer; 439 in_frame = dev->in_urb->transfer_buffer;
430 440
431 if (!pn533_rx_frame_is_valid(in_frame)) { 441 if (!pn533_rx_frame_is_valid(in_frame)) {
432 nfc_dev_err(&dev->interface->dev, "Received an invalid frame"); 442 nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
433 dev->tklt_in_error = -EIO; 443 dev->wq_in_error = -EIO;
434 goto sched_tasklet; 444 goto sched_wq;
435 } 445 }
436 446
437 if (!pn533_rx_frame_is_cmd_response(in_frame, dev->cmd)) { 447 if (!pn533_rx_frame_is_cmd_response(in_frame, dev->cmd)) {
438 nfc_dev_err(&dev->interface->dev, "The received frame is not " 448 nfc_dev_err(&dev->interface->dev, "The received frame is not "
439 "response to the last command"); 449 "response to the last command");
440 dev->tklt_in_error = -EIO; 450 dev->wq_in_error = -EIO;
441 goto sched_tasklet; 451 goto sched_wq;
442 } 452 }
443 453
444 nfc_dev_dbg(&dev->interface->dev, "Received a valid frame"); 454 nfc_dev_dbg(&dev->interface->dev, "Received a valid frame");
445 dev->tklt_in_error = 0; 455 dev->wq_in_error = 0;
446 dev->tklt_in_frame = in_frame; 456 dev->wq_in_frame = in_frame;
447 457
448sched_tasklet: 458sched_wq:
449 tasklet_schedule(&dev->tasklet); 459 queue_work(dev->wq, &dev->cmd_work);
450} 460}
451 461
452static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags) 462static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
@@ -471,21 +481,21 @@ static void pn533_recv_ack(struct urb *urb)
471 case -ESHUTDOWN: 481 case -ESHUTDOWN:
472 nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with" 482 nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
473 " status: %d", urb->status); 483 " status: %d", urb->status);
474 dev->tklt_in_error = urb->status; 484 dev->wq_in_error = urb->status;
475 goto sched_tasklet; 485 goto sched_wq;
476 default: 486 default:
477 nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:" 487 nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:"
478 " %d", urb->status); 488 " %d", urb->status);
479 dev->tklt_in_error = urb->status; 489 dev->wq_in_error = urb->status;
480 goto sched_tasklet; 490 goto sched_wq;
481 } 491 }
482 492
483 in_frame = dev->in_urb->transfer_buffer; 493 in_frame = dev->in_urb->transfer_buffer;
484 494
485 if (!pn533_rx_frame_is_ack(in_frame)) { 495 if (!pn533_rx_frame_is_ack(in_frame)) {
486 nfc_dev_err(&dev->interface->dev, "Received an invalid ack"); 496 nfc_dev_err(&dev->interface->dev, "Received an invalid ack");
487 dev->tklt_in_error = -EIO; 497 dev->wq_in_error = -EIO;
488 goto sched_tasklet; 498 goto sched_wq;
489 } 499 }
490 500
491 nfc_dev_dbg(&dev->interface->dev, "Received a valid ack"); 501 nfc_dev_dbg(&dev->interface->dev, "Received a valid ack");
@@ -494,15 +504,15 @@ static void pn533_recv_ack(struct urb *urb)
494 if (rc) { 504 if (rc) {
495 nfc_dev_err(&dev->interface->dev, "usb_submit_urb failed with" 505 nfc_dev_err(&dev->interface->dev, "usb_submit_urb failed with"
496 " result %d", rc); 506 " result %d", rc);
497 dev->tklt_in_error = rc; 507 dev->wq_in_error = rc;
498 goto sched_tasklet; 508 goto sched_wq;
499 } 509 }
500 510
501 return; 511 return;
502 512
503sched_tasklet: 513sched_wq:
504 dev->tklt_in_frame = NULL; 514 dev->wq_in_frame = NULL;
505 tasklet_schedule(&dev->tasklet); 515 queue_work(dev->wq, &dev->cmd_work);
506} 516}
507 517
508static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags) 518static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
@@ -1249,6 +1259,8 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
1249 1259
1250 dev->tgt_active_prot = 0; 1260 dev->tgt_active_prot = 0;
1251 1261
1262 skb_queue_purge(&dev->resp_q);
1263
1252 pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_RELEASE); 1264 pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_RELEASE);
1253 1265
1254 tg = 1; 1266 tg = 1;
@@ -1447,11 +1459,49 @@ struct pn533_data_exchange_arg {
1447 void *cb_context; 1459 void *cb_context;
1448}; 1460};
1449 1461
1462static struct sk_buff *pn533_build_response(struct pn533 *dev)
1463{
1464 struct sk_buff *skb, *tmp, *t;
1465 unsigned int skb_len = 0, tmp_len = 0;
1466
1467 nfc_dev_dbg(&dev->interface->dev, "%s\n", __func__);
1468
1469 if (skb_queue_empty(&dev->resp_q))
1470 return NULL;
1471
1472 if (skb_queue_len(&dev->resp_q) == 1) {
1473 skb = skb_dequeue(&dev->resp_q);
1474 goto out;
1475 }
1476
1477 skb_queue_walk_safe(&dev->resp_q, tmp, t)
1478 skb_len += tmp->len;
1479
1480 nfc_dev_dbg(&dev->interface->dev, "%s total length %d\n",
1481 __func__, skb_len);
1482
1483 skb = alloc_skb(skb_len, GFP_KERNEL);
1484 if (skb == NULL)
1485 goto out;
1486
1487 skb_put(skb, skb_len);
1488
1489 skb_queue_walk_safe(&dev->resp_q, tmp, t) {
1490 memcpy(skb->data + tmp_len, tmp->data, tmp->len);
1491 tmp_len += tmp->len;
1492 }
1493
1494out:
1495 skb_queue_purge(&dev->resp_q);
1496
1497 return skb;
1498}
1499
1450static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, 1500static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
1451 u8 *params, int params_len) 1501 u8 *params, int params_len)
1452{ 1502{
1453 struct pn533_data_exchange_arg *arg = _arg; 1503 struct pn533_data_exchange_arg *arg = _arg;
1454 struct sk_buff *skb_resp = arg->skb_resp; 1504 struct sk_buff *skb = NULL, *skb_resp = arg->skb_resp;
1455 struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data; 1505 struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data;
1456 int err = 0; 1506 int err = 0;
1457 u8 status; 1507 u8 status;
@@ -1459,15 +1509,13 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
1459 1509
1460 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1510 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1461 1511
1462 dev_kfree_skb_irq(arg->skb_out); 1512 dev_kfree_skb(arg->skb_out);
1463 1513
1464 if (params_len < 0) { /* error */ 1514 if (params_len < 0) { /* error */
1465 err = params_len; 1515 err = params_len;
1466 goto error; 1516 goto error;
1467 } 1517 }
1468 1518
1469 skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
1470
1471 status = params[0]; 1519 status = params[0];
1472 1520
1473 cmd_ret = status & PN533_CMD_RET_MASK; 1521 cmd_ret = status & PN533_CMD_RET_MASK;
@@ -1478,25 +1526,27 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
1478 goto error; 1526 goto error;
1479 } 1527 }
1480 1528
1529 skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
1530 skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN);
1531 skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE);
1532 skb_queue_tail(&dev->resp_q, skb_resp);
1533
1481 if (status & PN533_CMD_MI_MASK) { 1534 if (status & PN533_CMD_MI_MASK) {
1482 /* TODO: Implement support to multi-part data exchange */ 1535 queue_work(dev->wq, &dev->mi_work);
1483 nfc_dev_err(&dev->interface->dev, "Multi-part message not yet" 1536 return -EINPROGRESS;
1484 " supported");
1485 /* Prevent the other messages from controller */
1486 pn533_send_ack(dev, GFP_ATOMIC);
1487 err = -ENOSYS;
1488 goto error;
1489 } 1537 }
1490 1538
1491 skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN); 1539 skb = pn533_build_response(dev);
1492 skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE); 1540 if (skb == NULL)
1541 goto error;
1493 1542
1494 arg->cb(arg->cb_context, skb_resp, 0); 1543 arg->cb(arg->cb_context, skb, 0);
1495 kfree(arg); 1544 kfree(arg);
1496 return 0; 1545 return 0;
1497 1546
1498error: 1547error:
1499 dev_kfree_skb_irq(skb_resp); 1548 skb_queue_purge(&dev->resp_q);
1549 dev_kfree_skb(skb_resp);
1500 arg->cb(arg->cb_context, NULL, err); 1550 arg->cb(arg->cb_context, NULL, err);
1501 kfree(arg); 1551 kfree(arg);
1502 return 0; 1552 return 0;
@@ -1571,6 +1621,68 @@ error:
1571 return rc; 1621 return rc;
1572} 1622}
1573 1623
1624static void pn533_wq_mi_recv(struct work_struct *work)
1625{
1626 struct pn533 *dev = container_of(work, struct pn533, mi_work);
1627 struct sk_buff *skb_cmd;
1628 struct pn533_data_exchange_arg *arg = dev->cmd_complete_arg;
1629 struct pn533_frame *out_frame, *in_frame;
1630 struct sk_buff *skb_resp;
1631 int skb_resp_len;
1632 int rc;
1633
1634 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1635
1636 /* This is a zero payload size skb */
1637 skb_cmd = alloc_skb(PN533_CMD_DATAEXCH_HEAD_LEN + PN533_FRAME_TAIL_SIZE,
1638 GFP_KERNEL);
1639 if (skb_cmd == NULL)
1640 goto error_cmd;
1641
1642 skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN);
1643
1644 rc = pn533_data_exchange_tx_frame(dev, skb_cmd);
1645 if (rc)
1646 goto error_frame;
1647
1648 skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
1649 PN533_CMD_DATAEXCH_DATA_MAXLEN +
1650 PN533_FRAME_TAIL_SIZE;
1651 skb_resp = alloc_skb(skb_resp_len, GFP_KERNEL);
1652 if (!skb_resp) {
1653 rc = -ENOMEM;
1654 goto error_frame;
1655 }
1656
1657 in_frame = (struct pn533_frame *) skb_resp->data;
1658 out_frame = (struct pn533_frame *) skb_cmd->data;
1659
1660 arg->skb_resp = skb_resp;
1661 arg->skb_out = skb_cmd;
1662
1663 rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
1664 skb_resp_len,
1665 pn533_data_exchange_complete,
1666 dev->cmd_complete_arg, GFP_KERNEL);
1667 if (!rc)
1668 return;
1669
1670 nfc_dev_err(&dev->interface->dev, "Error %d when trying to"
1671 " perform data_exchange", rc);
1672
1673 kfree_skb(skb_resp);
1674
1675error_frame:
1676 kfree_skb(skb_cmd);
1677
1678error_cmd:
1679 pn533_send_ack(dev, GFP_KERNEL);
1680
1681 kfree(arg);
1682
1683 up(&dev->cmd_lock);
1684}
1685
1574static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, 1686static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
1575 u8 cfgdata_len) 1687 u8 cfgdata_len)
1576{ 1688{
@@ -1668,7 +1780,15 @@ static int pn533_probe(struct usb_interface *interface,
1668 NULL, 0, 1780 NULL, 0,
1669 pn533_send_complete, dev); 1781 pn533_send_complete, dev);
1670 1782
1671 tasklet_init(&dev->tasklet, pn533_tasklet_cmd_complete, (ulong)dev); 1783 INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete);
1784 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
1785 dev->wq = alloc_workqueue("pn533",
1786 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
1787 1);
1788 if (dev->wq == NULL)
1789 goto error;
1790
1791 skb_queue_head_init(&dev->resp_q);
1672 1792
1673 usb_set_intfdata(interface, dev); 1793 usb_set_intfdata(interface, dev);
1674 1794
@@ -1678,7 +1798,7 @@ static int pn533_probe(struct usb_interface *interface,
1678 rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame, 1798 rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
1679 dev->in_maxlen); 1799 dev->in_maxlen);
1680 if (rc) 1800 if (rc)
1681 goto kill_tasklet; 1801 goto destroy_wq;
1682 1802
1683 fw_ver = (struct pn533_fw_version *) 1803 fw_ver = (struct pn533_fw_version *)
1684 PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame); 1804 PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame);
@@ -1694,7 +1814,7 @@ static int pn533_probe(struct usb_interface *interface,
1694 PN533_CMD_DATAEXCH_HEAD_LEN, 1814 PN533_CMD_DATAEXCH_HEAD_LEN,
1695 PN533_FRAME_TAIL_SIZE); 1815 PN533_FRAME_TAIL_SIZE);
1696 if (!dev->nfc_dev) 1816 if (!dev->nfc_dev)
1697 goto kill_tasklet; 1817 goto destroy_wq;
1698 1818
1699 nfc_set_parent_dev(dev->nfc_dev, &interface->dev); 1819 nfc_set_parent_dev(dev->nfc_dev, &interface->dev);
1700 nfc_set_drvdata(dev->nfc_dev, dev); 1820 nfc_set_drvdata(dev->nfc_dev, dev);
@@ -1720,8 +1840,8 @@ static int pn533_probe(struct usb_interface *interface,
1720 1840
1721free_nfc_dev: 1841free_nfc_dev:
1722 nfc_free_device(dev->nfc_dev); 1842 nfc_free_device(dev->nfc_dev);
1723kill_tasklet: 1843destroy_wq:
1724 tasklet_kill(&dev->tasklet); 1844 destroy_workqueue(dev->wq);
1725error: 1845error:
1726 kfree(dev->in_frame); 1846 kfree(dev->in_frame);
1727 usb_free_urb(dev->in_urb); 1847 usb_free_urb(dev->in_urb);
@@ -1744,7 +1864,9 @@ static void pn533_disconnect(struct usb_interface *interface)
1744 usb_kill_urb(dev->in_urb); 1864 usb_kill_urb(dev->in_urb);
1745 usb_kill_urb(dev->out_urb); 1865 usb_kill_urb(dev->out_urb);
1746 1866
1747 tasklet_kill(&dev->tasklet); 1867 destroy_workqueue(dev->wq);
1868
1869 skb_queue_purge(&dev->resp_q);
1748 1870
1749 kfree(dev->in_frame); 1871 kfree(dev->in_frame);
1750 usb_free_urb(dev->in_urb); 1872 usb_free_urb(dev->in_urb);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 483c0adcad87..2574abde8d99 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -45,6 +45,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
45 for (i=0; i<PHY_MAX_ADDR; i++) 45 for (i=0; i<PHY_MAX_ADDR; i++)
46 mdio->irq[i] = PHY_POLL; 46 mdio->irq[i] = PHY_POLL;
47 47
48 mdio->dev.of_node = np;
49
48 /* Register the MDIO bus */ 50 /* Register the MDIO bus */
49 rc = mdiobus_register(mdio); 51 rc = mdiobus_register(mdio);
50 if (rc) 52 if (rc)
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 8644d5372e7f..42cfcd9eb9aa 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -44,6 +44,7 @@
44#include <asm/ropes.h> 44#include <asm/ropes.h>
45#include <asm/mckinley.h> /* for proc_mckinley_root */ 45#include <asm/mckinley.h> /* for proc_mckinley_root */
46#include <asm/runway.h> /* for proc_runway_root */ 46#include <asm/runway.h> /* for proc_runway_root */
47#include <asm/page.h> /* for PAGE0 */
47#include <asm/pdc.h> /* for PDC_MODEL_* */ 48#include <asm/pdc.h> /* for PDC_MODEL_* */
48#include <asm/pdcpat.h> /* for is_pdc_pat() */ 49#include <asm/pdcpat.h> /* for is_pdc_pat() */
49#include <asm/parisc-device.h> 50#include <asm/parisc-device.h>
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 2c224edae1ac..01c001f3b766 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
42obj-$(CONFIG_PARISC) += setup-bus.o 42obj-$(CONFIG_PARISC) += setup-bus.o
43obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o 43obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
44obj-$(CONFIG_PPC) += setup-bus.o 44obj-$(CONFIG_PPC) += setup-bus.o
45obj-$(CONFIG_FRV) += setup-bus.o
45obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o 46obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
46obj-$(CONFIG_X86_VISWS) += setup-irq.o 47obj-$(CONFIG_X86_VISWS) += setup-irq.o
47obj-$(CONFIG_MN10300) += setup-bus.o 48obj-$(CONFIG_MN10300) += setup-bus.o
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 0f150f271c2a..61e2fefeedab 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -200,7 +200,7 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
200 return PCI_D1; 200 return PCI_D1;
201 case ACPI_STATE_D2: 201 case ACPI_STATE_D2:
202 return PCI_D2; 202 return PCI_D2;
203 case ACPI_STATE_D3: 203 case ACPI_STATE_D3_HOT:
204 return PCI_D3hot; 204 return PCI_D3hot;
205 case ACPI_STATE_D3_COLD: 205 case ACPI_STATE_D3_COLD:
206 return PCI_D3cold; 206 return PCI_D3cold;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6279d5b85993..2a7521677541 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2626,6 +2626,18 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
2626DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, 2626DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
2627 quirk_msi_intx_disable_bug); 2627 quirk_msi_intx_disable_bug);
2628 2628
2629DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
2630 quirk_msi_intx_disable_bug);
2631DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
2632 quirk_msi_intx_disable_bug);
2633DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
2634 quirk_msi_intx_disable_bug);
2635DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
2636 quirk_msi_intx_disable_bug);
2637DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
2638 quirk_msi_intx_disable_bug);
2639DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
2640 quirk_msi_intx_disable_bug);
2629#endif /* CONFIG_PCI_MSI */ 2641#endif /* CONFIG_PCI_MSI */
2630 2642
2631/* Allow manual resource allocation for PCI hotplug bridges 2643/* Allow manual resource allocation for PCI hotplug bridges
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index bc8384c6f3eb..639db4d0aa76 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -50,7 +50,7 @@
50 */ 50 */
51#undef START_IN_KERNEL_MODE 51#undef START_IN_KERNEL_MODE
52 52
53#define DRV_VER "0.5.24" 53#define DRV_VER "0.5.26"
54 54
55/* 55/*
56 * According to the Atom N270 datasheet, 56 * According to the Atom N270 datasheet,
@@ -83,8 +83,8 @@ static int kernelmode;
83#endif 83#endif
84 84
85static unsigned int interval = 10; 85static unsigned int interval = 10;
86static unsigned int fanon = 63000; 86static unsigned int fanon = 60000;
87static unsigned int fanoff = 58000; 87static unsigned int fanoff = 53000;
88static unsigned int verbose; 88static unsigned int verbose;
89static unsigned int fanstate = ACERHDF_FAN_AUTO; 89static unsigned int fanstate = ACERHDF_FAN_AUTO;
90static char force_bios[16]; 90static char force_bios[16];
@@ -150,6 +150,8 @@ static const struct bios_settings_t bios_tbl[] = {
150 {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} }, 150 {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
151 {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} }, 151 {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
152 {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} }, 152 {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
153 /* LT1005u */
154 {"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
153 /* Acer 1410 */ 155 /* Acer 1410 */
154 {"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, 156 {"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
155 {"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} }, 157 {"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
@@ -161,6 +163,7 @@ static const struct bios_settings_t bios_tbl[] = {
161 {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} }, 163 {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
162 {"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} }, 164 {"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
163 {"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, 165 {"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
166 {"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
164 /* Acer 1810xx */ 167 /* Acer 1810xx */
165 {"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, 168 {"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
166 {"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, 169 {"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
@@ -183,29 +186,44 @@ static const struct bios_settings_t bios_tbl[] = {
183 {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, 186 {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
184 {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, 187 {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
185 {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} }, 188 {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
189 {"Acer", "Aspire 1810T", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
186 /* Acer 531 */ 190 /* Acer 531 */
191 {"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00} },
187 {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} }, 192 {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
193 {"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
194 /* Acer 751 */
195 {"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00} },
196 /* Acer 1825 */
197 {"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
198 {"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
199 /* Acer TravelMate 7730 */
200 {"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00} },
188 /* Gateway */ 201 /* Gateway */
189 {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} }, 202 {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
190 {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} }, 203 {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
191 {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} }, 204 {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} },
192 {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} }, 205 {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
193 {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} }, 206 {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
207 {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
194 /* Packard Bell */ 208 /* Packard Bell */
195 {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} }, 209 {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
196 {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, 210 {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
197 {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} }, 211 {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
198 {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, 212 {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
199 {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} }, 213 {"Packard Bell", "ENBFT", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
200 {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} }, 214 {"Packard Bell", "ENBFT", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
201 {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, 215 {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
202 {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} }, 216 {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
203 {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} }, 217 {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
204 {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} }, 218 {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
205 {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} }, 219 {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
206 {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} }, 220 {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
207 {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} }, 221 {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
208 {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} }, 222 {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
223 {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
224 {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
225 {"Packard Bell", "DOTMA", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
226 {"Packard Bell", "DOTVR46", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
209 /* pewpew-terminator */ 227 /* pewpew-terminator */
210 {"", "", "", 0, 0, {0, 0} } 228 {"", "", "", 0, 0, {0, 0} }
211}; 229};
@@ -701,15 +719,20 @@ MODULE_LICENSE("GPL");
701MODULE_AUTHOR("Peter Feuerer"); 719MODULE_AUTHOR("Peter Feuerer");
702MODULE_DESCRIPTION("Aspire One temperature and fan driver"); 720MODULE_DESCRIPTION("Aspire One temperature and fan driver");
703MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:"); 721MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
722MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:");
704MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:"); 723MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
705MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:"); 724MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
725MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1825PTZ:");
706MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:"); 726MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
727MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:");
707MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:"); 728MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
708MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:"); 729MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
709MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:"); 730MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
710MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:"); 731MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:");
711MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:"); 732MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
733MODULE_ALIAS("dmi:*:*Packard*Bell*:pnENBFT*:");
712MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:"); 734MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
735MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTVR46*:");
713 736
714module_init(acerhdf_init); 737module_init(acerhdf_init);
715module_exit(acerhdf_exit); 738module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index a05fc9c955d8..e6c08ee8d46c 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -212,6 +212,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
212 }, 212 },
213 .driver_data = &quirk_dell_vostro_v130, 213 .driver_data = &quirk_dell_vostro_v130,
214 }, 214 },
215 { }
215}; 216};
216 217
217static struct calling_interface_buffer *buffer; 218static struct calling_interface_buffer *buffer;
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index f7ba316e0ed6..0ffdb3cde2bb 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1565,7 +1565,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
1565 ips->poll_turbo_status = true; 1565 ips->poll_turbo_status = true;
1566 1566
1567 if (!ips_get_i915_syms(ips)) { 1567 if (!ips_get_i915_syms(ips)) {
1568 dev_err(&dev->dev, "failed to get i915 symbols, graphics turbo disabled\n"); 1568 dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n");
1569 ips->gpu_turbo_enabled = false; 1569 ips->gpu_turbo_enabled = false;
1570 } else { 1570 } else {
1571 dev_dbg(&dev->dev, "graphics turbo enabled\n"); 1571 dev_dbg(&dev->dev, "graphics turbo enabled\n");
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 0a3594c7e912..bcbad8452a6f 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -78,7 +78,7 @@ static int __devinit mfld_pb_probe(struct platform_device *pdev)
78 78
79 input_set_capability(input, EV_KEY, KEY_POWER); 79 input_set_capability(input, EV_KEY, KEY_POWER);
80 80
81 error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0, 81 error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND,
82 DRIVER_NAME, input); 82 DRIVER_NAME, input);
83 if (error) { 83 if (error) {
84 dev_err(&pdev->dev, "Unable to request irq %d for mfld power" 84 dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index cd9bc3b129bc..5648dad71fb3 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -78,9 +78,13 @@ config PTP_1588_CLOCK_PCH
78 depends on PCH_GBE 78 depends on PCH_GBE
79 help 79 help
80 This driver adds support for using the PCH EG20T as a PTP 80 This driver adds support for using the PCH EG20T as a PTP
81 clock. This clock is only useful if your PTP programs are 81 clock. The hardware supports time stamping of PTP packets
82 getting hardware time stamps on the PTP Ethernet packets 82 when using the end-to-end delay (E2E) mechansim. The peer
83 using the SO_TIMESTAMPING API. 83 delay mechansim (P2P) is not supported.
84
85 This clock is only useful if your PTP programs are getting
86 hardware time stamps on the PTP Ethernet packets using the
87 SO_TIMESTAMPING API.
84 88
85 To compile this driver as a module, choose M here: the module 89 To compile this driver as a module, choose M here: the module
86 will be called ptp_pch. 90 will be called ptp_pch.
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index f519a131238d..1e528b539a07 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -304,6 +304,12 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
304} 304}
305EXPORT_SYMBOL(ptp_clock_event); 305EXPORT_SYMBOL(ptp_clock_event);
306 306
307int ptp_clock_index(struct ptp_clock *ptp)
308{
309 return ptp->index;
310}
311EXPORT_SYMBOL(ptp_clock_index);
312
307/* module operations */ 313/* module operations */
308 314
309static void __exit ptp_exit(void) 315static void __exit ptp_exit(void)
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index 6f2782bb5f41..e03c40692b00 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -284,6 +284,7 @@ static void __exit ptp_ixp_exit(void)
284{ 284{
285 free_irq(MASTER_IRQ, &ixp_clock); 285 free_irq(MASTER_IRQ, &ixp_clock);
286 free_irq(SLAVE_IRQ, &ixp_clock); 286 free_irq(SLAVE_IRQ, &ixp_clock);
287 ixp46x_phc_index = -1;
287 ptp_clock_unregister(ixp_clock.ptp_clock); 288 ptp_clock_unregister(ixp_clock.ptp_clock);
288} 289}
289 290
@@ -302,6 +303,8 @@ static int __init ptp_ixp_init(void)
302 if (IS_ERR(ixp_clock.ptp_clock)) 303 if (IS_ERR(ixp_clock.ptp_clock))
303 return PTR_ERR(ixp_clock.ptp_clock); 304 return PTR_ERR(ixp_clock.ptp_clock);
304 305
306 ixp46x_phc_index = ptp_clock_index(ixp_clock.ptp_clock);
307
305 __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend); 308 __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend);
306 __raw_writel(1, &ixp_clock.regs->trgt_lo); 309 __raw_writel(1, &ixp_clock.regs->trgt_lo);
307 __raw_writel(0, &ixp_clock.regs->trgt_hi); 310 __raw_writel(0, &ixp_clock.regs->trgt_hi);
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 375eb04c16ea..3a9c17eced10 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/ptp_clock_kernel.h> 32#include <linux/ptp_clock_kernel.h>
33#include <linux/slab.h>
33 34
34#define STATION_ADDR_LEN 20 35#define STATION_ADDR_LEN 20
35#define PCI_DEVICE_ID_PCH_1588 0x8819 36#define PCI_DEVICE_ID_PCH_1588 0x8819
@@ -261,6 +262,7 @@ u64 pch_rx_snap_read(struct pci_dev *pdev)
261 262
262 ns = ((u64) hi) << 32; 263 ns = ((u64) hi) << 32;
263 ns |= lo; 264 ns |= lo;
265 ns <<= TICKS_NS_SHIFT;
264 266
265 return ns; 267 return ns;
266} 268}
@@ -277,6 +279,7 @@ u64 pch_tx_snap_read(struct pci_dev *pdev)
277 279
278 ns = ((u64) hi) << 32; 280 ns = ((u64) hi) << 32;
279 ns |= lo; 281 ns |= lo;
282 ns <<= TICKS_NS_SHIFT;
280 283
281 return ns; 284 return ns;
282} 285}
@@ -306,7 +309,7 @@ static void pch_reset(struct pch_dev *chip)
306 * traffic on the ethernet interface 309 * traffic on the ethernet interface
307 * @addr: dress which contain the column separated address to be used. 310 * @addr: dress which contain the column separated address to be used.
308 */ 311 */
309static int pch_set_station_address(u8 *addr, struct pci_dev *pdev) 312int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
310{ 313{
311 s32 i; 314 s32 i;
312 struct pch_dev *chip = pci_get_drvdata(pdev); 315 struct pch_dev *chip = pci_get_drvdata(pdev);
@@ -350,6 +353,7 @@ static int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
350 } 353 }
351 return 0; 354 return 0;
352} 355}
356EXPORT_SYMBOL(pch_set_station_address);
353 357
354/* 358/*
355 * Interrupt service routine 359 * Interrupt service routine
@@ -649,8 +653,6 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
649 iowrite32(1, &chip->regs->trgt_lo); 653 iowrite32(1, &chip->regs->trgt_lo);
650 iowrite32(0, &chip->regs->trgt_hi); 654 iowrite32(0, &chip->regs->trgt_hi);
651 iowrite32(PCH_TSE_TTIPEND, &chip->regs->event); 655 iowrite32(PCH_TSE_TTIPEND, &chip->regs->event);
652 /* Version: IEEE1588 v1 and IEEE1588-2008, Mode: All Evwnt, Locked */
653 iowrite32(0x80020000, &chip->regs->ch_control);
654 656
655 pch_eth_enable_set(chip); 657 pch_eth_enable_set(chip);
656 658
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e70dd382a009..046fb1bd8619 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1431,7 +1431,10 @@ void devm_regulator_put(struct regulator *regulator)
1431 1431
1432 rc = devres_destroy(regulator->dev, devm_regulator_release, 1432 rc = devres_destroy(regulator->dev, devm_regulator_release,
1433 devm_regulator_match, regulator); 1433 devm_regulator_match, regulator);
1434 WARN_ON(rc); 1434 if (rc == 0)
1435 regulator_put(regulator);
1436 else
1437 WARN_ON(rc);
1435} 1438}
1436EXPORT_SYMBOL_GPL(devm_regulator_put); 1439EXPORT_SYMBOL_GPL(devm_regulator_put);
1437 1440
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 96579296f04d..17a58c56eebf 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -684,7 +684,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
684 } 684 }
685 685
686 new_val++; 686 new_val++;
687 } while (desc->min + desc->step + new_val <= desc->max); 687 } while (desc->min + desc->step * new_val <= desc->max);
688 688
689 new_idx = tmp_idx; 689 new_idx = tmp_idx;
690 new_val = tmp_val; 690 new_val = tmp_val;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index ee15c68fb519..e756a0df3664 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -354,7 +354,7 @@ static void __rproc_free_vrings(struct rproc_vdev *rvdev, int i)
354{ 354{
355 struct rproc *rproc = rvdev->rproc; 355 struct rproc *rproc = rvdev->rproc;
356 356
357 for (i--; i > 0; i--) { 357 for (i--; i >= 0; i--) {
358 struct rproc_vring *rvring = &rvdev->vring[i]; 358 struct rproc_vring *rvring = &rvdev->vring[i];
359 int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); 359 int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
360 360
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cd188ab72f79..c293d0cdb104 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -902,6 +902,7 @@ read_rtc:
902 } 902 }
903 ds1307->nvram->attr.name = "nvram"; 903 ds1307->nvram->attr.name = "nvram";
904 ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR; 904 ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR;
905 sysfs_bin_attr_init(ds1307->nvram);
905 ds1307->nvram->read = ds1307_nvram_read, 906 ds1307->nvram->read = ds1307_nvram_read,
906 ds1307->nvram->write = ds1307_nvram_write, 907 ds1307->nvram->write = ds1307_nvram_write,
907 ds1307->nvram->size = chip->nvram_size; 908 ds1307->nvram->size = chip->nvram_size;
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 42f5f829b3ee..029e421baaed 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -360,12 +360,11 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op)
360 &mpc5200_rtc_ops, THIS_MODULE); 360 &mpc5200_rtc_ops, THIS_MODULE);
361 } 361 }
362 362
363 rtc->rtc->uie_unsupported = 1;
364
365 if (IS_ERR(rtc->rtc)) { 363 if (IS_ERR(rtc->rtc)) {
366 err = PTR_ERR(rtc->rtc); 364 err = PTR_ERR(rtc->rtc);
367 goto out_free_irq; 365 goto out_free_irq;
368 } 366 }
367 rtc->rtc->uie_unsupported = 1;
369 368
370 return 0; 369 return 0;
371 370
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 684ef4bbfce4..f027c063fb20 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -312,6 +312,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
312 int ret; 312 int ret;
313 struct pl031_local *ldata; 313 struct pl031_local *ldata;
314 struct rtc_class_ops *ops = id->data; 314 struct rtc_class_ops *ops = id->data;
315 unsigned long time;
315 316
316 ret = amba_request_regions(adev, NULL); 317 ret = amba_request_regions(adev, NULL);
317 if (ret) 318 if (ret)
@@ -343,6 +344,23 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
343 writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN, 344 writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
344 ldata->base + RTC_CR); 345 ldata->base + RTC_CR);
345 346
347 /*
348 * On ST PL031 variants, the RTC reset value does not provide correct
349 * weekday for 2000-01-01. Correct the erroneous sunday to saturday.
350 */
351 if (ldata->hw_designer == AMBA_VENDOR_ST) {
352 if (readl(ldata->base + RTC_YDR) == 0x2000) {
353 time = readl(ldata->base + RTC_DR);
354 if ((time &
355 (RTC_MON_MASK | RTC_MDAY_MASK | RTC_WDAY_MASK))
356 == 0x02120000) {
357 time = time | (0x7 << RTC_WDAY_SHIFT);
358 writel(0x2000, ldata->base + RTC_YLR);
359 writel(time, ldata->base + RTC_LR);
360 }
361 }
362 }
363
346 ldata->rtc = rtc_device_register("pl031", &adev->dev, ops, 364 ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
347 THIS_MODULE); 365 THIS_MODULE);
348 if (IS_ERR(ldata->rtc)) { 366 if (IS_ERR(ldata->rtc)) {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 231a1d85127b..36506366158d 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -352,7 +352,17 @@ out:
352 352
353static int sclp_assign_storage(u16 rn) 353static int sclp_assign_storage(u16 rn)
354{ 354{
355 return do_assign_storage(0x000d0001, rn); 355 unsigned long long start, address;
356 int rc;
357
358 rc = do_assign_storage(0x000d0001, rn);
359 if (rc)
360 goto out;
361 start = address = rn2addr(rn);
362 for (; address < start + rzm; address += PAGE_SIZE)
363 page_set_storage_key(address, PAGE_DEFAULT_KEY, 0);
364out:
365 return rc;
356} 366}
357 367
358static int sclp_unassign_storage(u16 rn) 368static int sclp_unassign_storage(u16 rn)
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 267b54e8ff5a..bc6c7cfd36b6 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -154,12 +154,6 @@ struct tape_discipline {
154 struct tape_request *(*read_block)(struct tape_device *, size_t); 154 struct tape_request *(*read_block)(struct tape_device *, size_t);
155 struct tape_request *(*write_block)(struct tape_device *, size_t); 155 struct tape_request *(*write_block)(struct tape_device *, size_t);
156 void (*process_eov)(struct tape_device*); 156 void (*process_eov)(struct tape_device*);
157#ifdef CONFIG_S390_TAPE_BLOCK
158 /* Block device stuff. */
159 struct tape_request *(*bread)(struct tape_device *, struct request *);
160 void (*check_locate)(struct tape_device *, struct tape_request *);
161 void (*free_bread)(struct tape_request *);
162#endif
163 /* ioctl function for additional ioctls. */ 157 /* ioctl function for additional ioctls. */
164 int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long); 158 int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
165 /* Array of tape commands with TAPE_NR_MTOPS entries */ 159 /* Array of tape commands with TAPE_NR_MTOPS entries */
@@ -182,26 +176,6 @@ struct tape_char_data {
182 int block_size; /* of size block_size. */ 176 int block_size; /* of size block_size. */
183}; 177};
184 178
185#ifdef CONFIG_S390_TAPE_BLOCK
186/* Block Frontend Data */
187struct tape_blk_data
188{
189 struct tape_device * device;
190 /* Block device request queue. */
191 struct request_queue * request_queue;
192 spinlock_t request_queue_lock;
193
194 /* Task to move entries from block request to CCS request queue. */
195 struct work_struct requeue_task;
196 atomic_t requeue_scheduled;
197
198 /* Current position on the tape. */
199 long block_position;
200 int medium_changed;
201 struct gendisk * disk;
202};
203#endif
204
205/* Tape Info */ 179/* Tape Info */
206struct tape_device { 180struct tape_device {
207 /* entry in tape_device_list */ 181 /* entry in tape_device_list */
@@ -248,10 +222,6 @@ struct tape_device {
248 222
249 /* Character device frontend data */ 223 /* Character device frontend data */
250 struct tape_char_data char_data; 224 struct tape_char_data char_data;
251#ifdef CONFIG_S390_TAPE_BLOCK
252 /* Block dev frontend data */
253 struct tape_blk_data blk_data;
254#endif
255 225
256 /* Function to start or stop the next request later. */ 226 /* Function to start or stop the next request later. */
257 struct delayed_work tape_dnr; 227 struct delayed_work tape_dnr;
@@ -313,19 +283,6 @@ extern void tapechar_exit(void);
313extern int tapechar_setup_device(struct tape_device *); 283extern int tapechar_setup_device(struct tape_device *);
314extern void tapechar_cleanup_device(struct tape_device *); 284extern void tapechar_cleanup_device(struct tape_device *);
315 285
316/* Externals from tape_block.c */
317#ifdef CONFIG_S390_TAPE_BLOCK
318extern int tapeblock_init (void);
319extern void tapeblock_exit(void);
320extern int tapeblock_setup_device(struct tape_device *);
321extern void tapeblock_cleanup_device(struct tape_device *);
322#else
323static inline int tapeblock_init (void) {return 0;}
324static inline void tapeblock_exit (void) {;}
325static inline int tapeblock_setup_device(struct tape_device *t) {return 0;}
326static inline void tapeblock_cleanup_device (struct tape_device *t) {;}
327#endif
328
329/* tape initialisation functions */ 286/* tape initialisation functions */
330#ifdef CONFIG_PROC_FS 287#ifdef CONFIG_PROC_FS
331extern void tape_proc_init (void); 288extern void tape_proc_init (void);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 934ef33eb9a4..b28de80b7ca4 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -323,20 +323,6 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
323 inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0; 323 inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
324 sense = irb->ecw; 324 sense = irb->ecw;
325 325
326#ifdef CONFIG_S390_TAPE_BLOCK
327 if (request->op == TO_BLOCK) {
328 /*
329 * Recovery for block device requests. Set the block_position
330 * to something invalid and retry.
331 */
332 device->blk_data.block_position = -1;
333 if (request->retries-- <= 0)
334 return tape_34xx_erp_failed(request, -EIO);
335 else
336 return tape_34xx_erp_retry(request);
337 }
338#endif
339
340 if ( 326 if (
341 sense[0] & SENSE_COMMAND_REJECT && 327 sense[0] & SENSE_COMMAND_REJECT &&
342 sense[1] & SENSE_WRITE_PROTECT 328 sense[1] & SENSE_WRITE_PROTECT
@@ -1129,123 +1115,6 @@ tape_34xx_mtseek(struct tape_device *device, int mt_count)
1129 return tape_do_io_free(device, request); 1115 return tape_do_io_free(device, request);
1130} 1116}
1131 1117
1132#ifdef CONFIG_S390_TAPE_BLOCK
1133/*
1134 * Tape block read for 34xx.
1135 */
1136static struct tape_request *
1137tape_34xx_bread(struct tape_device *device, struct request *req)
1138{
1139 struct tape_request *request;
1140 struct ccw1 *ccw;
1141 int count = 0;
1142 unsigned off;
1143 char *dst;
1144 struct bio_vec *bv;
1145 struct req_iterator iter;
1146 struct tape_34xx_block_id * start_block;
1147
1148 DBF_EVENT(6, "xBREDid:");
1149
1150 /* Count the number of blocks for the request. */
1151 rq_for_each_segment(bv, req, iter)
1152 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
1153
1154 /* Allocate the ccw request. */
1155 request = tape_alloc_request(3+count+1, 8);
1156 if (IS_ERR(request))
1157 return request;
1158
1159 /* Setup ccws. */
1160 request->op = TO_BLOCK;
1161 start_block = (struct tape_34xx_block_id *) request->cpdata;
1162 start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
1163 DBF_EVENT(6, "start_block = %i\n", start_block->block);
1164
1165 ccw = request->cpaddr;
1166 ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
1167
1168 /*
1169 * We always setup a nop after the mode set ccw. This slot is
1170 * used in tape_std_check_locate to insert a locate ccw if the
1171 * current tape position doesn't match the start block to be read.
1172 * The second nop will be filled with a read block id which is in
1173 * turn used by tape_34xx_free_bread to populate the segment bid
1174 * table.
1175 */
1176 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1177 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1178
1179 rq_for_each_segment(bv, req, iter) {
1180 dst = kmap(bv->bv_page) + bv->bv_offset;
1181 for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
1182 ccw->flags = CCW_FLAG_CC;
1183 ccw->cmd_code = READ_FORWARD;
1184 ccw->count = TAPEBLOCK_HSEC_SIZE;
1185 set_normalized_cda(ccw, (void*) __pa(dst));
1186 ccw++;
1187 dst += TAPEBLOCK_HSEC_SIZE;
1188 }
1189 }
1190
1191 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
1192 DBF_EVENT(6, "xBREDccwg\n");
1193 return request;
1194}
1195
1196static void
1197tape_34xx_free_bread (struct tape_request *request)
1198{
1199 struct ccw1* ccw;
1200
1201 ccw = request->cpaddr;
1202 if ((ccw + 2)->cmd_code == READ_BLOCK_ID) {
1203 struct {
1204 struct tape_34xx_block_id cbid;
1205 struct tape_34xx_block_id dbid;
1206 } __attribute__ ((packed)) *rbi_data;
1207
1208 rbi_data = request->cpdata;
1209
1210 if (request->device)
1211 tape_34xx_add_sbid(request->device, rbi_data->cbid);
1212 }
1213
1214 /* Last ccw is a nop and doesn't need clear_normalized_cda */
1215 for (; ccw->flags & CCW_FLAG_CC; ccw++)
1216 if (ccw->cmd_code == READ_FORWARD)
1217 clear_normalized_cda(ccw);
1218 tape_free_request(request);
1219}
1220
1221/*
1222 * check_locate is called just before the tape request is passed to
1223 * the common io layer for execution. It has to check the current
1224 * tape position and insert a locate ccw if it doesn't match the
1225 * start block for the request.
1226 */
1227static void
1228tape_34xx_check_locate(struct tape_device *device, struct tape_request *request)
1229{
1230 struct tape_34xx_block_id * start_block;
1231
1232 start_block = (struct tape_34xx_block_id *) request->cpdata;
1233 if (start_block->block == device->blk_data.block_position)
1234 return;
1235
1236 DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof);
1237 start_block->wrap = 0;
1238 start_block->segment = 1;
1239 start_block->format = (*device->modeset_byte & 0x08) ?
1240 TAPE34XX_FMT_3480_XF :
1241 TAPE34XX_FMT_3480;
1242 start_block->block = start_block->block + device->bof;
1243 tape_34xx_merge_sbid(device, start_block);
1244 tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
1245 tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata);
1246}
1247#endif
1248
1249/* 1118/*
1250 * List of 3480/3490 magnetic tape commands. 1119 * List of 3480/3490 magnetic tape commands.
1251 */ 1120 */
@@ -1295,11 +1164,6 @@ static struct tape_discipline tape_discipline_34xx = {
1295 .irq = tape_34xx_irq, 1164 .irq = tape_34xx_irq,
1296 .read_block = tape_std_read_block, 1165 .read_block = tape_std_read_block,
1297 .write_block = tape_std_write_block, 1166 .write_block = tape_std_write_block,
1298#ifdef CONFIG_S390_TAPE_BLOCK
1299 .bread = tape_34xx_bread,
1300 .free_bread = tape_34xx_free_bread,
1301 .check_locate = tape_34xx_check_locate,
1302#endif
1303 .ioctl_fn = tape_34xx_ioctl, 1167 .ioctl_fn = tape_34xx_ioctl,
1304 .mtop_array = tape_34xx_mtop 1168 .mtop_array = tape_34xx_mtop
1305}; 1169};
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 49c6aab7ad78..a5c6614b0db2 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -670,92 +670,6 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
670 return 0; 670 return 0;
671} 671}
672 672
673#ifdef CONFIG_S390_TAPE_BLOCK
674/*
675 * Tape Block READ
676 */
677static struct tape_request *
678tape_3590_bread(struct tape_device *device, struct request *req)
679{
680 struct tape_request *request;
681 struct ccw1 *ccw;
682 int count = 0, start_block;
683 unsigned off;
684 char *dst;
685 struct bio_vec *bv;
686 struct req_iterator iter;
687
688 DBF_EVENT(6, "xBREDid:");
689 start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
690 DBF_EVENT(6, "start_block = %i\n", start_block);
691
692 rq_for_each_segment(bv, req, iter)
693 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
694
695 request = tape_alloc_request(2 + count + 1, 4);
696 if (IS_ERR(request))
697 return request;
698 request->op = TO_BLOCK;
699 *(__u32 *) request->cpdata = start_block;
700 ccw = request->cpaddr;
701 ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
702
703 /*
704 * We always setup a nop after the mode set ccw. This slot is
705 * used in tape_std_check_locate to insert a locate ccw if the
706 * current tape position doesn't match the start block to be read.
707 */
708 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
709
710 rq_for_each_segment(bv, req, iter) {
711 dst = page_address(bv->bv_page) + bv->bv_offset;
712 for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
713 ccw->flags = CCW_FLAG_CC;
714 ccw->cmd_code = READ_FORWARD;
715 ccw->count = TAPEBLOCK_HSEC_SIZE;
716 set_normalized_cda(ccw, (void *) __pa(dst));
717 ccw++;
718 dst += TAPEBLOCK_HSEC_SIZE;
719 }
720 BUG_ON(off > bv->bv_len);
721 }
722 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
723 DBF_EVENT(6, "xBREDccwg\n");
724 return request;
725}
726
727static void
728tape_3590_free_bread(struct tape_request *request)
729{
730 struct ccw1 *ccw;
731
732 /* Last ccw is a nop and doesn't need clear_normalized_cda */
733 for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++)
734 if (ccw->cmd_code == READ_FORWARD)
735 clear_normalized_cda(ccw);
736 tape_free_request(request);
737}
738
739/*
740 * check_locate is called just before the tape request is passed to
741 * the common io layer for execution. It has to check the current
742 * tape position and insert a locate ccw if it doesn't match the
743 * start block for the request.
744 */
745static void
746tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
747{
748 __u32 *start_block;
749
750 start_block = (__u32 *) request->cpdata;
751 if (*start_block != device->blk_data.block_position) {
752 /* Add the start offset of the file to get the real block. */
753 *start_block += device->bof;
754 tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
755 }
756}
757#endif
758
759static void tape_3590_med_state_set(struct tape_device *device, 673static void tape_3590_med_state_set(struct tape_device *device,
760 struct tape_3590_med_sense *sense) 674 struct tape_3590_med_sense *sense)
761{ 675{
@@ -1423,20 +1337,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1423{ 1337{
1424 struct tape_3590_sense *sense; 1338 struct tape_3590_sense *sense;
1425 1339
1426#ifdef CONFIG_S390_TAPE_BLOCK
1427 if (request->op == TO_BLOCK) {
1428 /*
1429 * Recovery for block device requests. Set the block_position
1430 * to something invalid and retry.
1431 */
1432 device->blk_data.block_position = -1;
1433 if (request->retries-- <= 0)
1434 return tape_3590_erp_failed(device, request, irb, -EIO);
1435 else
1436 return tape_3590_erp_retry(device, request, irb);
1437 }
1438#endif
1439
1440 sense = (struct tape_3590_sense *) irb->ecw; 1340 sense = (struct tape_3590_sense *) irb->ecw;
1441 1341
1442 DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc); 1342 DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
@@ -1729,11 +1629,6 @@ static struct tape_discipline tape_discipline_3590 = {
1729 .irq = tape_3590_irq, 1629 .irq = tape_3590_irq,
1730 .read_block = tape_std_read_block, 1630 .read_block = tape_std_read_block,
1731 .write_block = tape_std_write_block, 1631 .write_block = tape_std_write_block,
1732#ifdef CONFIG_S390_TAPE_BLOCK
1733 .bread = tape_3590_bread,
1734 .free_bread = tape_3590_free_bread,
1735 .check_locate = tape_3590_check_locate,
1736#endif
1737 .ioctl_fn = tape_3590_ioctl, 1632 .ioctl_fn = tape_3590_ioctl,
1738 .mtop_array = tape_3590_mtop 1633 .mtop_array = tape_3590_mtop
1739}; 1634};
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 87cd0ab242de..46886a7578c6 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -161,11 +161,6 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
161 if (rc) 161 if (rc)
162 return rc; 162 return rc;
163 163
164#ifdef CONFIG_S390_TAPE_BLOCK
165 /* Changes position. */
166 device->blk_data.medium_changed = 1;
167#endif
168
169 DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); 164 DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
170 /* Let the discipline build the ccw chain. */ 165 /* Let the discipline build the ccw chain. */
171 request = device->discipline->read_block(device, block_size); 166 request = device->discipline->read_block(device, block_size);
@@ -218,11 +213,6 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
218 if (rc) 213 if (rc)
219 return rc; 214 return rc;
220 215
221#ifdef CONFIG_S390_TAPE_BLOCK
222 /* Changes position. */
223 device->blk_data.medium_changed = 1;
224#endif
225
226 DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size); 216 DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
227 DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks); 217 DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
228 /* Let the discipline build the ccw chain. */ 218 /* Let the discipline build the ccw chain. */
@@ -379,9 +369,6 @@ __tapechar_ioctl(struct tape_device *device,
379 case MTBSFM: 369 case MTBSFM:
380 case MTFSFM: 370 case MTFSFM:
381 case MTSEEK: 371 case MTSEEK:
382#ifdef CONFIG_S390_TAPE_BLOCK
383 device->blk_data.medium_changed = 1;
384#endif
385 if (device->required_tapemarks) 372 if (device->required_tapemarks)
386 tape_std_terminate_write(device); 373 tape_std_terminate_write(device);
387 default: 374 default:
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index b3a3e8e8656e..585618663ba4 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -401,9 +401,6 @@ tape_generic_online(struct tape_device *device,
401 rc = tapechar_setup_device(device); 401 rc = tapechar_setup_device(device);
402 if (rc) 402 if (rc)
403 goto out_minor; 403 goto out_minor;
404 rc = tapeblock_setup_device(device);
405 if (rc)
406 goto out_char;
407 404
408 tape_state_set(device, TS_UNUSED); 405 tape_state_set(device, TS_UNUSED);
409 406
@@ -411,8 +408,6 @@ tape_generic_online(struct tape_device *device,
411 408
412 return 0; 409 return 0;
413 410
414out_char:
415 tapechar_cleanup_device(device);
416out_minor: 411out_minor:
417 tape_remove_minor(device); 412 tape_remove_minor(device);
418out_discipline: 413out_discipline:
@@ -426,7 +421,6 @@ out:
426static void 421static void
427tape_cleanup_device(struct tape_device *device) 422tape_cleanup_device(struct tape_device *device)
428{ 423{
429 tapeblock_cleanup_device(device);
430 tapechar_cleanup_device(device); 424 tapechar_cleanup_device(device);
431 device->discipline->cleanup_device(device); 425 device->discipline->cleanup_device(device);
432 module_put(device->discipline->owner); 426 module_put(device->discipline->owner);
@@ -785,10 +779,6 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
785{ 779{
786 int rc; 780 int rc;
787 781
788#ifdef CONFIG_S390_TAPE_BLOCK
789 if (request->op == TO_BLOCK)
790 device->discipline->check_locate(device, request);
791#endif
792 rc = ccw_device_start( 782 rc = ccw_device_start(
793 device->cdev, 783 device->cdev,
794 request->cpaddr, 784 request->cpaddr,
@@ -1253,7 +1243,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1253} 1243}
1254 1244
1255/* 1245/*
1256 * Tape device open function used by tape_char & tape_block frontends. 1246 * Tape device open function used by tape_char frontend.
1257 */ 1247 */
1258int 1248int
1259tape_open(struct tape_device *device) 1249tape_open(struct tape_device *device)
@@ -1283,7 +1273,7 @@ tape_open(struct tape_device *device)
1283} 1273}
1284 1274
1285/* 1275/*
1286 * Tape device release function used by tape_char & tape_block frontends. 1276 * Tape device release function used by tape_char frontend.
1287 */ 1277 */
1288int 1278int
1289tape_release(struct tape_device *device) 1279tape_release(struct tape_device *device)
@@ -1344,7 +1334,6 @@ tape_init (void)
1344 DBF_EVENT(3, "tape init\n"); 1334 DBF_EVENT(3, "tape init\n");
1345 tape_proc_init(); 1335 tape_proc_init();
1346 tapechar_init (); 1336 tapechar_init ();
1347 tapeblock_init ();
1348 return 0; 1337 return 0;
1349} 1338}
1350 1339
@@ -1358,7 +1347,6 @@ tape_exit(void)
1358 1347
1359 /* Get rid of the frontends */ 1348 /* Get rid of the frontends */
1360 tapechar_exit(); 1349 tapechar_exit();
1361 tapeblock_exit();
1362 tape_proc_cleanup(); 1350 tape_proc_cleanup();
1363 debug_unregister (TAPE_DBF_AREA); 1351 debug_unregister (TAPE_DBF_AREA);
1364} 1352}
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5f1dc6fb5708..731470e68493 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * bus driver for ccwgroup 2 * bus driver for ccwgroup
3 * 3 *
4 * Copyright IBM Corp. 2002, 2009 4 * Copyright IBM Corp. 2002, 2012
5 * 5 *
6 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
@@ -15,10 +15,13 @@
15#include <linux/ctype.h> 15#include <linux/ctype.h>
16#include <linux/dcache.h> 16#include <linux/dcache.h>
17 17
18#include <asm/cio.h>
18#include <asm/ccwdev.h> 19#include <asm/ccwdev.h>
19#include <asm/ccwgroup.h> 20#include <asm/ccwgroup.h>
20 21
21#define CCW_BUS_ID_SIZE 20 22#include "device.h"
23
24#define CCW_BUS_ID_SIZE 10
22 25
23/* In Linux 2.4, we had a channel device layer called "chandev" 26/* In Linux 2.4, we had a channel device layer called "chandev"
24 * that did all sorts of obscure stuff for networking devices. 27 * that did all sorts of obscure stuff for networking devices.
@@ -27,19 +30,6 @@
27 * to devices that use multiple subchannels. 30 * to devices that use multiple subchannels.
28 */ 31 */
29 32
30/* a device matches a driver if all its slave devices match the same
31 * entry of the driver */
32static int ccwgroup_bus_match(struct device *dev, struct device_driver * drv)
33{
34 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
35 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(drv);
36
37 if (gdev->creator_id == gdrv->driver_id)
38 return 1;
39
40 return 0;
41}
42
43static struct bus_type ccwgroup_bus_type; 33static struct bus_type ccwgroup_bus_type;
44 34
45static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) 35static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
@@ -254,9 +244,10 @@ static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
254 return 0; 244 return 0;
255} 245}
256 246
257static int __get_next_bus_id(const char **buf, char *bus_id) 247static int __get_next_id(const char **buf, struct ccw_dev_id *id)
258{ 248{
259 int rc, len; 249 unsigned int cssid, ssid, devno;
250 int ret = 0, len;
260 char *start, *end; 251 char *start, *end;
261 252
262 start = (char *)*buf; 253 start = (char *)*buf;
@@ -271,49 +262,40 @@ static int __get_next_bus_id(const char **buf, char *bus_id)
271 len = end - start + 1; 262 len = end - start + 1;
272 end++; 263 end++;
273 } 264 }
274 if (len < CCW_BUS_ID_SIZE) { 265 if (len <= CCW_BUS_ID_SIZE) {
275 strlcpy(bus_id, start, len); 266 if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
276 rc = 0; 267 ret = -EINVAL;
277 } else 268 } else
278 rc = -EINVAL; 269 ret = -EINVAL;
279 *buf = end;
280 return rc;
281}
282
283static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
284{
285 int cssid, ssid, devno;
286 270
287 /* Must be of form %x.%x.%04x */ 271 if (!ret) {
288 if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3) 272 id->ssid = ssid;
289 return 0; 273 id->devno = devno;
290 return 1; 274 }
275 *buf = end;
276 return ret;
291} 277}
292 278
293/** 279/**
294 * ccwgroup_create_from_string() - create and register a ccw group device 280 * ccwgroup_create_dev() - create and register a ccw group device
295 * @root: parent device for the new device 281 * @parent: parent device for the new device
296 * @creator_id: identifier of creating driver 282 * @gdrv: driver for the new group device
297 * @cdrv: ccw driver of slave devices
298 * @num_devices: number of slave devices 283 * @num_devices: number of slave devices
299 * @buf: buffer containing comma separated bus ids of slave devices 284 * @buf: buffer containing comma separated bus ids of slave devices
300 * 285 *
301 * Create and register a new ccw group device as a child of @root. Slave 286 * Create and register a new ccw group device as a child of @parent. Slave
302 * devices are obtained from the list of bus ids given in @buf and must all 287 * devices are obtained from the list of bus ids given in @buf.
303 * belong to @cdrv.
304 * Returns: 288 * Returns:
305 * %0 on success and an error code on failure. 289 * %0 on success and an error code on failure.
306 * Context: 290 * Context:
307 * non-atomic 291 * non-atomic
308 */ 292 */
309int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, 293int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
310 struct ccw_driver *cdrv, int num_devices, 294 int num_devices, const char *buf)
311 const char *buf)
312{ 295{
313 struct ccwgroup_device *gdev; 296 struct ccwgroup_device *gdev;
297 struct ccw_dev_id dev_id;
314 int rc, i; 298 int rc, i;
315 char tmp_bus_id[CCW_BUS_ID_SIZE];
316 const char *curr_buf;
317 299
318 gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), 300 gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
319 GFP_KERNEL); 301 GFP_KERNEL);
@@ -323,29 +305,24 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
323 atomic_set(&gdev->onoff, 0); 305 atomic_set(&gdev->onoff, 0);
324 mutex_init(&gdev->reg_mutex); 306 mutex_init(&gdev->reg_mutex);
325 mutex_lock(&gdev->reg_mutex); 307 mutex_lock(&gdev->reg_mutex);
326 gdev->creator_id = creator_id;
327 gdev->count = num_devices; 308 gdev->count = num_devices;
328 gdev->dev.bus = &ccwgroup_bus_type; 309 gdev->dev.bus = &ccwgroup_bus_type;
329 gdev->dev.parent = root; 310 gdev->dev.parent = parent;
330 gdev->dev.release = ccwgroup_release; 311 gdev->dev.release = ccwgroup_release;
331 device_initialize(&gdev->dev); 312 device_initialize(&gdev->dev);
332 313
333 curr_buf = buf; 314 for (i = 0; i < num_devices && buf; i++) {
334 for (i = 0; i < num_devices && curr_buf; i++) { 315 rc = __get_next_id(&buf, &dev_id);
335 rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
336 if (rc != 0) 316 if (rc != 0)
337 goto error; 317 goto error;
338 if (!__is_valid_bus_id(tmp_bus_id)) { 318 gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id);
339 rc = -EINVAL;
340 goto error;
341 }
342 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
343 /* 319 /*
344 * All devices have to be of the same type in 320 * All devices have to be of the same type in
345 * order to be grouped. 321 * order to be grouped.
346 */ 322 */
347 if (!gdev->cdev[i] 323 if (!gdev->cdev[i] || !gdev->cdev[i]->drv ||
348 || gdev->cdev[i]->id.driver_info != 324 gdev->cdev[i]->drv != gdev->cdev[0]->drv ||
325 gdev->cdev[i]->id.driver_info !=
349 gdev->cdev[0]->id.driver_info) { 326 gdev->cdev[0]->id.driver_info) {
350 rc = -EINVAL; 327 rc = -EINVAL;
351 goto error; 328 goto error;
@@ -361,18 +338,25 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
361 spin_unlock_irq(gdev->cdev[i]->ccwlock); 338 spin_unlock_irq(gdev->cdev[i]->ccwlock);
362 } 339 }
363 /* Check for sufficient number of bus ids. */ 340 /* Check for sufficient number of bus ids. */
364 if (i < num_devices && !curr_buf) { 341 if (i < num_devices) {
365 rc = -EINVAL; 342 rc = -EINVAL;
366 goto error; 343 goto error;
367 } 344 }
368 /* Check for trailing stuff. */ 345 /* Check for trailing stuff. */
369 if (i == num_devices && strlen(curr_buf) > 0) { 346 if (i == num_devices && strlen(buf) > 0) {
370 rc = -EINVAL; 347 rc = -EINVAL;
371 goto error; 348 goto error;
372 } 349 }
373 350
374 dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev)); 351 dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
375 gdev->dev.groups = ccwgroup_attr_groups; 352 gdev->dev.groups = ccwgroup_attr_groups;
353
354 if (gdrv) {
355 gdev->dev.driver = &gdrv->driver;
356 rc = gdrv->setup ? gdrv->setup(gdev) : 0;
357 if (rc)
358 goto error;
359 }
376 rc = device_add(&gdev->dev); 360 rc = device_add(&gdev->dev);
377 if (rc) 361 if (rc)
378 goto error; 362 goto error;
@@ -397,7 +381,7 @@ error:
397 put_device(&gdev->dev); 381 put_device(&gdev->dev);
398 return rc; 382 return rc;
399} 383}
400EXPORT_SYMBOL(ccwgroup_create_from_string); 384EXPORT_SYMBOL(ccwgroup_create_dev);
401 385
402static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, 386static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
403 void *data) 387 void *data)
@@ -440,14 +424,6 @@ module_exit(cleanup_ccwgroup);
440 424
441/************************** driver stuff ******************************/ 425/************************** driver stuff ******************************/
442 426
443static int ccwgroup_probe(struct device *dev)
444{
445 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
446 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
447
448 return gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
449}
450
451static int ccwgroup_remove(struct device *dev) 427static int ccwgroup_remove(struct device *dev)
452{ 428{
453 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 429 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
@@ -542,8 +518,6 @@ static const struct dev_pm_ops ccwgroup_pm_ops = {
542 518
543static struct bus_type ccwgroup_bus_type = { 519static struct bus_type ccwgroup_bus_type = {
544 .name = "ccwgroup", 520 .name = "ccwgroup",
545 .match = ccwgroup_bus_match,
546 .probe = ccwgroup_probe,
547 .remove = ccwgroup_remove, 521 .remove = ccwgroup_remove,
548 .shutdown = ccwgroup_shutdown, 522 .shutdown = ccwgroup_shutdown,
549 .pm = &ccwgroup_pm_ops, 523 .pm = &ccwgroup_pm_ops,
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index a49c46c91983..a6ddaed8793d 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -656,51 +656,34 @@ static struct io_subchannel_private console_priv;
656static int console_subchannel_in_use; 656static int console_subchannel_in_use;
657 657
658/* 658/*
659 * Use cio_tpi to get a pending interrupt and call the interrupt handler. 659 * Use cio_tsch to update the subchannel status and call the interrupt handler
660 * Return non-zero if an interrupt was processed, zero otherwise. 660 * if status had been pending. Called with the console_subchannel lock.
661 */ 661 */
662static int cio_tpi(void) 662static void cio_tsch(struct subchannel *sch)
663{ 663{
664 struct tpi_info *tpi_info;
665 struct subchannel *sch;
666 struct irb *irb; 664 struct irb *irb;
667 int irq_context; 665 int irq_context;
668 666
669 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
670 if (tpi(NULL) != 1)
671 return 0;
672 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
673 if (tpi_info->adapter_IO) {
674 do_adapter_IO(tpi_info->isc);
675 return 1;
676 }
677 irb = (struct irb *)&S390_lowcore.irb; 667 irb = (struct irb *)&S390_lowcore.irb;
678 /* Store interrupt response block to lowcore. */ 668 /* Store interrupt response block to lowcore. */
679 if (tsch(tpi_info->schid, irb) != 0) { 669 if (tsch(sch->schid, irb) != 0)
680 /* Not status pending or not operational. */ 670 /* Not status pending or not operational. */
681 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++; 671 return;
682 return 1; 672 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
683 } 673 /* Call interrupt handler with updated status. */
684 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
685 if (!sch) {
686 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
687 return 1;
688 }
689 irq_context = in_interrupt(); 674 irq_context = in_interrupt();
690 if (!irq_context) 675 if (!irq_context) {
691 local_bh_disable(); 676 local_bh_disable();
692 irq_enter(); 677 irq_enter();
693 spin_lock(sch->lock); 678 }
694 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
695 if (sch->driver && sch->driver->irq) 679 if (sch->driver && sch->driver->irq)
696 sch->driver->irq(sch); 680 sch->driver->irq(sch);
697 else 681 else
698 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++; 682 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
699 spin_unlock(sch->lock); 683 if (!irq_context) {
700 irq_exit(); 684 irq_exit();
701 if (!irq_context)
702 _local_bh_enable(); 685 _local_bh_enable();
703 return 1; 686 }
704} 687}
705 688
706void *cio_get_console_priv(void) 689void *cio_get_console_priv(void)
@@ -712,34 +695,16 @@ void *cio_get_console_priv(void)
712 * busy wait for the next interrupt on the console 695 * busy wait for the next interrupt on the console
713 */ 696 */
714void wait_cons_dev(void) 697void wait_cons_dev(void)
715 __releases(console_subchannel.lock)
716 __acquires(console_subchannel.lock)
717{ 698{
718 unsigned long cr6 __attribute__ ((aligned (8)));
719 unsigned long save_cr6 __attribute__ ((aligned (8)));
720
721 /*
722 * before entering the spinlock we may already have
723 * processed the interrupt on a different CPU...
724 */
725 if (!console_subchannel_in_use) 699 if (!console_subchannel_in_use)
726 return; 700 return;
727 701
728 /* disable all but the console isc */ 702 while (1) {
729 __ctl_store (save_cr6, 6, 6); 703 cio_tsch(&console_subchannel);
730 cr6 = 1UL << (31 - CONSOLE_ISC); 704 if (console_subchannel.schib.scsw.cmd.actl == 0)
731 __ctl_load (cr6, 6, 6); 705 break;
732 706 udelay_simple(100);
733 do { 707 }
734 spin_unlock(console_subchannel.lock);
735 if (!cio_tpi())
736 cpu_relax();
737 spin_lock(console_subchannel.lock);
738 } while (console_subchannel.schib.scsw.cmd.actl != 0);
739 /*
740 * restore previous isc value
741 */
742 __ctl_load (save_cr6, 6, 6);
743} 708}
744 709
745static int 710static int
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 02d015259461..f8f952d52045 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -695,7 +695,17 @@ static int match_dev_id(struct device *dev, void *data)
695 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); 695 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
696} 696}
697 697
698static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id) 698/**
699 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
700 * @dev_id: id of the device to be searched
701 *
702 * This function searches all devices attached to the ccw bus for a device
703 * matching @dev_id.
704 * Returns:
705 * If a device is found its reference count is increased and returned;
706 * else %NULL is returned.
707 */
708struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
699{ 709{
700 struct device *dev; 710 struct device *dev;
701 711
@@ -703,6 +713,7 @@ static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
703 713
704 return dev ? to_ccwdev(dev) : NULL; 714 return dev ? to_ccwdev(dev) : NULL;
705} 715}
716EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
706 717
707static void ccw_device_do_unbind_bind(struct ccw_device *cdev) 718static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
708{ 719{
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 179824b3082f..6bace6942396 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -101,6 +101,7 @@ int ccw_device_test_sense_data(struct ccw_device *);
101void ccw_device_schedule_sch_unregister(struct ccw_device *); 101void ccw_device_schedule_sch_unregister(struct ccw_device *);
102int ccw_purge_blacklisted(void); 102int ccw_purge_blacklisted(void);
103void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo); 103void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
104struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);
104 105
105/* Function prototypes for device status and basic sense stuff. */ 106/* Function prototypes for device status and basic sense stuff. */
106void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 107void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 35c685c374e9..7493efafa0d5 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -63,7 +63,7 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
63 " ipm %0\n" 63 " ipm %0\n"
64 " srl %0,28\n" 64 " srl %0,28\n"
65 : "=d" (cc) 65 : "=d" (cc)
66 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory"); 66 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
67 return cc; 67 return cc;
68} 68}
69 69
@@ -74,7 +74,7 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
74 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer 74 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
75 * @fc: function code to perform 75 * @fc: function code to perform
76 * 76 *
77 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION. 77 * Returns condition code.
78 * Note: For IQDC unicast queues only the highest priority queue is processed. 78 * Note: For IQDC unicast queues only the highest priority queue is processed.
79 */ 79 */
80static inline int do_siga_output(unsigned long schid, unsigned long mask, 80static inline int do_siga_output(unsigned long schid, unsigned long mask,
@@ -85,18 +85,16 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask,
85 register unsigned long __schid asm("1") = schid; 85 register unsigned long __schid asm("1") = schid;
86 register unsigned long __mask asm("2") = mask; 86 register unsigned long __mask asm("2") = mask;
87 register unsigned long __aob asm("3") = aob; 87 register unsigned long __aob asm("3") = aob;
88 int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION; 88 int cc;
89 89
90 asm volatile( 90 asm volatile(
91 " siga 0\n" 91 " siga 0\n"
92 "0: ipm %0\n" 92 " ipm %0\n"
93 " srl %0,28\n" 93 " srl %0,28\n"
94 "1:\n" 94 : "=d" (cc), "+d" (__fc), "+d" (__aob)
95 EX_TABLE(0b, 1b) 95 : "d" (__schid), "d" (__mask)
96 : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask), 96 : "cc");
97 "+d" (__aob) 97 *bb = __fc >> 31;
98 : : "cc", "memory");
99 *bb = ((unsigned int) __fc) >> 31;
100 return cc; 98 return cc;
101} 99}
102 100
@@ -167,7 +165,7 @@ again:
167 165
168 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); 166 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
169 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 167 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
170 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 168 q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
171 q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); 169 q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
172 return 0; 170 return 0;
173} 171}
@@ -215,7 +213,7 @@ again:
215 213
216 DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); 214 DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
217 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 215 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
218 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 216 q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
219 q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); 217 q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
220 return 0; 218 return 0;
221} 219}
@@ -313,7 +311,7 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
313 cc = do_siga_sync(schid, output, input, fc); 311 cc = do_siga_sync(schid, output, input, fc);
314 if (unlikely(cc)) 312 if (unlikely(cc))
315 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); 313 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
316 return cc; 314 return (cc) ? -EIO : 0;
317} 315}
318 316
319static inline int qdio_siga_sync_q(struct qdio_q *q) 317static inline int qdio_siga_sync_q(struct qdio_q *q)
@@ -384,7 +382,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
384 cc = do_siga_input(schid, q->mask, fc); 382 cc = do_siga_input(schid, q->mask, fc);
385 if (unlikely(cc)) 383 if (unlikely(cc))
386 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); 384 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
387 return cc; 385 return (cc) ? -EIO : 0;
388} 386}
389 387
390#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0) 388#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
@@ -443,7 +441,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
443 unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT : 441 unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
444 SLSB_P_OUTPUT_NOT_INIT; 442 SLSB_P_OUTPUT_NOT_INIT;
445 443
446 q->qdio_error |= QDIO_ERROR_SLSB_STATE; 444 q->qdio_error = QDIO_ERROR_SLSB_STATE;
447 445
448 /* special handling for no target buffer empty */ 446 /* special handling for no target buffer empty */
449 if ((!q->is_input_q && 447 if ((!q->is_input_q &&
@@ -519,7 +517,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
519 int count, stop; 517 int count, stop;
520 unsigned char state = 0; 518 unsigned char state = 0;
521 519
522 q->timestamp = get_clock_fast(); 520 q->timestamp = get_clock();
523 521
524 /* 522 /*
525 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 523 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -575,7 +573,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
575 573
576 bufnr = get_inbound_buffer_frontier(q); 574 bufnr = get_inbound_buffer_frontier(q);
577 575
578 if ((bufnr != q->last_move) || q->qdio_error) { 576 if (bufnr != q->last_move) {
579 q->last_move = bufnr; 577 q->last_move = bufnr;
580 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) 578 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
581 q->u.in.timestamp = get_clock(); 579 q->u.in.timestamp = get_clock();
@@ -790,7 +788,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
790 int count, stop; 788 int count, stop;
791 unsigned char state = 0; 789 unsigned char state = 0;
792 790
793 q->timestamp = get_clock_fast(); 791 q->timestamp = get_clock();
794 792
795 if (need_siga_sync(q)) 793 if (need_siga_sync(q))
796 if (((queue_type(q) != QDIO_IQDIO_QFMT) && 794 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
@@ -863,7 +861,7 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
863 861
864 bufnr = get_outbound_buffer_frontier(q); 862 bufnr = get_outbound_buffer_frontier(q);
865 863
866 if ((bufnr != q->last_move) || q->qdio_error) { 864 if (bufnr != q->last_move) {
867 q->last_move = bufnr; 865 q->last_move = bufnr;
868 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); 866 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
869 return 1; 867 return 1;
@@ -894,13 +892,16 @@ retry:
894 goto retry; 892 goto retry;
895 } 893 }
896 DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr); 894 DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
897 cc |= QDIO_ERROR_SIGA_BUSY; 895 cc = -EBUSY;
898 } else 896 } else {
899 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); 897 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
898 cc = -ENOBUFS;
899 }
900 break; 900 break;
901 case 1: 901 case 1:
902 case 3: 902 case 3:
903 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); 903 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
904 cc = -EIO;
904 break; 905 break;
905 } 906 }
906 if (retries) { 907 if (retries) {
@@ -1090,7 +1091,7 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
1090 } 1091 }
1091 1092
1092 count = sub_buf(q->first_to_check, q->first_to_kick); 1093 count = sub_buf(q->first_to_check, q->first_to_kick);
1093 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 1094 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
1094 q->nr, q->first_to_kick, count, irq_ptr->int_parm); 1095 q->nr, q->first_to_kick, count, irq_ptr->int_parm);
1095no_handler: 1096no_handler:
1096 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 1097 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
@@ -1691,7 +1692,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1691 "do%02x b:%02x c:%02x", callflags, bufnr, count); 1692 "do%02x b:%02x c:%02x", callflags, bufnr, count);
1692 1693
1693 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) 1694 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1694 return -EBUSY; 1695 return -EIO;
1695 if (!count) 1696 if (!count)
1696 return 0; 1697 return 0;
1697 if (callflags & QDIO_FLAG_SYNC_INPUT) 1698 if (callflags & QDIO_FLAG_SYNC_INPUT)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 7e9a72eb2fe0..b987d4619586 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -215,7 +215,7 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
215 register struct ap_queue_status reg1_out asm ("1"); 215 register struct ap_queue_status reg1_out asm ("1");
216 register void *reg2 asm ("2") = ind; 216 register void *reg2 asm ("2") = ind;
217 asm volatile( 217 asm volatile(
218 ".long 0xb2af0000" /* PQAP(RAPQ) */ 218 ".long 0xb2af0000" /* PQAP(AQIC) */
219 : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) 219 : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
220 : 220 :
221 : "cc" ); 221 : "cc" );
@@ -232,7 +232,7 @@ __ap_query_functions(ap_qid_t qid, unsigned int *functions)
232 register unsigned long reg2 asm ("2"); 232 register unsigned long reg2 asm ("2");
233 233
234 asm volatile( 234 asm volatile(
235 ".long 0xb2af0000\n" 235 ".long 0xb2af0000\n" /* PQAP(TAPQ) */
236 "0:\n" 236 "0:\n"
237 EX_TABLE(0b, 0b) 237 EX_TABLE(0b, 0b)
238 : "+d" (reg0), "+d" (reg1), "=d" (reg2) 238 : "+d" (reg0), "+d" (reg1), "=d" (reg2)
@@ -391,7 +391,7 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
391 reg0 |= 0x400000UL; 391 reg0 |= 0x400000UL;
392 392
393 asm volatile ( 393 asm volatile (
394 "0: .long 0xb2ad0042\n" /* DQAP */ 394 "0: .long 0xb2ad0042\n" /* NQAP */
395 " brc 2,0b" 395 " brc 2,0b"
396 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) 396 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
397 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg) 397 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
@@ -450,7 +450,7 @@ __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
450 450
451 451
452 asm volatile( 452 asm volatile(
453 "0: .long 0xb2ae0064\n" 453 "0: .long 0xb2ae0064\n" /* DQAP */
454 " brc 6,0b\n" 454 " brc 6,0b\n"
455 : "+d" (reg0), "=d" (reg1), "+d" (reg2), 455 : "+d" (reg0), "=d" (reg1), "+d" (reg2),
456 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7), 456 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
@@ -836,12 +836,12 @@ static void __ap_flush_queue(struct ap_device *ap_dev)
836 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) { 836 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
837 list_del_init(&ap_msg->list); 837 list_del_init(&ap_msg->list);
838 ap_dev->pendingq_count--; 838 ap_dev->pendingq_count--;
839 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 839 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
840 } 840 }
841 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) { 841 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
842 list_del_init(&ap_msg->list); 842 list_del_init(&ap_msg->list);
843 ap_dev->requestq_count--; 843 ap_dev->requestq_count--;
844 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 844 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
845 } 845 }
846} 846}
847 847
@@ -1329,7 +1329,7 @@ static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
1329 continue; 1329 continue;
1330 list_del_init(&ap_msg->list); 1330 list_del_init(&ap_msg->list);
1331 ap_dev->pendingq_count--; 1331 ap_dev->pendingq_count--;
1332 ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply); 1332 ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
1333 break; 1333 break;
1334 } 1334 }
1335 if (ap_dev->queue_count > 0) 1335 if (ap_dev->queue_count > 0)
@@ -1450,10 +1450,10 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
1450 return -EBUSY; 1450 return -EBUSY;
1451 case AP_RESPONSE_REQ_FAC_NOT_INST: 1451 case AP_RESPONSE_REQ_FAC_NOT_INST:
1452 case AP_RESPONSE_MESSAGE_TOO_BIG: 1452 case AP_RESPONSE_MESSAGE_TOO_BIG:
1453 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL)); 1453 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
1454 return -EINVAL; 1454 return -EINVAL;
1455 default: /* Device is gone. */ 1455 default: /* Device is gone. */
1456 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 1456 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1457 return -ENODEV; 1457 return -ENODEV;
1458 } 1458 }
1459 } else { 1459 } else {
@@ -1471,6 +1471,10 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1471 unsigned long flags; 1471 unsigned long flags;
1472 int rc; 1472 int rc;
1473 1473
1474 /* For asynchronous message handling a valid receive-callback
1475 * is required. */
1476 BUG_ON(!ap_msg->receive);
1477
1474 spin_lock_bh(&ap_dev->lock); 1478 spin_lock_bh(&ap_dev->lock);
1475 if (!ap_dev->unregistered) { 1479 if (!ap_dev->unregistered) {
1476 /* Make room on the queue by polling for finished requests. */ 1480 /* Make room on the queue by polling for finished requests. */
@@ -1482,7 +1486,7 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1482 if (rc == -ENODEV) 1486 if (rc == -ENODEV)
1483 ap_dev->unregistered = 1; 1487 ap_dev->unregistered = 1;
1484 } else { 1488 } else {
1485 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 1489 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1486 rc = -ENODEV; 1490 rc = -ENODEV;
1487 } 1491 }
1488 spin_unlock_bh(&ap_dev->lock); 1492 spin_unlock_bh(&ap_dev->lock);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d960a6309eec..726fc65809d8 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -136,9 +136,6 @@ struct ap_driver {
136 136
137 int (*probe)(struct ap_device *); 137 int (*probe)(struct ap_device *);
138 void (*remove)(struct ap_device *); 138 void (*remove)(struct ap_device *);
139 /* receive is called from tasklet context */
140 void (*receive)(struct ap_device *, struct ap_message *,
141 struct ap_message *);
142 int request_timeout; /* request timeout in jiffies */ 139 int request_timeout; /* request timeout in jiffies */
143}; 140};
144 141
@@ -183,6 +180,9 @@ struct ap_message {
183 180
184 void *private; /* ap driver private pointer. */ 181 void *private; /* ap driver private pointer. */
185 unsigned int special:1; /* Used for special commands. */ 182 unsigned int special:1; /* Used for special commands. */
183 /* receive is called from tasklet context */
184 void (*receive)(struct ap_device *, struct ap_message *,
185 struct ap_message *);
186}; 186};
187 187
188#define AP_DEVICE(dt) \ 188#define AP_DEVICE(dt) \
@@ -199,6 +199,7 @@ static inline void ap_init_message(struct ap_message *ap_msg)
199 ap_msg->psmid = 0; 199 ap_msg->psmid = 0;
200 ap_msg->length = 0; 200 ap_msg->length = 0;
201 ap_msg->special = 0; 201 ap_msg->special = 0;
202 ap_msg->receive = NULL;
202} 203}
203 204
204/* 205/*
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 084286728166..46812440425a 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -77,7 +77,6 @@ static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
77static struct ap_driver zcrypt_cex2a_driver = { 77static struct ap_driver zcrypt_cex2a_driver = {
78 .probe = zcrypt_cex2a_probe, 78 .probe = zcrypt_cex2a_probe,
79 .remove = zcrypt_cex2a_remove, 79 .remove = zcrypt_cex2a_remove,
80 .receive = zcrypt_cex2a_receive,
81 .ids = zcrypt_cex2a_ids, 80 .ids = zcrypt_cex2a_ids,
82 .request_timeout = CEX2A_CLEANUP_TIME, 81 .request_timeout = CEX2A_CLEANUP_TIME,
83}; 82};
@@ -349,6 +348,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
349 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL); 348 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
350 if (!ap_msg.message) 349 if (!ap_msg.message)
351 return -ENOMEM; 350 return -ENOMEM;
351 ap_msg.receive = zcrypt_cex2a_receive;
352 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 352 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
353 atomic_inc_return(&zcrypt_step); 353 atomic_inc_return(&zcrypt_step);
354 ap_msg.private = &work; 354 ap_msg.private = &work;
@@ -390,6 +390,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
390 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL); 390 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
391 if (!ap_msg.message) 391 if (!ap_msg.message)
392 return -ENOMEM; 392 return -ENOMEM;
393 ap_msg.receive = zcrypt_cex2a_receive;
393 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 394 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
394 atomic_inc_return(&zcrypt_step); 395 atomic_inc_return(&zcrypt_step);
395 ap_msg.private = &work; 396 ap_msg.private = &work;
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 0effca925451..ad7951c21b79 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -67,7 +67,6 @@ static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *,
67static struct ap_driver zcrypt_pcica_driver = { 67static struct ap_driver zcrypt_pcica_driver = {
68 .probe = zcrypt_pcica_probe, 68 .probe = zcrypt_pcica_probe,
69 .remove = zcrypt_pcica_remove, 69 .remove = zcrypt_pcica_remove,
70 .receive = zcrypt_pcica_receive,
71 .ids = zcrypt_pcica_ids, 70 .ids = zcrypt_pcica_ids,
72 .request_timeout = PCICA_CLEANUP_TIME, 71 .request_timeout = PCICA_CLEANUP_TIME,
73}; 72};
@@ -284,6 +283,7 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
284 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); 283 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
285 if (!ap_msg.message) 284 if (!ap_msg.message)
286 return -ENOMEM; 285 return -ENOMEM;
286 ap_msg.receive = zcrypt_pcica_receive;
287 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 287 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
288 atomic_inc_return(&zcrypt_step); 288 atomic_inc_return(&zcrypt_step);
289 ap_msg.private = &work; 289 ap_msg.private = &work;
@@ -322,6 +322,7 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
322 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); 322 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
323 if (!ap_msg.message) 323 if (!ap_msg.message)
324 return -ENOMEM; 324 return -ENOMEM;
325 ap_msg.receive = zcrypt_pcica_receive;
325 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 326 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
326 atomic_inc_return(&zcrypt_step); 327 atomic_inc_return(&zcrypt_step);
327 ap_msg.private = &work; 328 ap_msg.private = &work;
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index f9523c0cc8d2..e5dd335fda53 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -79,7 +79,6 @@ static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
79static struct ap_driver zcrypt_pcicc_driver = { 79static struct ap_driver zcrypt_pcicc_driver = {
80 .probe = zcrypt_pcicc_probe, 80 .probe = zcrypt_pcicc_probe,
81 .remove = zcrypt_pcicc_remove, 81 .remove = zcrypt_pcicc_remove,
82 .receive = zcrypt_pcicc_receive,
83 .ids = zcrypt_pcicc_ids, 82 .ids = zcrypt_pcicc_ids,
84 .request_timeout = PCICC_CLEANUP_TIME, 83 .request_timeout = PCICC_CLEANUP_TIME,
85}; 84};
@@ -488,6 +487,7 @@ static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
488 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); 487 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
489 if (!ap_msg.message) 488 if (!ap_msg.message)
490 return -ENOMEM; 489 return -ENOMEM;
490 ap_msg.receive = zcrypt_pcicc_receive;
491 ap_msg.length = PAGE_SIZE; 491 ap_msg.length = PAGE_SIZE;
492 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 492 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
493 atomic_inc_return(&zcrypt_step); 493 atomic_inc_return(&zcrypt_step);
@@ -527,6 +527,7 @@ static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
527 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); 527 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
528 if (!ap_msg.message) 528 if (!ap_msg.message)
529 return -ENOMEM; 529 return -ENOMEM;
530 ap_msg.receive = zcrypt_pcicc_receive;
530 ap_msg.length = PAGE_SIZE; 531 ap_msg.length = PAGE_SIZE;
531 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 532 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
532 atomic_inc_return(&zcrypt_step); 533 atomic_inc_return(&zcrypt_step);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index cf1cbd4747f4..f7cc43401816 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -89,7 +89,6 @@ static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
89static struct ap_driver zcrypt_pcixcc_driver = { 89static struct ap_driver zcrypt_pcixcc_driver = {
90 .probe = zcrypt_pcixcc_probe, 90 .probe = zcrypt_pcixcc_probe,
91 .remove = zcrypt_pcixcc_remove, 91 .remove = zcrypt_pcixcc_remove,
92 .receive = zcrypt_pcixcc_receive,
93 .ids = zcrypt_pcixcc_ids, 92 .ids = zcrypt_pcixcc_ids,
94 .request_timeout = PCIXCC_CLEANUP_TIME, 93 .request_timeout = PCIXCC_CLEANUP_TIME,
95}; 94};
@@ -698,6 +697,7 @@ static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
698 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); 697 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
699 if (!ap_msg.message) 698 if (!ap_msg.message)
700 return -ENOMEM; 699 return -ENOMEM;
700 ap_msg.receive = zcrypt_pcixcc_receive;
701 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 701 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
702 atomic_inc_return(&zcrypt_step); 702 atomic_inc_return(&zcrypt_step);
703 ap_msg.private = &resp_type; 703 ap_msg.private = &resp_type;
@@ -738,6 +738,7 @@ static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
738 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); 738 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
739 if (!ap_msg.message) 739 if (!ap_msg.message)
740 return -ENOMEM; 740 return -ENOMEM;
741 ap_msg.receive = zcrypt_pcixcc_receive;
741 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 742 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
742 atomic_inc_return(&zcrypt_step); 743 atomic_inc_return(&zcrypt_step);
743 ap_msg.private = &resp_type; 744 ap_msg.private = &resp_type;
@@ -778,6 +779,7 @@ static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
778 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL); 779 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
779 if (!ap_msg.message) 780 if (!ap_msg.message)
780 return -ENOMEM; 781 return -ENOMEM;
782 ap_msg.receive = zcrypt_pcixcc_receive;
781 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 783 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
782 atomic_inc_return(&zcrypt_step); 784 atomic_inc_return(&zcrypt_step);
783 ap_msg.private = &resp_type; 785 ap_msg.private = &resp_type;
@@ -818,6 +820,7 @@ static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
818 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL); 820 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
819 if (!ap_msg.message) 821 if (!ap_msg.message)
820 return -ENOMEM; 822 return -ENOMEM;
823 ap_msg.receive = zcrypt_pcixcc_receive;
821 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 824 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
822 atomic_inc_return(&zcrypt_step); 825 atomic_inc_return(&zcrypt_step);
823 ap_msg.private = &resp_type; 826 ap_msg.private = &resp_type;
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 9b66d2d1809b..dfda748c4000 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -4,11 +4,10 @@ menu "S/390 network device drivers"
4config LCS 4config LCS
5 def_tristate m 5 def_tristate m
6 prompt "Lan Channel Station Interface" 6 prompt "Lan Channel Station Interface"
7 depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI) 7 depends on CCW && NETDEVICES && (ETHERNET || FDDI)
8 help 8 help
9 Select this option if you want to use LCS networking on IBM System z. 9 Select this option if you want to use LCS networking on IBM System z.
10 This device driver supports Token Ring (IEEE 802.5), 10 This device driver supports FDDI (IEEE 802.7) and Ethernet.
11 FDDI (IEEE 802.7) and Ethernet.
12 To compile as a module, choose M. The module name is lcs. 11 To compile as a module, choose M. The module name is lcs.
13 If you do not know what it is, it's safe to choose Y. 12 If you do not know what it is, it's safe to choose Y.
14 13
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index b41fae37d3af..6b1ff90d2f00 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -136,7 +136,6 @@ static inline void
136claw_set_busy(struct net_device *dev) 136claw_set_busy(struct net_device *dev)
137{ 137{
138 ((struct claw_privbk *)dev->ml_priv)->tbusy = 1; 138 ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
139 eieio();
140} 139}
141 140
142static inline void 141static inline void
@@ -144,13 +143,11 @@ claw_clear_busy(struct net_device *dev)
144{ 143{
145 clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy)); 144 clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
146 netif_wake_queue(dev); 145 netif_wake_queue(dev);
147 eieio();
148} 146}
149 147
150static inline int 148static inline int
151claw_check_busy(struct net_device *dev) 149claw_check_busy(struct net_device *dev)
152{ 150{
153 eieio();
154 return ((struct claw_privbk *) dev->ml_priv)->tbusy; 151 return ((struct claw_privbk *) dev->ml_priv)->tbusy;
155} 152}
156 153
@@ -233,8 +230,6 @@ static ssize_t claw_rbuff_show(struct device *dev,
233static ssize_t claw_rbuff_write(struct device *dev, 230static ssize_t claw_rbuff_write(struct device *dev,
234 struct device_attribute *attr, 231 struct device_attribute *attr,
235 const char *buf, size_t count); 232 const char *buf, size_t count);
236static int claw_add_files(struct device *dev);
237static void claw_remove_files(struct device *dev);
238 233
239/* Functions for System Validate */ 234/* Functions for System Validate */
240static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw); 235static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
@@ -267,12 +262,10 @@ static struct ccwgroup_driver claw_group_driver = {
267 .owner = THIS_MODULE, 262 .owner = THIS_MODULE,
268 .name = "claw", 263 .name = "claw",
269 }, 264 },
270 .max_slaves = 2, 265 .setup = claw_probe,
271 .driver_id = 0xC3D3C1E6, 266 .remove = claw_remove_device,
272 .probe = claw_probe, 267 .set_online = claw_new_device,
273 .remove = claw_remove_device, 268 .set_offline = claw_shutdown_device,
274 .set_online = claw_new_device,
275 .set_offline = claw_shutdown_device,
276 .prepare = claw_pm_prepare, 269 .prepare = claw_pm_prepare,
277}; 270};
278 271
@@ -293,30 +286,24 @@ static struct ccw_driver claw_ccw_driver = {
293 .int_class = IOINT_CLW, 286 .int_class = IOINT_CLW,
294}; 287};
295 288
296static ssize_t 289static ssize_t claw_driver_group_store(struct device_driver *ddrv,
297claw_driver_group_store(struct device_driver *ddrv, const char *buf, 290 const char *buf, size_t count)
298 size_t count)
299{ 291{
300 int err; 292 int err;
301 err = ccwgroup_create_from_string(claw_root_dev, 293 err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf);
302 claw_group_driver.driver_id,
303 &claw_ccw_driver, 2, buf);
304 return err ? err : count; 294 return err ? err : count;
305} 295}
306
307static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store); 296static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
308 297
309static struct attribute *claw_group_attrs[] = { 298static struct attribute *claw_drv_attrs[] = {
310 &driver_attr_group.attr, 299 &driver_attr_group.attr,
311 NULL, 300 NULL,
312}; 301};
313 302static struct attribute_group claw_drv_attr_group = {
314static struct attribute_group claw_group_attr_group = { 303 .attrs = claw_drv_attrs,
315 .attrs = claw_group_attrs,
316}; 304};
317 305static const struct attribute_group *claw_drv_attr_groups[] = {
318static const struct attribute_group *claw_group_attr_groups[] = { 306 &claw_drv_attr_group,
319 &claw_group_attr_group,
320 NULL, 307 NULL,
321}; 308};
322 309
@@ -324,60 +311,6 @@ static const struct attribute_group *claw_group_attr_groups[] = {
324* Key functions 311* Key functions
325*/ 312*/
326 313
327/*----------------------------------------------------------------*
328 * claw_probe *
329 * this function is called for each CLAW device. *
330 *----------------------------------------------------------------*/
331static int
332claw_probe(struct ccwgroup_device *cgdev)
333{
334 int rc;
335 struct claw_privbk *privptr=NULL;
336
337 CLAW_DBF_TEXT(2, setup, "probe");
338 if (!get_device(&cgdev->dev))
339 return -ENODEV;
340 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
341 dev_set_drvdata(&cgdev->dev, privptr);
342 if (privptr == NULL) {
343 probe_error(cgdev);
344 put_device(&cgdev->dev);
345 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
346 return -ENOMEM;
347 }
348 privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
349 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
350 if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
351 probe_error(cgdev);
352 put_device(&cgdev->dev);
353 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
354 return -ENOMEM;
355 }
356 memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
357 memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
358 memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
359 privptr->p_env->packing = 0;
360 privptr->p_env->write_buffers = 5;
361 privptr->p_env->read_buffers = 5;
362 privptr->p_env->read_size = CLAW_FRAME_SIZE;
363 privptr->p_env->write_size = CLAW_FRAME_SIZE;
364 rc = claw_add_files(&cgdev->dev);
365 if (rc) {
366 probe_error(cgdev);
367 put_device(&cgdev->dev);
368 dev_err(&cgdev->dev, "Creating the /proc files for a new"
369 " CLAW device failed\n");
370 CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
371 return rc;
372 }
373 privptr->p_env->p_priv = privptr;
374 cgdev->cdev[0]->handler = claw_irq_handler;
375 cgdev->cdev[1]->handler = claw_irq_handler;
376 CLAW_DBF_TEXT(2, setup, "prbext 0");
377
378 return 0;
379} /* end of claw_probe */
380
381/*-------------------------------------------------------------------* 314/*-------------------------------------------------------------------*
382 * claw_tx * 315 * claw_tx *
383 *-------------------------------------------------------------------*/ 316 *-------------------------------------------------------------------*/
@@ -3093,7 +3026,6 @@ claw_remove_device(struct ccwgroup_device *cgdev)
3093 dev_info(&cgdev->dev, " will be removed.\n"); 3026 dev_info(&cgdev->dev, " will be removed.\n");
3094 if (cgdev->state == CCWGROUP_ONLINE) 3027 if (cgdev->state == CCWGROUP_ONLINE)
3095 claw_shutdown_device(cgdev); 3028 claw_shutdown_device(cgdev);
3096 claw_remove_files(&cgdev->dev);
3097 kfree(priv->p_mtc_envelope); 3029 kfree(priv->p_mtc_envelope);
3098 priv->p_mtc_envelope=NULL; 3030 priv->p_mtc_envelope=NULL;
3099 kfree(priv->p_env); 3031 kfree(priv->p_env);
@@ -3321,7 +3253,6 @@ claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3321 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers); 3253 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3322 return count; 3254 return count;
3323} 3255}
3324
3325static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write); 3256static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
3326 3257
3327static struct attribute *claw_attr[] = { 3258static struct attribute *claw_attr[] = {
@@ -3332,40 +3263,73 @@ static struct attribute *claw_attr[] = {
3332 &dev_attr_host_name.attr, 3263 &dev_attr_host_name.attr,
3333 NULL, 3264 NULL,
3334}; 3265};
3335
3336static struct attribute_group claw_attr_group = { 3266static struct attribute_group claw_attr_group = {
3337 .attrs = claw_attr, 3267 .attrs = claw_attr,
3338}; 3268};
3269static const struct attribute_group *claw_attr_groups[] = {
3270 &claw_attr_group,
3271 NULL,
3272};
3273static const struct device_type claw_devtype = {
3274 .name = "claw",
3275 .groups = claw_attr_groups,
3276};
3339 3277
3340static int 3278/*----------------------------------------------------------------*
3341claw_add_files(struct device *dev) 3279 * claw_probe *
3280 * this function is called for each CLAW device. *
3281 *----------------------------------------------------------------*/
3282static int claw_probe(struct ccwgroup_device *cgdev)
3342{ 3283{
3343 CLAW_DBF_TEXT(2, setup, "add_file"); 3284 struct claw_privbk *privptr = NULL;
3344 return sysfs_create_group(&dev->kobj, &claw_attr_group);
3345}
3346 3285
3347static void 3286 CLAW_DBF_TEXT(2, setup, "probe");
3348claw_remove_files(struct device *dev) 3287 if (!get_device(&cgdev->dev))
3349{ 3288 return -ENODEV;
3350 CLAW_DBF_TEXT(2, setup, "rem_file"); 3289 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
3351 sysfs_remove_group(&dev->kobj, &claw_attr_group); 3290 dev_set_drvdata(&cgdev->dev, privptr);
3352} 3291 if (privptr == NULL) {
3292 probe_error(cgdev);
3293 put_device(&cgdev->dev);
3294 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3295 return -ENOMEM;
3296 }
3297 privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
3298 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
3299 if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
3300 probe_error(cgdev);
3301 put_device(&cgdev->dev);
3302 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3303 return -ENOMEM;
3304 }
3305 memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
3306 memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
3307 memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
3308 privptr->p_env->packing = 0;
3309 privptr->p_env->write_buffers = 5;
3310 privptr->p_env->read_buffers = 5;
3311 privptr->p_env->read_size = CLAW_FRAME_SIZE;
3312 privptr->p_env->write_size = CLAW_FRAME_SIZE;
3313 privptr->p_env->p_priv = privptr;
3314 cgdev->cdev[0]->handler = claw_irq_handler;
3315 cgdev->cdev[1]->handler = claw_irq_handler;
3316 cgdev->dev.type = &claw_devtype;
3317 CLAW_DBF_TEXT(2, setup, "prbext 0");
3318
3319 return 0;
3320} /* end of claw_probe */
3353 3321
3354/*--------------------------------------------------------------------* 3322/*--------------------------------------------------------------------*
3355* claw_init and cleanup * 3323* claw_init and cleanup *
3356*---------------------------------------------------------------------*/ 3324*---------------------------------------------------------------------*/
3357 3325
3358static void __exit 3326static void __exit claw_cleanup(void)
3359claw_cleanup(void)
3360{ 3327{
3361 driver_remove_file(&claw_group_driver.driver,
3362 &driver_attr_group);
3363 ccwgroup_driver_unregister(&claw_group_driver); 3328 ccwgroup_driver_unregister(&claw_group_driver);
3364 ccw_driver_unregister(&claw_ccw_driver); 3329 ccw_driver_unregister(&claw_ccw_driver);
3365 root_device_unregister(claw_root_dev); 3330 root_device_unregister(claw_root_dev);
3366 claw_unregister_debug_facility(); 3331 claw_unregister_debug_facility();
3367 pr_info("Driver unloaded\n"); 3332 pr_info("Driver unloaded\n");
3368
3369} 3333}
3370 3334
3371/** 3335/**
@@ -3374,8 +3338,7 @@ claw_cleanup(void)
3374 * 3338 *
3375 * @return 0 on success, !0 on error. 3339 * @return 0 on success, !0 on error.
3376 */ 3340 */
3377static int __init 3341static int __init claw_init(void)
3378claw_init(void)
3379{ 3342{
3380 int ret = 0; 3343 int ret = 0;
3381 3344
@@ -3394,7 +3357,7 @@ claw_init(void)
3394 ret = ccw_driver_register(&claw_ccw_driver); 3357 ret = ccw_driver_register(&claw_ccw_driver);
3395 if (ret) 3358 if (ret)
3396 goto ccw_err; 3359 goto ccw_err;
3397 claw_group_driver.driver.groups = claw_group_attr_groups; 3360 claw_group_driver.driver.groups = claw_drv_attr_groups;
3398 ret = ccwgroup_driver_register(&claw_group_driver); 3361 ret = ccwgroup_driver_register(&claw_group_driver);
3399 if (ret) 3362 if (ret)
3400 goto ccwgroup_err; 3363 goto ccwgroup_err;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 11f3b071f305..3cd25544a27a 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1296,6 +1296,11 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1296 1296
1297} 1297}
1298 1298
1299static const struct device_type ctcm_devtype = {
1300 .name = "ctcm",
1301 .groups = ctcm_attr_groups,
1302};
1303
1299/** 1304/**
1300 * Add ctcm specific attributes. 1305 * Add ctcm specific attributes.
1301 * Add ctcm private data. 1306 * Add ctcm private data.
@@ -1307,7 +1312,6 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1307static int ctcm_probe_device(struct ccwgroup_device *cgdev) 1312static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1308{ 1313{
1309 struct ctcm_priv *priv; 1314 struct ctcm_priv *priv;
1310 int rc;
1311 1315
1312 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, 1316 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1313 "%s %p", 1317 "%s %p",
@@ -1324,17 +1328,11 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1324 put_device(&cgdev->dev); 1328 put_device(&cgdev->dev);
1325 return -ENOMEM; 1329 return -ENOMEM;
1326 } 1330 }
1327
1328 rc = ctcm_add_files(&cgdev->dev);
1329 if (rc) {
1330 kfree(priv);
1331 put_device(&cgdev->dev);
1332 return rc;
1333 }
1334 priv->buffer_size = CTCM_BUFSIZE_DEFAULT; 1331 priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
1335 cgdev->cdev[0]->handler = ctcm_irq_handler; 1332 cgdev->cdev[0]->handler = ctcm_irq_handler;
1336 cgdev->cdev[1]->handler = ctcm_irq_handler; 1333 cgdev->cdev[1]->handler = ctcm_irq_handler;
1337 dev_set_drvdata(&cgdev->dev, priv); 1334 dev_set_drvdata(&cgdev->dev, priv);
1335 cgdev->dev.type = &ctcm_devtype;
1338 1336
1339 return 0; 1337 return 0;
1340} 1338}
@@ -1611,11 +1609,6 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1611 goto out_dev; 1609 goto out_dev;
1612 } 1610 }
1613 1611
1614 if (ctcm_add_attributes(&cgdev->dev)) {
1615 result = -ENODEV;
1616 goto out_unregister;
1617 }
1618
1619 strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); 1612 strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
1620 1613
1621 dev_info(&dev->dev, 1614 dev_info(&dev->dev,
@@ -1629,8 +1622,6 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1629 priv->channel[CTCM_WRITE]->id, priv->protocol); 1622 priv->channel[CTCM_WRITE]->id, priv->protocol);
1630 1623
1631 return 0; 1624 return 0;
1632out_unregister:
1633 unregister_netdev(dev);
1634out_dev: 1625out_dev:
1635 ctcm_free_netdevice(dev); 1626 ctcm_free_netdevice(dev);
1636out_ccw2: 1627out_ccw2:
@@ -1669,7 +1660,6 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
1669 /* Close the device */ 1660 /* Close the device */
1670 ctcm_close(dev); 1661 ctcm_close(dev);
1671 dev->flags &= ~IFF_RUNNING; 1662 dev->flags &= ~IFF_RUNNING;
1672 ctcm_remove_attributes(&cgdev->dev);
1673 channel_free(priv->channel[CTCM_READ]); 1663 channel_free(priv->channel[CTCM_READ]);
1674 } else 1664 } else
1675 dev = NULL; 1665 dev = NULL;
@@ -1711,7 +1701,6 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev)
1711 1701
1712 if (cgdev->state == CCWGROUP_ONLINE) 1702 if (cgdev->state == CCWGROUP_ONLINE)
1713 ctcm_shutdown_device(cgdev); 1703 ctcm_shutdown_device(cgdev);
1714 ctcm_remove_files(&cgdev->dev);
1715 dev_set_drvdata(&cgdev->dev, NULL); 1704 dev_set_drvdata(&cgdev->dev, NULL);
1716 kfree(priv); 1705 kfree(priv);
1717 put_device(&cgdev->dev); 1706 put_device(&cgdev->dev);
@@ -1778,9 +1767,7 @@ static struct ccwgroup_driver ctcm_group_driver = {
1778 .owner = THIS_MODULE, 1767 .owner = THIS_MODULE,
1779 .name = CTC_DRIVER_NAME, 1768 .name = CTC_DRIVER_NAME,
1780 }, 1769 },
1781 .max_slaves = 2, 1770 .setup = ctcm_probe_device,
1782 .driver_id = 0xC3E3C3D4, /* CTCM */
1783 .probe = ctcm_probe_device,
1784 .remove = ctcm_remove_device, 1771 .remove = ctcm_remove_device,
1785 .set_online = ctcm_new_device, 1772 .set_online = ctcm_new_device,
1786 .set_offline = ctcm_shutdown_device, 1773 .set_offline = ctcm_shutdown_device,
@@ -1789,31 +1776,25 @@ static struct ccwgroup_driver ctcm_group_driver = {
1789 .restore = ctcm_pm_resume, 1776 .restore = ctcm_pm_resume,
1790}; 1777};
1791 1778
1792static ssize_t 1779static ssize_t ctcm_driver_group_store(struct device_driver *ddrv,
1793ctcm_driver_group_store(struct device_driver *ddrv, const char *buf, 1780 const char *buf, size_t count)
1794 size_t count)
1795{ 1781{
1796 int err; 1782 int err;
1797 1783
1798 err = ccwgroup_create_from_string(ctcm_root_dev, 1784 err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf);
1799 ctcm_group_driver.driver_id,
1800 &ctcm_ccw_driver, 2, buf);
1801 return err ? err : count; 1785 return err ? err : count;
1802} 1786}
1803
1804static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store); 1787static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
1805 1788
1806static struct attribute *ctcm_group_attrs[] = { 1789static struct attribute *ctcm_drv_attrs[] = {
1807 &driver_attr_group.attr, 1790 &driver_attr_group.attr,
1808 NULL, 1791 NULL,
1809}; 1792};
1810 1793static struct attribute_group ctcm_drv_attr_group = {
1811static struct attribute_group ctcm_group_attr_group = { 1794 .attrs = ctcm_drv_attrs,
1812 .attrs = ctcm_group_attrs,
1813}; 1795};
1814 1796static const struct attribute_group *ctcm_drv_attr_groups[] = {
1815static const struct attribute_group *ctcm_group_attr_groups[] = { 1797 &ctcm_drv_attr_group,
1816 &ctcm_group_attr_group,
1817 NULL, 1798 NULL,
1818}; 1799};
1819 1800
@@ -1829,7 +1810,6 @@ static const struct attribute_group *ctcm_group_attr_groups[] = {
1829 */ 1810 */
1830static void __exit ctcm_exit(void) 1811static void __exit ctcm_exit(void)
1831{ 1812{
1832 driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
1833 ccwgroup_driver_unregister(&ctcm_group_driver); 1813 ccwgroup_driver_unregister(&ctcm_group_driver);
1834 ccw_driver_unregister(&ctcm_ccw_driver); 1814 ccw_driver_unregister(&ctcm_ccw_driver);
1835 root_device_unregister(ctcm_root_dev); 1815 root_device_unregister(ctcm_root_dev);
@@ -1867,7 +1847,7 @@ static int __init ctcm_init(void)
1867 ret = ccw_driver_register(&ctcm_ccw_driver); 1847 ret = ccw_driver_register(&ctcm_ccw_driver);
1868 if (ret) 1848 if (ret)
1869 goto ccw_err; 1849 goto ccw_err;
1870 ctcm_group_driver.driver.groups = ctcm_group_attr_groups; 1850 ctcm_group_driver.driver.groups = ctcm_drv_attr_groups;
1871 ret = ccwgroup_driver_register(&ctcm_group_driver); 1851 ret = ccwgroup_driver_register(&ctcm_group_driver);
1872 if (ret) 1852 if (ret)
1873 goto ccwgroup_err; 1853 goto ccwgroup_err;
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index 24d5215eb0c4..b9056a55d995 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -225,13 +225,7 @@ struct ctcm_priv {
225int ctcm_open(struct net_device *dev); 225int ctcm_open(struct net_device *dev);
226int ctcm_close(struct net_device *dev); 226int ctcm_close(struct net_device *dev);
227 227
228/* 228extern const struct attribute_group *ctcm_attr_groups[];
229 * prototypes for non-static sysfs functions
230 */
231int ctcm_add_attributes(struct device *dev);
232void ctcm_remove_attributes(struct device *dev);
233int ctcm_add_files(struct device *dev);
234void ctcm_remove_files(struct device *dev);
235 229
236/* 230/*
237 * Compatibility macros for busy handling 231 * Compatibility macros for busy handling
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 650aec1839e9..0c27ae726475 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -13,6 +13,7 @@
13#define KMSG_COMPONENT "ctcm" 13#define KMSG_COMPONENT "ctcm"
14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15 15
16#include <linux/device.h>
16#include <linux/sysfs.h> 17#include <linux/sysfs.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18#include "ctcm_main.h" 19#include "ctcm_main.h"
@@ -108,10 +109,12 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
108} 109}
109 110
110static ssize_t stats_show(struct device *dev, 111static ssize_t stats_show(struct device *dev,
111 struct device_attribute *attr, char *buf) 112 struct device_attribute *attr, char *buf)
112{ 113{
114 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
113 struct ctcm_priv *priv = dev_get_drvdata(dev); 115 struct ctcm_priv *priv = dev_get_drvdata(dev);
114 if (!priv) 116
117 if (!priv || gdev->state != CCWGROUP_ONLINE)
115 return -ENODEV; 118 return -ENODEV;
116 ctcm_print_statistics(priv); 119 ctcm_print_statistics(priv);
117 return sprintf(buf, "0\n"); 120 return sprintf(buf, "0\n");
@@ -190,34 +193,14 @@ static struct attribute *ctcm_attr[] = {
190 &dev_attr_protocol.attr, 193 &dev_attr_protocol.attr,
191 &dev_attr_type.attr, 194 &dev_attr_type.attr,
192 &dev_attr_buffer.attr, 195 &dev_attr_buffer.attr,
196 &dev_attr_stats.attr,
193 NULL, 197 NULL,
194}; 198};
195 199
196static struct attribute_group ctcm_attr_group = { 200static struct attribute_group ctcm_attr_group = {
197 .attrs = ctcm_attr, 201 .attrs = ctcm_attr,
198}; 202};
199 203const struct attribute_group *ctcm_attr_groups[] = {
200int ctcm_add_attributes(struct device *dev) 204 &ctcm_attr_group,
201{ 205 NULL,
202 int rc; 206};
203
204 rc = device_create_file(dev, &dev_attr_stats);
205
206 return rc;
207}
208
209void ctcm_remove_attributes(struct device *dev)
210{
211 device_remove_file(dev, &dev_attr_stats);
212}
213
214int ctcm_add_files(struct device *dev)
215{
216 return sysfs_create_group(&dev->kobj, &ctcm_attr_group);
217}
218
219void ctcm_remove_files(struct device *dev)
220{
221 sysfs_remove_group(&dev->kobj, &ctcm_attr_group);
222}
223
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 687efe4d589a..a3adf4b1c60d 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -30,7 +30,6 @@
30#include <linux/if.h> 30#include <linux/if.h>
31#include <linux/netdevice.h> 31#include <linux/netdevice.h>
32#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
33#include <linux/trdevice.h>
34#include <linux/fddidevice.h> 33#include <linux/fddidevice.h>
35#include <linux/inetdevice.h> 34#include <linux/inetdevice.h>
36#include <linux/in.h> 35#include <linux/in.h>
@@ -50,8 +49,7 @@
50#include "lcs.h" 49#include "lcs.h"
51 50
52 51
53#if !defined(CONFIG_ETHERNET) && \ 52#if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI)
54 !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
55#error Cannot compile lcs.c without some net devices switched on. 53#error Cannot compile lcs.c without some net devices switched on.
56#endif 54#endif
57 55
@@ -1166,10 +1164,7 @@ static void
1166lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) 1164lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
1167{ 1165{
1168 LCS_DBF_TEXT(4,trace, "getmac"); 1166 LCS_DBF_TEXT(4,trace, "getmac");
1169 if (dev->type == ARPHRD_IEEE802_TR) 1167 ip_eth_mc_map(ipm, mac);
1170 ip_tr_mc_map(ipm, mac);
1171 else
1172 ip_eth_mc_map(ipm, mac);
1173} 1168}
1174 1169
1175/** 1170/**
@@ -1641,12 +1636,6 @@ lcs_startlan_auto(struct lcs_card *card)
1641 return 0; 1636 return 0;
1642 1637
1643#endif 1638#endif
1644#ifdef CONFIG_TR
1645 card->lan_type = LCS_FRAME_TYPE_TR;
1646 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1647 if (rc == 0)
1648 return 0;
1649#endif
1650#ifdef CONFIG_FDDI 1639#ifdef CONFIG_FDDI
1651 card->lan_type = LCS_FRAME_TYPE_FDDI; 1640 card->lan_type = LCS_FRAME_TYPE_FDDI;
1652 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); 1641 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
@@ -2051,10 +2040,17 @@ static struct attribute * lcs_attrs[] = {
2051 &dev_attr_recover.attr, 2040 &dev_attr_recover.attr,
2052 NULL, 2041 NULL,
2053}; 2042};
2054
2055static struct attribute_group lcs_attr_group = { 2043static struct attribute_group lcs_attr_group = {
2056 .attrs = lcs_attrs, 2044 .attrs = lcs_attrs,
2057}; 2045};
2046static const struct attribute_group *lcs_attr_groups[] = {
2047 &lcs_attr_group,
2048 NULL,
2049};
2050static const struct device_type lcs_devtype = {
2051 .name = "lcs",
2052 .groups = lcs_attr_groups,
2053};
2058 2054
2059/** 2055/**
2060 * lcs_probe_device is called on establishing a new ccwgroup_device. 2056 * lcs_probe_device is called on establishing a new ccwgroup_device.
@@ -2063,7 +2059,6 @@ static int
2063lcs_probe_device(struct ccwgroup_device *ccwgdev) 2059lcs_probe_device(struct ccwgroup_device *ccwgdev)
2064{ 2060{
2065 struct lcs_card *card; 2061 struct lcs_card *card;
2066 int ret;
2067 2062
2068 if (!get_device(&ccwgdev->dev)) 2063 if (!get_device(&ccwgdev->dev))
2069 return -ENODEV; 2064 return -ENODEV;
@@ -2075,12 +2070,6 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
2075 put_device(&ccwgdev->dev); 2070 put_device(&ccwgdev->dev);
2076 return -ENOMEM; 2071 return -ENOMEM;
2077 } 2072 }
2078 ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2079 if (ret) {
2080 lcs_free_card(card);
2081 put_device(&ccwgdev->dev);
2082 return ret;
2083 }
2084 dev_set_drvdata(&ccwgdev->dev, card); 2073 dev_set_drvdata(&ccwgdev->dev, card);
2085 ccwgdev->cdev[0]->handler = lcs_irq; 2074 ccwgdev->cdev[0]->handler = lcs_irq;
2086 ccwgdev->cdev[1]->handler = lcs_irq; 2075 ccwgdev->cdev[1]->handler = lcs_irq;
@@ -2089,7 +2078,9 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
2089 card->thread_start_mask = 0; 2078 card->thread_start_mask = 0;
2090 card->thread_allowed_mask = 0; 2079 card->thread_allowed_mask = 0;
2091 card->thread_running_mask = 0; 2080 card->thread_running_mask = 0;
2092 return 0; 2081 ccwgdev->dev.type = &lcs_devtype;
2082
2083 return 0;
2093} 2084}
2094 2085
2095static int 2086static int
@@ -2172,12 +2163,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2172 dev = alloc_etherdev(0); 2163 dev = alloc_etherdev(0);
2173 break; 2164 break;
2174#endif 2165#endif
2175#ifdef CONFIG_TR
2176 case LCS_FRAME_TYPE_TR:
2177 card->lan_type_trans = tr_type_trans;
2178 dev = alloc_trdev(0);
2179 break;
2180#endif
2181#ifdef CONFIG_FDDI 2166#ifdef CONFIG_FDDI
2182 case LCS_FRAME_TYPE_FDDI: 2167 case LCS_FRAME_TYPE_FDDI:
2183 card->lan_type_trans = fddi_type_trans; 2168 card->lan_type_trans = fddi_type_trans;
@@ -2323,9 +2308,9 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
2323 } 2308 }
2324 if (card->dev) 2309 if (card->dev)
2325 unregister_netdev(card->dev); 2310 unregister_netdev(card->dev);
2326 sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2327 lcs_cleanup_card(card); 2311 lcs_cleanup_card(card);
2328 lcs_free_card(card); 2312 lcs_free_card(card);
2313 dev_set_drvdata(&ccwgdev->dev, NULL);
2329 put_device(&ccwgdev->dev); 2314 put_device(&ccwgdev->dev);
2330} 2315}
2331 2316
@@ -2410,9 +2395,7 @@ static struct ccwgroup_driver lcs_group_driver = {
2410 .owner = THIS_MODULE, 2395 .owner = THIS_MODULE,
2411 .name = "lcs", 2396 .name = "lcs",
2412 }, 2397 },
2413 .max_slaves = 2, 2398 .setup = lcs_probe_device,
2414 .driver_id = 0xD3C3E2,
2415 .probe = lcs_probe_device,
2416 .remove = lcs_remove_device, 2399 .remove = lcs_remove_device,
2417 .set_online = lcs_new_device, 2400 .set_online = lcs_new_device,
2418 .set_offline = lcs_shutdown_device, 2401 .set_offline = lcs_shutdown_device,
@@ -2423,30 +2406,24 @@ static struct ccwgroup_driver lcs_group_driver = {
2423 .restore = lcs_restore, 2406 .restore = lcs_restore,
2424}; 2407};
2425 2408
2426static ssize_t 2409static ssize_t lcs_driver_group_store(struct device_driver *ddrv,
2427lcs_driver_group_store(struct device_driver *ddrv, const char *buf, 2410 const char *buf, size_t count)
2428 size_t count)
2429{ 2411{
2430 int err; 2412 int err;
2431 err = ccwgroup_create_from_string(lcs_root_dev, 2413 err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf);
2432 lcs_group_driver.driver_id,
2433 &lcs_ccw_driver, 2, buf);
2434 return err ? err : count; 2414 return err ? err : count;
2435} 2415}
2436
2437static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store); 2416static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
2438 2417
2439static struct attribute *lcs_group_attrs[] = { 2418static struct attribute *lcs_drv_attrs[] = {
2440 &driver_attr_group.attr, 2419 &driver_attr_group.attr,
2441 NULL, 2420 NULL,
2442}; 2421};
2443 2422static struct attribute_group lcs_drv_attr_group = {
2444static struct attribute_group lcs_group_attr_group = { 2423 .attrs = lcs_drv_attrs,
2445 .attrs = lcs_group_attrs,
2446}; 2424};
2447 2425static const struct attribute_group *lcs_drv_attr_groups[] = {
2448static const struct attribute_group *lcs_group_attr_groups[] = { 2426 &lcs_drv_attr_group,
2449 &lcs_group_attr_group,
2450 NULL, 2427 NULL,
2451}; 2428};
2452 2429
@@ -2470,7 +2447,7 @@ __init lcs_init_module(void)
2470 rc = ccw_driver_register(&lcs_ccw_driver); 2447 rc = ccw_driver_register(&lcs_ccw_driver);
2471 if (rc) 2448 if (rc)
2472 goto ccw_err; 2449 goto ccw_err;
2473 lcs_group_driver.driver.groups = lcs_group_attr_groups; 2450 lcs_group_driver.driver.groups = lcs_drv_attr_groups;
2474 rc = ccwgroup_driver_register(&lcs_group_driver); 2451 rc = ccwgroup_driver_register(&lcs_group_driver);
2475 if (rc) 2452 if (rc)
2476 goto ccwgroup_err; 2453 goto ccwgroup_err;
@@ -2496,8 +2473,6 @@ __exit lcs_cleanup_module(void)
2496{ 2473{
2497 pr_info("Terminating lcs module.\n"); 2474 pr_info("Terminating lcs module.\n");
2498 LCS_DBF_TEXT(0, trace, "cleanup"); 2475 LCS_DBF_TEXT(0, trace, "cleanup");
2499 driver_remove_file(&lcs_group_driver.driver,
2500 &driver_attr_group);
2501 ccwgroup_driver_unregister(&lcs_group_driver); 2476 ccwgroup_driver_unregister(&lcs_group_driver);
2502 ccw_driver_unregister(&lcs_ccw_driver); 2477 ccw_driver_unregister(&lcs_ccw_driver);
2503 root_device_unregister(lcs_root_dev); 2478 root_device_unregister(lcs_root_dev);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index ec7921b5138e..06e8f31ff3dc 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -13,8 +13,6 @@
13 13
14#include <linux/if.h> 14#include <linux/if.h>
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/if_tr.h>
17#include <linux/trdevice.h>
18#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
19#include <linux/if_vlan.h> 17#include <linux/if_vlan.h>
20#include <linux/ctype.h> 18#include <linux/ctype.h>
@@ -676,8 +674,6 @@ struct qeth_card_options {
676 struct qeth_ipa_info adp; /*Adapter parameters*/ 674 struct qeth_ipa_info adp; /*Adapter parameters*/
677 struct qeth_routing_info route6; 675 struct qeth_routing_info route6;
678 struct qeth_ipa_info ipa6; 676 struct qeth_ipa_info ipa6;
679 int broadcast_mode;
680 int macaddr_mode;
681 int fake_broadcast; 677 int fake_broadcast;
682 int add_hhlen; 678 int add_hhlen;
683 int layer2; 679 int layer2;
@@ -711,7 +707,16 @@ struct qeth_discipline {
711 qdio_handler_t *input_handler; 707 qdio_handler_t *input_handler;
712 qdio_handler_t *output_handler; 708 qdio_handler_t *output_handler;
713 int (*recover)(void *ptr); 709 int (*recover)(void *ptr);
714 struct ccwgroup_driver *ccwgdriver; 710 int (*setup) (struct ccwgroup_device *);
711 void (*remove) (struct ccwgroup_device *);
712 int (*set_online) (struct ccwgroup_device *);
713 int (*set_offline) (struct ccwgroup_device *);
714 void (*shutdown)(struct ccwgroup_device *);
715 int (*prepare) (struct ccwgroup_device *);
716 void (*complete) (struct ccwgroup_device *);
717 int (*freeze)(struct ccwgroup_device *);
718 int (*thaw) (struct ccwgroup_device *);
719 int (*restore)(struct ccwgroup_device *);
715}; 720};
716 721
717struct qeth_vlan_vid { 722struct qeth_vlan_vid {
@@ -775,7 +780,7 @@ struct qeth_card {
775 struct qeth_perf_stats perf_stats; 780 struct qeth_perf_stats perf_stats;
776 int read_or_write_problem; 781 int read_or_write_problem;
777 struct qeth_osn_info osn_info; 782 struct qeth_osn_info osn_info;
778 struct qeth_discipline discipline; 783 struct qeth_discipline *discipline;
779 atomic_t force_alloc_skb; 784 atomic_t force_alloc_skb;
780 struct service_level qeth_service_level; 785 struct service_level qeth_service_level;
781 struct qdio_ssqd_desc ssqd; 786 struct qdio_ssqd_desc ssqd;
@@ -841,16 +846,15 @@ static inline int qeth_is_diagass_supported(struct qeth_card *card,
841 return card->info.diagass_support & (__u32)cmd; 846 return card->info.diagass_support & (__u32)cmd;
842} 847}
843 848
844extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; 849extern struct qeth_discipline qeth_l2_discipline;
845extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; 850extern struct qeth_discipline qeth_l3_discipline;
851extern const struct attribute_group *qeth_generic_attr_groups[];
852extern const struct attribute_group *qeth_osn_attr_groups[];
853
846const char *qeth_get_cardname_short(struct qeth_card *); 854const char *qeth_get_cardname_short(struct qeth_card *);
847int qeth_realloc_buffer_pool(struct qeth_card *, int); 855int qeth_realloc_buffer_pool(struct qeth_card *, int);
848int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); 856int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
849void qeth_core_free_discipline(struct qeth_card *); 857void qeth_core_free_discipline(struct qeth_card *);
850int qeth_core_create_device_attributes(struct device *);
851void qeth_core_remove_device_attributes(struct device *);
852int qeth_core_create_osn_attributes(struct device *);
853void qeth_core_remove_osn_attributes(struct device *);
854void qeth_buffer_reclaim_work(struct work_struct *); 858void qeth_buffer_reclaim_work(struct work_struct *);
855 859
856/* exports for qeth discipline device drivers */ 860/* exports for qeth discipline device drivers */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 120955c66410..e118e1e1e1c1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1329,8 +1329,6 @@ static void qeth_set_intial_options(struct qeth_card *card)
1329{ 1329{
1330 card->options.route4.type = NO_ROUTER; 1330 card->options.route4.type = NO_ROUTER;
1331 card->options.route6.type = NO_ROUTER; 1331 card->options.route6.type = NO_ROUTER;
1332 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1333 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1334 card->options.fake_broadcast = 0; 1332 card->options.fake_broadcast = 0;
1335 card->options.add_hhlen = DEFAULT_ADD_HHLEN; 1333 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1336 card->options.performance_stats = 0; 1334 card->options.performance_stats = 0;
@@ -1365,7 +1363,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
1365 card->write.state != CH_STATE_UP) 1363 card->write.state != CH_STATE_UP)
1366 return; 1364 return;
1367 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { 1365 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1368 ts = kthread_run(card->discipline.recover, (void *)card, 1366 ts = kthread_run(card->discipline->recover, (void *)card,
1369 "qeth_recover"); 1367 "qeth_recover");
1370 if (IS_ERR(ts)) { 1368 if (IS_ERR(ts)) {
1371 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 1369 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
@@ -1672,7 +1670,8 @@ static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
1672{ 1670{
1673 QETH_DBF_TEXT(SETUP, 2, "cfgblkt"); 1671 QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
1674 1672
1675 if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) { 1673 if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
1674 (prcd[76] == 0xF5 || prcd[76] == 0xF6)) {
1676 card->info.blkt.time_total = 250; 1675 card->info.blkt.time_total = 250;
1677 card->info.blkt.inter_packet = 5; 1676 card->info.blkt.inter_packet = 5;
1678 card->info.blkt.inter_packet_jumbo = 15; 1677 card->info.blkt.inter_packet_jumbo = 15;
@@ -3338,7 +3337,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3338 if (rc) { 3337 if (rc) {
3339 queue->card->stats.tx_errors += count; 3338 queue->card->stats.tx_errors += count;
3340 /* ignore temporary SIGA errors without busy condition */ 3339 /* ignore temporary SIGA errors without busy condition */
3341 if (rc == QDIO_ERROR_SIGA_TARGET) 3340 if (rc == -ENOBUFS)
3342 return; 3341 return;
3343 QETH_CARD_TEXT(queue->card, 2, "flushbuf"); 3342 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3344 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); 3343 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
@@ -3532,7 +3531,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3532 int i; 3531 int i;
3533 3532
3534 QETH_CARD_TEXT(card, 6, "qdouhdl"); 3533 QETH_CARD_TEXT(card, 6, "qdouhdl");
3535 if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { 3534 if (qdio_error & QDIO_ERROR_FATAL) {
3536 QETH_CARD_TEXT(card, 2, "achkcond"); 3535 QETH_CARD_TEXT(card, 2, "achkcond");
3537 netif_stop_queue(card->dev); 3536 netif_stop_queue(card->dev);
3538 qeth_schedule_recovery(card); 3537 qeth_schedule_recovery(card);
@@ -4540,7 +4539,8 @@ static void qeth_determine_capabilities(struct qeth_card *card)
4540 goto out_offline; 4539 goto out_offline;
4541 } 4540 }
4542 qeth_configure_unitaddr(card, prcd); 4541 qeth_configure_unitaddr(card, prcd);
4543 qeth_configure_blkt_default(card, prcd); 4542 if (ddev_offline)
4543 qeth_configure_blkt_default(card, prcd);
4544 kfree(prcd); 4544 kfree(prcd);
4545 4545
4546 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 4546 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
@@ -4627,7 +4627,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
4627 goto out_free_in_sbals; 4627 goto out_free_in_sbals;
4628 } 4628 }
4629 for (i = 0; i < card->qdio.no_in_queues; ++i) 4629 for (i = 0; i < card->qdio.no_in_queues; ++i)
4630 queue_start_poll[i] = card->discipline.start_poll; 4630 queue_start_poll[i] = card->discipline->start_poll;
4631 4631
4632 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); 4632 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
4633 4633
@@ -4651,8 +4651,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
4651 init_data.qib_param_field = qib_param_field; 4651 init_data.qib_param_field = qib_param_field;
4652 init_data.no_input_qs = card->qdio.no_in_queues; 4652 init_data.no_input_qs = card->qdio.no_in_queues;
4653 init_data.no_output_qs = card->qdio.no_out_queues; 4653 init_data.no_output_qs = card->qdio.no_out_queues;
4654 init_data.input_handler = card->discipline.input_handler; 4654 init_data.input_handler = card->discipline->input_handler;
4655 init_data.output_handler = card->discipline.output_handler; 4655 init_data.output_handler = card->discipline->output_handler;
4656 init_data.queue_start_poll_array = queue_start_poll; 4656 init_data.queue_start_poll_array = queue_start_poll;
4657 init_data.int_parm = (unsigned long) card; 4657 init_data.int_parm = (unsigned long) card;
4658 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; 4658 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
@@ -4737,13 +4737,6 @@ static struct ccw_driver qeth_ccw_driver = {
4737 .remove = ccwgroup_remove_ccwdev, 4737 .remove = ccwgroup_remove_ccwdev,
4738}; 4738};
4739 4739
4740static int qeth_core_driver_group(const char *buf, struct device *root_dev,
4741 unsigned long driver_id)
4742{
4743 return ccwgroup_create_from_string(root_dev, driver_id,
4744 &qeth_ccw_driver, 3, buf);
4745}
4746
4747int qeth_core_hardsetup_card(struct qeth_card *card) 4740int qeth_core_hardsetup_card(struct qeth_card *card)
4748{ 4741{
4749 int retries = 0; 4742 int retries = 0;
@@ -4909,11 +4902,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
4909 break; 4902 break;
4910 case QETH_HEADER_TYPE_LAYER3: 4903 case QETH_HEADER_TYPE_LAYER3:
4911 skb_len = (*hdr)->hdr.l3.length; 4904 skb_len = (*hdr)->hdr.l3.length;
4912 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 4905 headroom = ETH_HLEN;
4913 (card->info.link_type == QETH_LINK_TYPE_HSTR))
4914 headroom = TR_HLEN;
4915 else
4916 headroom = ETH_HLEN;
4917 break; 4906 break;
4918 case QETH_HEADER_TYPE_OSN: 4907 case QETH_HEADER_TYPE_OSN:
4919 skb_len = (*hdr)->hdr.osn.pdu_length; 4908 skb_len = (*hdr)->hdr.osn.pdu_length;
@@ -5044,17 +5033,15 @@ int qeth_core_load_discipline(struct qeth_card *card,
5044 mutex_lock(&qeth_mod_mutex); 5033 mutex_lock(&qeth_mod_mutex);
5045 switch (discipline) { 5034 switch (discipline) {
5046 case QETH_DISCIPLINE_LAYER3: 5035 case QETH_DISCIPLINE_LAYER3:
5047 card->discipline.ccwgdriver = try_then_request_module( 5036 card->discipline = try_then_request_module(
5048 symbol_get(qeth_l3_ccwgroup_driver), 5037 symbol_get(qeth_l3_discipline), "qeth_l3");
5049 "qeth_l3");
5050 break; 5038 break;
5051 case QETH_DISCIPLINE_LAYER2: 5039 case QETH_DISCIPLINE_LAYER2:
5052 card->discipline.ccwgdriver = try_then_request_module( 5040 card->discipline = try_then_request_module(
5053 symbol_get(qeth_l2_ccwgroup_driver), 5041 symbol_get(qeth_l2_discipline), "qeth_l2");
5054 "qeth_l2");
5055 break; 5042 break;
5056 } 5043 }
5057 if (!card->discipline.ccwgdriver) { 5044 if (!card->discipline) {
5058 dev_err(&card->gdev->dev, "There is no kernel module to " 5045 dev_err(&card->gdev->dev, "There is no kernel module to "
5059 "support discipline %d\n", discipline); 5046 "support discipline %d\n", discipline);
5060 rc = -EINVAL; 5047 rc = -EINVAL;
@@ -5066,12 +5053,21 @@ int qeth_core_load_discipline(struct qeth_card *card,
5066void qeth_core_free_discipline(struct qeth_card *card) 5053void qeth_core_free_discipline(struct qeth_card *card)
5067{ 5054{
5068 if (card->options.layer2) 5055 if (card->options.layer2)
5069 symbol_put(qeth_l2_ccwgroup_driver); 5056 symbol_put(qeth_l2_discipline);
5070 else 5057 else
5071 symbol_put(qeth_l3_ccwgroup_driver); 5058 symbol_put(qeth_l3_discipline);
5072 card->discipline.ccwgdriver = NULL; 5059 card->discipline = NULL;
5073} 5060}
5074 5061
5062static const struct device_type qeth_generic_devtype = {
5063 .name = "qeth_generic",
5064 .groups = qeth_generic_attr_groups,
5065};
5066static const struct device_type qeth_osn_devtype = {
5067 .name = "qeth_osn",
5068 .groups = qeth_osn_attr_groups,
5069};
5070
5075static int qeth_core_probe_device(struct ccwgroup_device *gdev) 5071static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5076{ 5072{
5077 struct qeth_card *card; 5073 struct qeth_card *card;
@@ -5126,18 +5122,17 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5126 } 5122 }
5127 5123
5128 if (card->info.type == QETH_CARD_TYPE_OSN) 5124 if (card->info.type == QETH_CARD_TYPE_OSN)
5129 rc = qeth_core_create_osn_attributes(dev); 5125 gdev->dev.type = &qeth_osn_devtype;
5130 else 5126 else
5131 rc = qeth_core_create_device_attributes(dev); 5127 gdev->dev.type = &qeth_generic_devtype;
5132 if (rc) 5128
5133 goto err_dbf;
5134 switch (card->info.type) { 5129 switch (card->info.type) {
5135 case QETH_CARD_TYPE_OSN: 5130 case QETH_CARD_TYPE_OSN:
5136 case QETH_CARD_TYPE_OSM: 5131 case QETH_CARD_TYPE_OSM:
5137 rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); 5132 rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
5138 if (rc) 5133 if (rc)
5139 goto err_attr; 5134 goto err_dbf;
5140 rc = card->discipline.ccwgdriver->probe(card->gdev); 5135 rc = card->discipline->setup(card->gdev);
5141 if (rc) 5136 if (rc)
5142 goto err_disc; 5137 goto err_disc;
5143 case QETH_CARD_TYPE_OSD: 5138 case QETH_CARD_TYPE_OSD:
@@ -5155,11 +5150,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5155 5150
5156err_disc: 5151err_disc:
5157 qeth_core_free_discipline(card); 5152 qeth_core_free_discipline(card);
5158err_attr:
5159 if (card->info.type == QETH_CARD_TYPE_OSN)
5160 qeth_core_remove_osn_attributes(dev);
5161 else
5162 qeth_core_remove_device_attributes(dev);
5163err_dbf: 5153err_dbf:
5164 debug_unregister(card->debug); 5154 debug_unregister(card->debug);
5165err_card: 5155err_card:
@@ -5176,14 +5166,8 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
5176 5166
5177 QETH_DBF_TEXT(SETUP, 2, "removedv"); 5167 QETH_DBF_TEXT(SETUP, 2, "removedv");
5178 5168
5179 if (card->info.type == QETH_CARD_TYPE_OSN) { 5169 if (card->discipline) {
5180 qeth_core_remove_osn_attributes(&gdev->dev); 5170 card->discipline->remove(gdev);
5181 } else {
5182 qeth_core_remove_device_attributes(&gdev->dev);
5183 }
5184
5185 if (card->discipline.ccwgdriver) {
5186 card->discipline.ccwgdriver->remove(gdev);
5187 qeth_core_free_discipline(card); 5171 qeth_core_free_discipline(card);
5188 } 5172 }
5189 5173
@@ -5203,7 +5187,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
5203 int rc = 0; 5187 int rc = 0;
5204 int def_discipline; 5188 int def_discipline;
5205 5189
5206 if (!card->discipline.ccwgdriver) { 5190 if (!card->discipline) {
5207 if (card->info.type == QETH_CARD_TYPE_IQD) 5191 if (card->info.type == QETH_CARD_TYPE_IQD)
5208 def_discipline = QETH_DISCIPLINE_LAYER3; 5192 def_discipline = QETH_DISCIPLINE_LAYER3;
5209 else 5193 else
@@ -5211,11 +5195,11 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
5211 rc = qeth_core_load_discipline(card, def_discipline); 5195 rc = qeth_core_load_discipline(card, def_discipline);
5212 if (rc) 5196 if (rc)
5213 goto err; 5197 goto err;
5214 rc = card->discipline.ccwgdriver->probe(card->gdev); 5198 rc = card->discipline->setup(card->gdev);
5215 if (rc) 5199 if (rc)
5216 goto err; 5200 goto err;
5217 } 5201 }
5218 rc = card->discipline.ccwgdriver->set_online(gdev); 5202 rc = card->discipline->set_online(gdev);
5219err: 5203err:
5220 return rc; 5204 return rc;
5221} 5205}
@@ -5223,58 +5207,52 @@ err:
5223static int qeth_core_set_offline(struct ccwgroup_device *gdev) 5207static int qeth_core_set_offline(struct ccwgroup_device *gdev)
5224{ 5208{
5225 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5209 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5226 return card->discipline.ccwgdriver->set_offline(gdev); 5210 return card->discipline->set_offline(gdev);
5227} 5211}
5228 5212
5229static void qeth_core_shutdown(struct ccwgroup_device *gdev) 5213static void qeth_core_shutdown(struct ccwgroup_device *gdev)
5230{ 5214{
5231 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5215 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5232 if (card->discipline.ccwgdriver && 5216 if (card->discipline && card->discipline->shutdown)
5233 card->discipline.ccwgdriver->shutdown) 5217 card->discipline->shutdown(gdev);
5234 card->discipline.ccwgdriver->shutdown(gdev);
5235} 5218}
5236 5219
5237static int qeth_core_prepare(struct ccwgroup_device *gdev) 5220static int qeth_core_prepare(struct ccwgroup_device *gdev)
5238{ 5221{
5239 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5222 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5240 if (card->discipline.ccwgdriver && 5223 if (card->discipline && card->discipline->prepare)
5241 card->discipline.ccwgdriver->prepare) 5224 return card->discipline->prepare(gdev);
5242 return card->discipline.ccwgdriver->prepare(gdev);
5243 return 0; 5225 return 0;
5244} 5226}
5245 5227
5246static void qeth_core_complete(struct ccwgroup_device *gdev) 5228static void qeth_core_complete(struct ccwgroup_device *gdev)
5247{ 5229{
5248 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5230 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5249 if (card->discipline.ccwgdriver && 5231 if (card->discipline && card->discipline->complete)
5250 card->discipline.ccwgdriver->complete) 5232 card->discipline->complete(gdev);
5251 card->discipline.ccwgdriver->complete(gdev);
5252} 5233}
5253 5234
5254static int qeth_core_freeze(struct ccwgroup_device *gdev) 5235static int qeth_core_freeze(struct ccwgroup_device *gdev)
5255{ 5236{
5256 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5237 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5257 if (card->discipline.ccwgdriver && 5238 if (card->discipline && card->discipline->freeze)
5258 card->discipline.ccwgdriver->freeze) 5239 return card->discipline->freeze(gdev);
5259 return card->discipline.ccwgdriver->freeze(gdev);
5260 return 0; 5240 return 0;
5261} 5241}
5262 5242
5263static int qeth_core_thaw(struct ccwgroup_device *gdev) 5243static int qeth_core_thaw(struct ccwgroup_device *gdev)
5264{ 5244{
5265 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5245 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5266 if (card->discipline.ccwgdriver && 5246 if (card->discipline && card->discipline->thaw)
5267 card->discipline.ccwgdriver->thaw) 5247 return card->discipline->thaw(gdev);
5268 return card->discipline.ccwgdriver->thaw(gdev);
5269 return 0; 5248 return 0;
5270} 5249}
5271 5250
5272static int qeth_core_restore(struct ccwgroup_device *gdev) 5251static int qeth_core_restore(struct ccwgroup_device *gdev)
5273{ 5252{
5274 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5253 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5275 if (card->discipline.ccwgdriver && 5254 if (card->discipline && card->discipline->restore)
5276 card->discipline.ccwgdriver->restore) 5255 return card->discipline->restore(gdev);
5277 return card->discipline.ccwgdriver->restore(gdev);
5278 return 0; 5256 return 0;
5279} 5257}
5280 5258
@@ -5283,8 +5261,7 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
5283 .owner = THIS_MODULE, 5261 .owner = THIS_MODULE,
5284 .name = "qeth", 5262 .name = "qeth",
5285 }, 5263 },
5286 .driver_id = 0xD8C5E3C8, 5264 .setup = qeth_core_probe_device,
5287 .probe = qeth_core_probe_device,
5288 .remove = qeth_core_remove_device, 5265 .remove = qeth_core_remove_device,
5289 .set_online = qeth_core_set_online, 5266 .set_online = qeth_core_set_online,
5290 .set_offline = qeth_core_set_offline, 5267 .set_offline = qeth_core_set_offline,
@@ -5296,21 +5273,30 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
5296 .restore = qeth_core_restore, 5273 .restore = qeth_core_restore,
5297}; 5274};
5298 5275
5299static ssize_t 5276static ssize_t qeth_core_driver_group_store(struct device_driver *ddrv,
5300qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf, 5277 const char *buf, size_t count)
5301 size_t count)
5302{ 5278{
5303 int err; 5279 int err;
5304 err = qeth_core_driver_group(buf, qeth_core_root_dev,
5305 qeth_core_ccwgroup_driver.driver_id);
5306 if (err)
5307 return err;
5308 else
5309 return count;
5310}
5311 5280
5281 err = ccwgroup_create_dev(qeth_core_root_dev,
5282 &qeth_core_ccwgroup_driver, 3, buf);
5283
5284 return err ? err : count;
5285}
5312static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store); 5286static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
5313 5287
5288static struct attribute *qeth_drv_attrs[] = {
5289 &driver_attr_group.attr,
5290 NULL,
5291};
5292static struct attribute_group qeth_drv_attr_group = {
5293 .attrs = qeth_drv_attrs,
5294};
5295static const struct attribute_group *qeth_drv_attr_groups[] = {
5296 &qeth_drv_attr_group,
5297 NULL,
5298};
5299
5314static struct { 5300static struct {
5315 const char str[ETH_GSTRING_LEN]; 5301 const char str[ETH_GSTRING_LEN];
5316} qeth_ethtool_stats_keys[] = { 5302} qeth_ethtool_stats_keys[] = {
@@ -5548,49 +5534,41 @@ static int __init qeth_core_init(void)
5548 rc = qeth_register_dbf_views(); 5534 rc = qeth_register_dbf_views();
5549 if (rc) 5535 if (rc)
5550 goto out_err; 5536 goto out_err;
5551 rc = ccw_driver_register(&qeth_ccw_driver);
5552 if (rc)
5553 goto ccw_err;
5554 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
5555 if (rc)
5556 goto ccwgroup_err;
5557 rc = driver_create_file(&qeth_core_ccwgroup_driver.driver,
5558 &driver_attr_group);
5559 if (rc)
5560 goto driver_err;
5561 qeth_core_root_dev = root_device_register("qeth"); 5537 qeth_core_root_dev = root_device_register("qeth");
5562 rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0; 5538 rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
5563 if (rc) 5539 if (rc)
5564 goto register_err; 5540 goto register_err;
5565
5566 qeth_core_header_cache = kmem_cache_create("qeth_hdr", 5541 qeth_core_header_cache = kmem_cache_create("qeth_hdr",
5567 sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL); 5542 sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
5568 if (!qeth_core_header_cache) { 5543 if (!qeth_core_header_cache) {
5569 rc = -ENOMEM; 5544 rc = -ENOMEM;
5570 goto slab_err; 5545 goto slab_err;
5571 } 5546 }
5572
5573 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", 5547 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
5574 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); 5548 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
5575 if (!qeth_qdio_outbuf_cache) { 5549 if (!qeth_qdio_outbuf_cache) {
5576 rc = -ENOMEM; 5550 rc = -ENOMEM;
5577 goto cqslab_err; 5551 goto cqslab_err;
5578 } 5552 }
5553 rc = ccw_driver_register(&qeth_ccw_driver);
5554 if (rc)
5555 goto ccw_err;
5556 qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups;
5557 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
5558 if (rc)
5559 goto ccwgroup_err;
5579 5560
5580 return 0; 5561 return 0;
5562
5563ccwgroup_err:
5564 ccw_driver_unregister(&qeth_ccw_driver);
5565ccw_err:
5566 kmem_cache_destroy(qeth_qdio_outbuf_cache);
5581cqslab_err: 5567cqslab_err:
5582 kmem_cache_destroy(qeth_core_header_cache); 5568 kmem_cache_destroy(qeth_core_header_cache);
5583slab_err: 5569slab_err:
5584 root_device_unregister(qeth_core_root_dev); 5570 root_device_unregister(qeth_core_root_dev);
5585register_err: 5571register_err:
5586 driver_remove_file(&qeth_core_ccwgroup_driver.driver,
5587 &driver_attr_group);
5588driver_err:
5589 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
5590ccwgroup_err:
5591 ccw_driver_unregister(&qeth_ccw_driver);
5592ccw_err:
5593 QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc);
5594 qeth_unregister_dbf_views(); 5572 qeth_unregister_dbf_views();
5595out_err: 5573out_err:
5596 pr_err("Initializing the qeth device driver failed\n"); 5574 pr_err("Initializing the qeth device driver failed\n");
@@ -5599,13 +5577,11 @@ out_err:
5599 5577
5600static void __exit qeth_core_exit(void) 5578static void __exit qeth_core_exit(void)
5601{ 5579{
5602 root_device_unregister(qeth_core_root_dev);
5603 driver_remove_file(&qeth_core_ccwgroup_driver.driver,
5604 &driver_attr_group);
5605 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); 5580 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
5606 ccw_driver_unregister(&qeth_ccw_driver); 5581 ccw_driver_unregister(&qeth_ccw_driver);
5607 kmem_cache_destroy(qeth_qdio_outbuf_cache); 5582 kmem_cache_destroy(qeth_qdio_outbuf_cache);
5608 kmem_cache_destroy(qeth_core_header_cache); 5583 kmem_cache_destroy(qeth_core_header_cache);
5584 root_device_unregister(qeth_core_root_dev);
5609 qeth_unregister_dbf_views(); 5585 qeth_unregister_dbf_views();
5610 pr_info("core functions removed\n"); 5586 pr_info("core functions removed\n");
5611} 5587}
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index ff41e42004ac..a11b30c38423 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -70,16 +70,6 @@ enum qeth_link_types {
70 QETH_LINK_TYPE_ATM_NATIVE = 0x90, 70 QETH_LINK_TYPE_ATM_NATIVE = 0x90,
71}; 71};
72 72
73enum qeth_tr_macaddr_modes {
74 QETH_TR_MACADDR_NONCANONICAL = 0,
75 QETH_TR_MACADDR_CANONICAL = 1,
76};
77
78enum qeth_tr_broadcast_modes {
79 QETH_TR_BROADCAST_ALLRINGS = 0,
80 QETH_TR_BROADCAST_LOCAL = 1,
81};
82
83/* 73/*
84 * Routing stuff 74 * Routing stuff
85 */ 75 */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 0a8e86c1b0ea..f163af575c48 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -434,8 +434,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
434 goto out; 434 goto out;
435 else { 435 else {
436 card->info.mac_bits = 0; 436 card->info.mac_bits = 0;
437 if (card->discipline.ccwgdriver) { 437 if (card->discipline) {
438 card->discipline.ccwgdriver->remove(card->gdev); 438 card->discipline->remove(card->gdev);
439 qeth_core_free_discipline(card); 439 qeth_core_free_discipline(card);
440 } 440 }
441 } 441 }
@@ -444,7 +444,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
444 if (rc) 444 if (rc)
445 goto out; 445 goto out;
446 446
447 rc = card->discipline.ccwgdriver->probe(card->gdev); 447 rc = card->discipline->setup(card->gdev);
448out: 448out:
449 mutex_unlock(&card->discipline_mutex); 449 mutex_unlock(&card->discipline_mutex);
450 return rc ? rc : count; 450 return rc ? rc : count;
@@ -693,7 +693,6 @@ static struct attribute *qeth_blkt_device_attrs[] = {
693 &dev_attr_inter_jumbo.attr, 693 &dev_attr_inter_jumbo.attr,
694 NULL, 694 NULL,
695}; 695};
696
697static struct attribute_group qeth_device_blkt_group = { 696static struct attribute_group qeth_device_blkt_group = {
698 .name = "blkt", 697 .name = "blkt",
699 .attrs = qeth_blkt_device_attrs, 698 .attrs = qeth_blkt_device_attrs,
@@ -716,11 +715,16 @@ static struct attribute *qeth_device_attrs[] = {
716 &dev_attr_hw_trap.attr, 715 &dev_attr_hw_trap.attr,
717 NULL, 716 NULL,
718}; 717};
719
720static struct attribute_group qeth_device_attr_group = { 718static struct attribute_group qeth_device_attr_group = {
721 .attrs = qeth_device_attrs, 719 .attrs = qeth_device_attrs,
722}; 720};
723 721
722const struct attribute_group *qeth_generic_attr_groups[] = {
723 &qeth_device_attr_group,
724 &qeth_device_blkt_group,
725 NULL,
726};
727
724static struct attribute *qeth_osn_device_attrs[] = { 728static struct attribute *qeth_osn_device_attrs[] = {
725 &dev_attr_state.attr, 729 &dev_attr_state.attr,
726 &dev_attr_chpid.attr, 730 &dev_attr_chpid.attr,
@@ -730,37 +734,10 @@ static struct attribute *qeth_osn_device_attrs[] = {
730 &dev_attr_recover.attr, 734 &dev_attr_recover.attr,
731 NULL, 735 NULL,
732}; 736};
733
734static struct attribute_group qeth_osn_device_attr_group = { 737static struct attribute_group qeth_osn_device_attr_group = {
735 .attrs = qeth_osn_device_attrs, 738 .attrs = qeth_osn_device_attrs,
736}; 739};
737 740const struct attribute_group *qeth_osn_attr_groups[] = {
738int qeth_core_create_device_attributes(struct device *dev) 741 &qeth_osn_device_attr_group,
739{ 742 NULL,
740 int ret; 743};
741 ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group);
742 if (ret)
743 return ret;
744 ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group);
745 if (ret)
746 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
747
748 return 0;
749}
750
751void qeth_core_remove_device_attributes(struct device *dev)
752{
753 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
754 sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
755}
756
757int qeth_core_create_osn_attributes(struct device *dev)
758{
759 return sysfs_create_group(&dev->kobj, &qeth_osn_device_attr_group);
760}
761
762void qeth_core_remove_osn_attributes(struct device *dev)
763{
764 sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
765 return;
766}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0e7c29d1d7ef..426986518e96 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -882,12 +882,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
882 INIT_LIST_HEAD(&card->mc_list); 882 INIT_LIST_HEAD(&card->mc_list);
883 card->options.layer2 = 1; 883 card->options.layer2 = 1;
884 card->info.hwtrap = 0; 884 card->info.hwtrap = 0;
885 card->discipline.start_poll = qeth_qdio_start_poll;
886 card->discipline.input_handler = (qdio_handler_t *)
887 qeth_qdio_input_handler;
888 card->discipline.output_handler = (qdio_handler_t *)
889 qeth_qdio_output_handler;
890 card->discipline.recover = qeth_l2_recover;
891 return 0; 885 return 0;
892} 886}
893 887
@@ -1227,8 +1221,12 @@ out:
1227 return rc; 1221 return rc;
1228} 1222}
1229 1223
1230struct ccwgroup_driver qeth_l2_ccwgroup_driver = { 1224struct qeth_discipline qeth_l2_discipline = {
1231 .probe = qeth_l2_probe_device, 1225 .start_poll = qeth_qdio_start_poll,
1226 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
1227 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
1228 .recover = qeth_l2_recover,
1229 .setup = qeth_l2_probe_device,
1232 .remove = qeth_l2_remove_device, 1230 .remove = qeth_l2_remove_device,
1233 .set_online = qeth_l2_set_online, 1231 .set_online = qeth_l2_set_online,
1234 .set_offline = qeth_l2_set_offline, 1232 .set_offline = qeth_l2_set_offline,
@@ -1237,7 +1235,7 @@ struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
1237 .thaw = qeth_l2_pm_resume, 1235 .thaw = qeth_l2_pm_resume,
1238 .restore = qeth_l2_pm_resume, 1236 .restore = qeth_l2_pm_resume,
1239}; 1237};
1240EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver); 1238EXPORT_SYMBOL_GPL(qeth_l2_discipline);
1241 1239
1242static int qeth_osn_send_control_data(struct qeth_card *card, int len, 1240static int qeth_osn_send_control_data(struct qeth_card *card, int len,
1243 struct qeth_cmd_buffer *iob) 1241 struct qeth_cmd_buffer *iob)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index f85921607686..7be5e9775691 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -976,57 +976,6 @@ static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
976 return ct | QETH_CAST_UNICAST; 976 return ct | QETH_CAST_UNICAST;
977} 977}
978 978
979static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
980 __u32 mode)
981{
982 int rc;
983 struct qeth_cmd_buffer *iob;
984 struct qeth_ipa_cmd *cmd;
985
986 QETH_CARD_TEXT(card, 4, "adpmode");
987
988 iob = qeth_get_adapter_cmd(card, command,
989 sizeof(struct qeth_ipacmd_setadpparms));
990 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
991 cmd->data.setadapterparms.data.mode = mode;
992 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
993 NULL);
994 return rc;
995}
996
997static int qeth_l3_setadapter_hstr(struct qeth_card *card)
998{
999 int rc;
1000
1001 QETH_CARD_TEXT(card, 4, "adphstr");
1002
1003 if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
1004 rc = qeth_l3_send_setadp_mode(card,
1005 IPA_SETADP_SET_BROADCAST_MODE,
1006 card->options.broadcast_mode);
1007 if (rc)
1008 QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on "
1009 "device %s: x%x\n",
1010 CARD_BUS_ID(card), rc);
1011 rc = qeth_l3_send_setadp_mode(card,
1012 IPA_SETADP_ALTER_MAC_ADDRESS,
1013 card->options.macaddr_mode);
1014 if (rc)
1015 QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on "
1016 "device %s: x%x\n", CARD_BUS_ID(card), rc);
1017 return rc;
1018 }
1019 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
1020 QETH_DBF_MESSAGE(2, "set adapter parameters not available "
1021 "to set broadcast mode, using ALLRINGS "
1022 "on device %s:\n", CARD_BUS_ID(card));
1023 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
1024 QETH_DBF_MESSAGE(2, "set adapter parameters not available "
1025 "to set macaddr mode, using NONCANONICAL "
1026 "on device %s:\n", CARD_BUS_ID(card));
1027 return 0;
1028}
1029
1030static int qeth_l3_setadapter_parms(struct qeth_card *card) 979static int qeth_l3_setadapter_parms(struct qeth_card *card)
1031{ 980{
1032 int rc; 981 int rc;
@@ -1052,10 +1001,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
1052 " address failed\n"); 1001 " address failed\n");
1053 } 1002 }
1054 1003
1055 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
1056 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
1057 rc = qeth_l3_setadapter_hstr(card);
1058
1059 return rc; 1004 return rc;
1060} 1005}
1061 1006
@@ -1671,10 +1616,7 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
1671static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac, 1616static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
1672 struct net_device *dev) 1617 struct net_device *dev)
1673{ 1618{
1674 if (dev->type == ARPHRD_IEEE802_TR) 1619 ip_eth_mc_map(ipm, mac);
1675 ip_tr_mc_map(ipm, mac);
1676 else
1677 ip_eth_mc_map(ipm, mac);
1678} 1620}
1679 1621
1680static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) 1622static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
@@ -1922,8 +1864,6 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
1922#endif 1864#endif
1923 case __constant_htons(ETH_P_IP): 1865 case __constant_htons(ETH_P_IP):
1924 ip_hdr = (struct iphdr *)skb->data; 1866 ip_hdr = (struct iphdr *)skb->data;
1925 (card->dev->type == ARPHRD_IEEE802_TR) ?
1926 ip_tr_mc_map(ip_hdr->daddr, tg_addr):
1927 ip_eth_mc_map(ip_hdr->daddr, tg_addr); 1867 ip_eth_mc_map(ip_hdr->daddr, tg_addr);
1928 break; 1868 break;
1929 default: 1869 default:
@@ -1959,12 +1899,7 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
1959 tg_addr, "FAKELL", card->dev->addr_len); 1899 tg_addr, "FAKELL", card->dev->addr_len);
1960 } 1900 }
1961 1901
1962#ifdef CONFIG_TR 1902 skb->protocol = eth_type_trans(skb, card->dev);
1963 if (card->dev->type == ARPHRD_IEEE802_TR)
1964 skb->protocol = tr_type_trans(skb, card->dev);
1965 else
1966#endif
1967 skb->protocol = eth_type_trans(skb, card->dev);
1968 1903
1969 if (hdr->hdr.l3.ext_flags & 1904 if (hdr->hdr.l3.ext_flags &
1970 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { 1905 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
@@ -2138,7 +2073,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
2138 struct net_device *netdev; 2073 struct net_device *netdev;
2139 2074
2140 rcu_read_lock(); 2075 rcu_read_lock();
2141 netdev = __vlan_find_dev_deep(dev, vid); 2076 netdev = __vlan_find_dev_deep(card->dev, vid);
2142 rcu_read_unlock(); 2077 rcu_read_unlock();
2143 if (netdev == dev) { 2078 if (netdev == dev) {
2144 rc = QETH_VLAN_CARD; 2079 rc = QETH_VLAN_CARD;
@@ -2883,13 +2818,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2883 hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU; 2818 hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
2884 memcpy(hdr->hdr.l3.dest_addr, pkey, 16); 2819 memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
2885 } else { 2820 } else {
2886 /* passthrough */ 2821 if (!memcmp(skb->data + sizeof(struct qeth_hdr),
2887 if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
2888 !memcmp(skb->data + sizeof(struct qeth_hdr) +
2889 sizeof(__u16), skb->dev->broadcast, 6)) {
2890 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
2891 QETH_HDR_PASSTHRU;
2892 } else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
2893 skb->dev->broadcast, 6)) { 2822 skb->dev->broadcast, 6)) {
2894 /* broadcast? */ 2823 /* broadcast? */
2895 hdr->hdr.l3.flags = QETH_CAST_BROADCAST | 2824 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
@@ -3031,10 +2960,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3031 skb_pull(new_skb, ETH_HLEN); 2960 skb_pull(new_skb, ETH_HLEN);
3032 } else { 2961 } else {
3033 if (ipv == 4) { 2962 if (ipv == 4) {
3034 if (card->dev->type == ARPHRD_IEEE802_TR) 2963 skb_pull(new_skb, ETH_HLEN);
3035 skb_pull(new_skb, TR_HLEN);
3036 else
3037 skb_pull(new_skb, ETH_HLEN);
3038 } 2964 }
3039 2965
3040 if (ipv != 4 && vlan_tx_tag_present(new_skb)) { 2966 if (ipv != 4 && vlan_tx_tag_present(new_skb)) {
@@ -3318,12 +3244,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3318 card->info.type == QETH_CARD_TYPE_OSX) { 3244 card->info.type == QETH_CARD_TYPE_OSX) {
3319 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 3245 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
3320 (card->info.link_type == QETH_LINK_TYPE_HSTR)) { 3246 (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
3321#ifdef CONFIG_TR 3247 pr_info("qeth_l3: ignoring TR device\n");
3322 card->dev = alloc_trdev(0); 3248 return -ENODEV;
3323#endif
3324 if (!card->dev)
3325 return -ENODEV;
3326 card->dev->netdev_ops = &qeth_l3_netdev_ops;
3327 } else { 3249 } else {
3328 card->dev = alloc_etherdev(0); 3250 card->dev = alloc_etherdev(0);
3329 if (!card->dev) 3251 if (!card->dev)
@@ -3376,12 +3298,6 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3376 qeth_l3_create_device_attributes(&gdev->dev); 3298 qeth_l3_create_device_attributes(&gdev->dev);
3377 card->options.layer2 = 0; 3299 card->options.layer2 = 0;
3378 card->info.hwtrap = 0; 3300 card->info.hwtrap = 0;
3379 card->discipline.start_poll = qeth_qdio_start_poll;
3380 card->discipline.input_handler = (qdio_handler_t *)
3381 qeth_qdio_input_handler;
3382 card->discipline.output_handler = (qdio_handler_t *)
3383 qeth_qdio_output_handler;
3384 card->discipline.recover = qeth_l3_recover;
3385 return 0; 3301 return 0;
3386} 3302}
3387 3303
@@ -3656,8 +3572,12 @@ out:
3656 return rc; 3572 return rc;
3657} 3573}
3658 3574
3659struct ccwgroup_driver qeth_l3_ccwgroup_driver = { 3575struct qeth_discipline qeth_l3_discipline = {
3660 .probe = qeth_l3_probe_device, 3576 .start_poll = qeth_qdio_start_poll,
3577 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
3578 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
3579 .recover = qeth_l3_recover,
3580 .setup = qeth_l3_probe_device,
3661 .remove = qeth_l3_remove_device, 3581 .remove = qeth_l3_remove_device,
3662 .set_online = qeth_l3_set_online, 3582 .set_online = qeth_l3_set_online,
3663 .set_offline = qeth_l3_set_offline, 3583 .set_offline = qeth_l3_set_offline,
@@ -3666,7 +3586,7 @@ struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
3666 .thaw = qeth_l3_pm_resume, 3586 .thaw = qeth_l3_pm_resume,
3667 .restore = qeth_l3_pm_resume, 3587 .restore = qeth_l3_pm_resume,
3668}; 3588};
3669EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver); 3589EXPORT_SYMBOL_GPL(qeth_l3_discipline);
3670 3590
3671static int qeth_l3_ip_event(struct notifier_block *this, 3591static int qeth_l3_ip_event(struct notifier_block *this,
3672 unsigned long event, void *ptr) 3592 unsigned long event, void *ptr)
@@ -3680,9 +3600,9 @@ static int qeth_l3_ip_event(struct notifier_block *this,
3680 return NOTIFY_DONE; 3600 return NOTIFY_DONE;
3681 3601
3682 card = qeth_l3_get_card_from_dev(dev); 3602 card = qeth_l3_get_card_from_dev(dev);
3683 QETH_CARD_TEXT(card, 3, "ipevent");
3684 if (!card) 3603 if (!card)
3685 return NOTIFY_DONE; 3604 return NOTIFY_DONE;
3605 QETH_CARD_TEXT(card, 3, "ipevent");
3686 3606
3687 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 3607 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
3688 if (addr != NULL) { 3608 if (addr != NULL) {
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index d979bb26522f..4cafedf950ad 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -175,116 +175,6 @@ out:
175static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show, 175static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
176 qeth_l3_dev_fake_broadcast_store); 176 qeth_l3_dev_fake_broadcast_store);
177 177
178static ssize_t qeth_l3_dev_broadcast_mode_show(struct device *dev,
179 struct device_attribute *attr, char *buf)
180{
181 struct qeth_card *card = dev_get_drvdata(dev);
182
183 if (!card)
184 return -EINVAL;
185
186 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
187 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
188 return sprintf(buf, "n/a\n");
189
190 return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
191 QETH_TR_BROADCAST_ALLRINGS)?
192 "all rings":"local");
193}
194
195static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
196 struct device_attribute *attr, const char *buf, size_t count)
197{
198 struct qeth_card *card = dev_get_drvdata(dev);
199 char *tmp;
200 int rc = 0;
201
202 if (!card)
203 return -EINVAL;
204
205 mutex_lock(&card->conf_mutex);
206 if ((card->state != CARD_STATE_DOWN) &&
207 (card->state != CARD_STATE_RECOVER)) {
208 rc = -EPERM;
209 goto out;
210 }
211
212 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
213 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
214 rc = -EINVAL;
215 goto out;
216 }
217
218 tmp = strsep((char **) &buf, "\n");
219
220 if (!strcmp(tmp, "local"))
221 card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
222 else if (!strcmp(tmp, "all_rings"))
223 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
224 else
225 rc = -EINVAL;
226out:
227 mutex_unlock(&card->conf_mutex);
228 return rc ? rc : count;
229}
230
231static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show,
232 qeth_l3_dev_broadcast_mode_store);
233
234static ssize_t qeth_l3_dev_canonical_macaddr_show(struct device *dev,
235 struct device_attribute *attr, char *buf)
236{
237 struct qeth_card *card = dev_get_drvdata(dev);
238
239 if (!card)
240 return -EINVAL;
241
242 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
243 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
244 return sprintf(buf, "n/a\n");
245
246 return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
247 QETH_TR_MACADDR_CANONICAL)? 1:0);
248}
249
250static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
251 struct device_attribute *attr, const char *buf, size_t count)
252{
253 struct qeth_card *card = dev_get_drvdata(dev);
254 char *tmp;
255 int i, rc = 0;
256
257 if (!card)
258 return -EINVAL;
259
260 mutex_lock(&card->conf_mutex);
261 if ((card->state != CARD_STATE_DOWN) &&
262 (card->state != CARD_STATE_RECOVER)) {
263 rc = -EPERM;
264 goto out;
265 }
266
267 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
268 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
269 rc = -EINVAL;
270 goto out;
271 }
272
273 i = simple_strtoul(buf, &tmp, 16);
274 if ((i == 0) || (i == 1))
275 card->options.macaddr_mode = i?
276 QETH_TR_MACADDR_CANONICAL :
277 QETH_TR_MACADDR_NONCANONICAL;
278 else
279 rc = -EINVAL;
280out:
281 mutex_unlock(&card->conf_mutex);
282 return rc ? rc : count;
283}
284
285static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
286 qeth_l3_dev_canonical_macaddr_store);
287
288static ssize_t qeth_l3_dev_sniffer_show(struct device *dev, 178static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
289 struct device_attribute *attr, char *buf) 179 struct device_attribute *attr, char *buf)
290{ 180{
@@ -458,8 +348,6 @@ static struct attribute *qeth_l3_device_attrs[] = {
458 &dev_attr_route4.attr, 348 &dev_attr_route4.attr,
459 &dev_attr_route6.attr, 349 &dev_attr_route6.attr,
460 &dev_attr_fake_broadcast.attr, 350 &dev_attr_fake_broadcast.attr,
461 &dev_attr_broadcast_mode.attr,
462 &dev_attr_canonical_macaddr.attr,
463 &dev_attr_sniffer.attr, 351 &dev_attr_sniffer.attr,
464 &dev_attr_hsuid.attr, 352 &dev_attr_hsuid.attr,
465 NULL, 353 NULL,
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 04a154f87e3e..df740cbbaef4 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -572,7 +572,7 @@ static void falcon_get_lock(void)
572} 572}
573 573
574 574
575int __init atari_scsi_detect(struct scsi_host_template *host) 575static int __init atari_scsi_detect(struct scsi_host_template *host)
576{ 576{
577 static int called = 0; 577 static int called = 0;
578 struct Scsi_Host *instance; 578 struct Scsi_Host *instance;
@@ -724,7 +724,7 @@ int __init atari_scsi_detect(struct scsi_host_template *host)
724 return 1; 724 return 1;
725} 725}
726 726
727int atari_scsi_release(struct Scsi_Host *sh) 727static int atari_scsi_release(struct Scsi_Host *sh)
728{ 728{
729 if (IS_A_TT()) 729 if (IS_A_TT())
730 free_irq(IRQ_TT_MFP_SCSI, sh); 730 free_irq(IRQ_TT_MFP_SCSI, sh);
@@ -734,17 +734,21 @@ int atari_scsi_release(struct Scsi_Host *sh)
734 return 1; 734 return 1;
735} 735}
736 736
737void __init atari_scsi_setup(char *str, int *ints) 737#ifndef MODULE
738static int __init atari_scsi_setup(char *str)
738{ 739{
739 /* Format of atascsi parameter is: 740 /* Format of atascsi parameter is:
740 * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> 741 * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
741 * Defaults depend on TT or Falcon, hostid determined at run time. 742 * Defaults depend on TT or Falcon, hostid determined at run time.
742 * Negative values mean don't change. 743 * Negative values mean don't change.
743 */ 744 */
745 int ints[6];
746
747 get_options(str, ARRAY_SIZE(ints), ints);
744 748
745 if (ints[0] < 1) { 749 if (ints[0] < 1) {
746 printk("atari_scsi_setup: no arguments!\n"); 750 printk("atari_scsi_setup: no arguments!\n");
747 return; 751 return 0;
748 } 752 }
749 753
750 if (ints[0] >= 1) { 754 if (ints[0] >= 1) {
@@ -777,9 +781,14 @@ void __init atari_scsi_setup(char *str, int *ints)
777 setup_use_tagged_queuing = !!ints[5]; 781 setup_use_tagged_queuing = !!ints[5];
778 } 782 }
779#endif 783#endif
784
785 return 1;
780} 786}
781 787
782int atari_scsi_bus_reset(Scsi_Cmnd *cmd) 788__setup("atascsi=", atari_scsi_setup);
789#endif /* !MODULE */
790
791static int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
783{ 792{
784 int rv; 793 int rv;
785 struct NCR5380_hostdata *hostdata = 794 struct NCR5380_hostdata *hostdata =
@@ -852,7 +861,7 @@ static void __init atari_scsi_reset_boot(void)
852#endif 861#endif
853 862
854 863
855const char *atari_scsi_info(struct Scsi_Host *host) 864static const char *atari_scsi_info(struct Scsi_Host *host)
856{ 865{
857 /* atari_scsi_detect() is verbose enough... */ 866 /* atari_scsi_detect() is verbose enough... */
858 static const char string[] = "Atari native SCSI"; 867 static const char string[] = "Atari native SCSI";
@@ -862,8 +871,9 @@ const char *atari_scsi_info(struct Scsi_Host *host)
862 871
863#if defined(REAL_DMA) 872#if defined(REAL_DMA)
864 873
865unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, void *data, 874static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
866 unsigned long count, int dir) 875 void *data, unsigned long count,
876 int dir)
867{ 877{
868 unsigned long addr = virt_to_phys(data); 878 unsigned long addr = virt_to_phys(data);
869 879
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h
index efadb8d567c2..bd52df78b209 100644
--- a/drivers/scsi/atari_scsi.h
+++ b/drivers/scsi/atari_scsi.h
@@ -18,11 +18,6 @@
18/* (I_HAVE_OVERRUNS stuff removed) */ 18/* (I_HAVE_OVERRUNS stuff removed) */
19 19
20#ifndef ASM 20#ifndef ASM
21int atari_scsi_detect (struct scsi_host_template *);
22const char *atari_scsi_info (struct Scsi_Host *);
23int atari_scsi_reset (Scsi_Cmnd *, unsigned int);
24int atari_scsi_release (struct Scsi_Host *);
25
26/* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. Higher 21/* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. Higher
27 * values should work, too; try it! (but cmd_per_lun costs memory!) */ 22 * values should work, too; try it! (but cmd_per_lun costs memory!) */
28 23
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 351dc0b86fab..a3a056a9db67 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -218,6 +218,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
218 218
219 if (!shost->shost_gendev.parent) 219 if (!shost->shost_gendev.parent)
220 shost->shost_gendev.parent = dev ? dev : &platform_bus; 220 shost->shost_gendev.parent = dev ? dev : &platform_bus;
221 if (!dma_dev)
222 dma_dev = shost->shost_gendev.parent;
223
221 shost->dma_dev = dma_dev; 224 shost->dma_dev = dma_dev;
222 225
223 error = device_add(&shost->shost_gendev); 226 error = device_add(&shost->shost_gendev);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e002cd466e9a..467dc38246f9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4549,8 +4549,12 @@ static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4549 ENTER; 4549 ENTER;
4550 if (sdev->sdev_target) 4550 if (sdev->sdev_target)
4551 sata_port = sdev->sdev_target->hostdata; 4551 sata_port = sdev->sdev_target->hostdata;
4552 if (sata_port) 4552 if (sata_port) {
4553 rc = ata_sas_port_init(sata_port->ap); 4553 rc = ata_sas_port_init(sata_port->ap);
4554 if (rc == 0)
4555 rc = ata_sas_sync_probe(sata_port->ap);
4556 }
4557
4554 if (rc) 4558 if (rc)
4555 ipr_slave_destroy(sdev); 4559 ipr_slave_destroy(sdev);
4556 4560
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 453a740fa68e..922086105b4b 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -662,7 +662,7 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
662 662
663 /* setup Socket parameters */ 663 /* setup Socket parameters */
664 sk = sock->sk; 664 sk = sock->sk;
665 sk->sk_reuse = 1; 665 sk->sk_reuse = SK_CAN_REUSE;
666 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ 666 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
667 sk->sk_allocation = GFP_ATOMIC; 667 sk->sk_allocation = GFP_ATOMIC;
668 668
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index ef9560dff295..cc83b66d45b7 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1742,17 +1742,19 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1742 1742
1743 mfs = ntohs(flp->fl_csp.sp_bb_data) & 1743 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1744 FC_SP_BB_DATA_MASK; 1744 FC_SP_BB_DATA_MASK;
1745 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && 1745
1746 mfs <= lport->mfs) { 1746 if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
1747 lport->mfs = mfs;
1748 fc_host_maxframe_size(lport->host) = mfs;
1749 } else {
1750 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " 1747 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
1751 "lport->mfs:%hu\n", mfs, lport->mfs); 1748 "lport->mfs:%hu\n", mfs, lport->mfs);
1752 fc_lport_error(lport, fp); 1749 fc_lport_error(lport, fp);
1753 goto err; 1750 goto err;
1754 } 1751 }
1755 1752
1753 if (mfs <= lport->mfs) {
1754 lport->mfs = mfs;
1755 fc_host_maxframe_size(lport->host) = mfs;
1756 }
1757
1756 csp_flags = ntohs(flp->fl_csp.sp_features); 1758 csp_flags = ntohs(flp->fl_csp.sp_features);
1757 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); 1759 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1758 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); 1760 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index bc0cecc6ad62..441d88ad99a7 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -546,11 +546,12 @@ static struct ata_port_info sata_port_info = {
546 .port_ops = &sas_sata_ops 546 .port_ops = &sas_sata_ops
547}; 547};
548 548
549int sas_ata_init_host_and_port(struct domain_device *found_dev) 549int sas_ata_init(struct domain_device *found_dev)
550{ 550{
551 struct sas_ha_struct *ha = found_dev->port->ha; 551 struct sas_ha_struct *ha = found_dev->port->ha;
552 struct Scsi_Host *shost = ha->core.shost; 552 struct Scsi_Host *shost = ha->core.shost;
553 struct ata_port *ap; 553 struct ata_port *ap;
554 int rc;
554 555
555 ata_host_init(&found_dev->sata_dev.ata_host, 556 ata_host_init(&found_dev->sata_dev.ata_host,
556 ha->dev, 557 ha->dev,
@@ -567,8 +568,11 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev)
567 ap->private_data = found_dev; 568 ap->private_data = found_dev;
568 ap->cbl = ATA_CBL_SATA; 569 ap->cbl = ATA_CBL_SATA;
569 ap->scsi_host = shost; 570 ap->scsi_host = shost;
570 /* publish initialized ata port */ 571 rc = ata_sas_port_init(ap);
571 smp_wmb(); 572 if (rc) {
573 ata_sas_port_destroy(ap);
574 return rc;
575 }
572 found_dev->sata_dev.ap = ap; 576 found_dev->sata_dev.ap = ap;
573 577
574 return 0; 578 return 0;
@@ -648,18 +652,13 @@ static void sas_get_ata_command_set(struct domain_device *dev)
648void sas_probe_sata(struct asd_sas_port *port) 652void sas_probe_sata(struct asd_sas_port *port)
649{ 653{
650 struct domain_device *dev, *n; 654 struct domain_device *dev, *n;
651 int err;
652 655
653 mutex_lock(&port->ha->disco_mutex); 656 mutex_lock(&port->ha->disco_mutex);
654 list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) { 657 list_for_each_entry(dev, &port->disco_list, disco_list_node) {
655 if (!dev_is_sata(dev)) 658 if (!dev_is_sata(dev))
656 continue; 659 continue;
657 660
658 err = sas_ata_init_host_and_port(dev); 661 ata_sas_async_probe(dev->sata_dev.ap);
659 if (err)
660 sas_fail_probe(dev, __func__, err);
661 else
662 ata_sas_async_port_init(dev->sata_dev.ap);
663 } 662 }
664 mutex_unlock(&port->ha->disco_mutex); 663 mutex_unlock(&port->ha->disco_mutex);
665 664
@@ -718,18 +717,6 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
718 sas_put_device(dev); 717 sas_put_device(dev);
719} 718}
720 719
721static bool sas_ata_dev_eh_valid(struct domain_device *dev)
722{
723 struct ata_port *ap;
724
725 if (!dev_is_sata(dev))
726 return false;
727 ap = dev->sata_dev.ap;
728 /* consume fully initialized ata ports */
729 smp_rmb();
730 return !!ap;
731}
732
733void sas_ata_strategy_handler(struct Scsi_Host *shost) 720void sas_ata_strategy_handler(struct Scsi_Host *shost)
734{ 721{
735 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 722 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
@@ -753,7 +740,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
753 740
754 spin_lock(&port->dev_list_lock); 741 spin_lock(&port->dev_list_lock);
755 list_for_each_entry(dev, &port->dev_list, dev_list_node) { 742 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
756 if (!sas_ata_dev_eh_valid(dev)) 743 if (!dev_is_sata(dev))
757 continue; 744 continue;
758 async_schedule_domain(async_sas_ata_eh, dev, &async); 745 async_schedule_domain(async_sas_ata_eh, dev, &async);
759 } 746 }
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 364679675602..629a0865b130 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -72,6 +72,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
72 struct asd_sas_phy *phy; 72 struct asd_sas_phy *phy;
73 struct sas_rphy *rphy; 73 struct sas_rphy *rphy;
74 struct domain_device *dev; 74 struct domain_device *dev;
75 int rc = -ENODEV;
75 76
76 dev = sas_alloc_device(); 77 dev = sas_alloc_device();
77 if (!dev) 78 if (!dev)
@@ -110,9 +111,16 @@ static int sas_get_port_device(struct asd_sas_port *port)
110 111
111 sas_init_dev(dev); 112 sas_init_dev(dev);
112 113
114 dev->port = port;
113 switch (dev->dev_type) { 115 switch (dev->dev_type) {
114 case SAS_END_DEV:
115 case SATA_DEV: 116 case SATA_DEV:
117 rc = sas_ata_init(dev);
118 if (rc) {
119 rphy = NULL;
120 break;
121 }
122 /* fall through */
123 case SAS_END_DEV:
116 rphy = sas_end_device_alloc(port->port); 124 rphy = sas_end_device_alloc(port->port);
117 break; 125 break;
118 case EDGE_DEV: 126 case EDGE_DEV:
@@ -131,19 +139,14 @@ static int sas_get_port_device(struct asd_sas_port *port)
131 139
132 if (!rphy) { 140 if (!rphy) {
133 sas_put_device(dev); 141 sas_put_device(dev);
134 return -ENODEV; 142 return rc;
135 } 143 }
136 144
137 spin_lock_irq(&port->phy_list_lock);
138 list_for_each_entry(phy, &port->phy_list, port_phy_el)
139 sas_phy_set_target(phy, dev);
140 spin_unlock_irq(&port->phy_list_lock);
141 rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; 145 rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
142 memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); 146 memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
143 sas_fill_in_rphy(dev, rphy); 147 sas_fill_in_rphy(dev, rphy);
144 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); 148 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
145 port->port_dev = dev; 149 port->port_dev = dev;
146 dev->port = port;
147 dev->linkrate = port->linkrate; 150 dev->linkrate = port->linkrate;
148 dev->min_linkrate = port->linkrate; 151 dev->min_linkrate = port->linkrate;
149 dev->max_linkrate = port->linkrate; 152 dev->max_linkrate = port->linkrate;
@@ -155,6 +158,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
155 sas_device_set_phy(dev, port->port); 158 sas_device_set_phy(dev, port->port);
156 159
157 dev->rphy = rphy; 160 dev->rphy = rphy;
161 get_device(&dev->rphy->dev);
158 162
159 if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) 163 if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
160 list_add_tail(&dev->disco_list_node, &port->disco_list); 164 list_add_tail(&dev->disco_list_node, &port->disco_list);
@@ -164,6 +168,11 @@ static int sas_get_port_device(struct asd_sas_port *port)
164 spin_unlock_irq(&port->dev_list_lock); 168 spin_unlock_irq(&port->dev_list_lock);
165 } 169 }
166 170
171 spin_lock_irq(&port->phy_list_lock);
172 list_for_each_entry(phy, &port->phy_list, port_phy_el)
173 sas_phy_set_target(phy, dev);
174 spin_unlock_irq(&port->phy_list_lock);
175
167 return 0; 176 return 0;
168} 177}
169 178
@@ -205,8 +214,7 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
205static void sas_probe_devices(struct work_struct *work) 214static void sas_probe_devices(struct work_struct *work)
206{ 215{
207 struct domain_device *dev, *n; 216 struct domain_device *dev, *n;
208 struct sas_discovery_event *ev = 217 struct sas_discovery_event *ev = to_sas_discovery_event(work);
209 container_of(work, struct sas_discovery_event, work);
210 struct asd_sas_port *port = ev->port; 218 struct asd_sas_port *port = ev->port;
211 219
212 clear_bit(DISCE_PROBE, &port->disc.pending); 220 clear_bit(DISCE_PROBE, &port->disc.pending);
@@ -255,6 +263,9 @@ void sas_free_device(struct kref *kref)
255{ 263{
256 struct domain_device *dev = container_of(kref, typeof(*dev), kref); 264 struct domain_device *dev = container_of(kref, typeof(*dev), kref);
257 265
266 put_device(&dev->rphy->dev);
267 dev->rphy = NULL;
268
258 if (dev->parent) 269 if (dev->parent)
259 sas_put_device(dev->parent); 270 sas_put_device(dev->parent);
260 271
@@ -291,8 +302,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
291static void sas_destruct_devices(struct work_struct *work) 302static void sas_destruct_devices(struct work_struct *work)
292{ 303{
293 struct domain_device *dev, *n; 304 struct domain_device *dev, *n;
294 struct sas_discovery_event *ev = 305 struct sas_discovery_event *ev = to_sas_discovery_event(work);
295 container_of(work, struct sas_discovery_event, work);
296 struct asd_sas_port *port = ev->port; 306 struct asd_sas_port *port = ev->port;
297 307
298 clear_bit(DISCE_DESTRUCT, &port->disc.pending); 308 clear_bit(DISCE_DESTRUCT, &port->disc.pending);
@@ -302,7 +312,6 @@ static void sas_destruct_devices(struct work_struct *work)
302 312
303 sas_remove_children(&dev->rphy->dev); 313 sas_remove_children(&dev->rphy->dev);
304 sas_rphy_delete(dev->rphy); 314 sas_rphy_delete(dev->rphy);
305 dev->rphy = NULL;
306 sas_unregister_common_dev(port, dev); 315 sas_unregister_common_dev(port, dev);
307 } 316 }
308} 317}
@@ -314,11 +323,11 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
314 /* this rphy never saw sas_rphy_add */ 323 /* this rphy never saw sas_rphy_add */
315 list_del_init(&dev->disco_list_node); 324 list_del_init(&dev->disco_list_node);
316 sas_rphy_free(dev->rphy); 325 sas_rphy_free(dev->rphy);
317 dev->rphy = NULL;
318 sas_unregister_common_dev(port, dev); 326 sas_unregister_common_dev(port, dev);
327 return;
319 } 328 }
320 329
321 if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) { 330 if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
322 sas_rphy_unlink(dev->rphy); 331 sas_rphy_unlink(dev->rphy);
323 list_move_tail(&dev->disco_list_node, &port->destroy_list); 332 list_move_tail(&dev->disco_list_node, &port->destroy_list);
324 sas_discover_event(dev->port, DISCE_DESTRUCT); 333 sas_discover_event(dev->port, DISCE_DESTRUCT);
@@ -377,8 +386,7 @@ static void sas_discover_domain(struct work_struct *work)
377{ 386{
378 struct domain_device *dev; 387 struct domain_device *dev;
379 int error = 0; 388 int error = 0;
380 struct sas_discovery_event *ev = 389 struct sas_discovery_event *ev = to_sas_discovery_event(work);
381 container_of(work, struct sas_discovery_event, work);
382 struct asd_sas_port *port = ev->port; 390 struct asd_sas_port *port = ev->port;
383 391
384 clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending); 392 clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);
@@ -419,8 +427,6 @@ static void sas_discover_domain(struct work_struct *work)
419 427
420 if (error) { 428 if (error) {
421 sas_rphy_free(dev->rphy); 429 sas_rphy_free(dev->rphy);
422 dev->rphy = NULL;
423
424 list_del_init(&dev->disco_list_node); 430 list_del_init(&dev->disco_list_node);
425 spin_lock_irq(&port->dev_list_lock); 431 spin_lock_irq(&port->dev_list_lock);
426 list_del_init(&dev->dev_list_node); 432 list_del_init(&dev->dev_list_node);
@@ -437,8 +443,7 @@ static void sas_discover_domain(struct work_struct *work)
437static void sas_revalidate_domain(struct work_struct *work) 443static void sas_revalidate_domain(struct work_struct *work)
438{ 444{
439 int res = 0; 445 int res = 0;
440 struct sas_discovery_event *ev = 446 struct sas_discovery_event *ev = to_sas_discovery_event(work);
441 container_of(work, struct sas_discovery_event, work);
442 struct asd_sas_port *port = ev->port; 447 struct asd_sas_port *port = ev->port;
443 struct sas_ha_struct *ha = port->ha; 448 struct sas_ha_struct *ha = port->ha;
444 449
@@ -466,21 +471,25 @@ static void sas_revalidate_domain(struct work_struct *work)
466 471
467/* ---------- Events ---------- */ 472/* ---------- Events ---------- */
468 473
469static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work) 474static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
470{ 475{
471 /* chained work is not subject to SA_HA_DRAINING or SAS_HA_REGISTERED */ 476 /* chained work is not subject to SA_HA_DRAINING or
472 scsi_queue_work(ha->core.shost, work); 477 * SAS_HA_REGISTERED, because it is either submitted in the
478 * workqueue, or known to be submitted from a context that is
479 * not racing against draining
480 */
481 scsi_queue_work(ha->core.shost, &sw->work);
473} 482}
474 483
475static void sas_chain_event(int event, unsigned long *pending, 484static void sas_chain_event(int event, unsigned long *pending,
476 struct work_struct *work, 485 struct sas_work *sw,
477 struct sas_ha_struct *ha) 486 struct sas_ha_struct *ha)
478{ 487{
479 if (!test_and_set_bit(event, pending)) { 488 if (!test_and_set_bit(event, pending)) {
480 unsigned long flags; 489 unsigned long flags;
481 490
482 spin_lock_irqsave(&ha->state_lock, flags); 491 spin_lock_irqsave(&ha->state_lock, flags);
483 sas_chain_work(ha, work); 492 sas_chain_work(ha, sw);
484 spin_unlock_irqrestore(&ha->state_lock, flags); 493 spin_unlock_irqrestore(&ha->state_lock, flags);
485 } 494 }
486} 495}
@@ -519,7 +528,7 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
519 528
520 disc->pending = 0; 529 disc->pending = 0;
521 for (i = 0; i < DISC_NUM_EVENTS; i++) { 530 for (i = 0; i < DISC_NUM_EVENTS; i++) {
522 INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]); 531 INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
523 disc->disc_work[i].port = port; 532 disc->disc_work[i].port = port;
524 } 533 }
525} 534}
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 16639bbae629..4e4292d210c1 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -27,19 +27,21 @@
27#include "sas_internal.h" 27#include "sas_internal.h"
28#include "sas_dump.h" 28#include "sas_dump.h"
29 29
30void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work) 30void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
31{ 31{
32 if (!test_bit(SAS_HA_REGISTERED, &ha->state)) 32 if (!test_bit(SAS_HA_REGISTERED, &ha->state))
33 return; 33 return;
34 34
35 if (test_bit(SAS_HA_DRAINING, &ha->state)) 35 if (test_bit(SAS_HA_DRAINING, &ha->state)) {
36 list_add(&work->entry, &ha->defer_q); 36 /* add it to the defer list, if not already pending */
37 else 37 if (list_empty(&sw->drain_node))
38 scsi_queue_work(ha->core.shost, work); 38 list_add(&sw->drain_node, &ha->defer_q);
39 } else
40 scsi_queue_work(ha->core.shost, &sw->work);
39} 41}
40 42
41static void sas_queue_event(int event, unsigned long *pending, 43static void sas_queue_event(int event, unsigned long *pending,
42 struct work_struct *work, 44 struct sas_work *work,
43 struct sas_ha_struct *ha) 45 struct sas_ha_struct *ha)
44{ 46{
45 if (!test_and_set_bit(event, pending)) { 47 if (!test_and_set_bit(event, pending)) {
@@ -55,7 +57,7 @@ static void sas_queue_event(int event, unsigned long *pending,
55void __sas_drain_work(struct sas_ha_struct *ha) 57void __sas_drain_work(struct sas_ha_struct *ha)
56{ 58{
57 struct workqueue_struct *wq = ha->core.shost->work_q; 59 struct workqueue_struct *wq = ha->core.shost->work_q;
58 struct work_struct *w, *_w; 60 struct sas_work *sw, *_sw;
59 61
60 set_bit(SAS_HA_DRAINING, &ha->state); 62 set_bit(SAS_HA_DRAINING, &ha->state);
61 /* flush submitters */ 63 /* flush submitters */
@@ -66,9 +68,9 @@ void __sas_drain_work(struct sas_ha_struct *ha)
66 68
67 spin_lock_irq(&ha->state_lock); 69 spin_lock_irq(&ha->state_lock);
68 clear_bit(SAS_HA_DRAINING, &ha->state); 70 clear_bit(SAS_HA_DRAINING, &ha->state);
69 list_for_each_entry_safe(w, _w, &ha->defer_q, entry) { 71 list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
70 list_del_init(&w->entry); 72 list_del_init(&sw->drain_node);
71 sas_queue_work(ha, w); 73 sas_queue_work(ha, sw);
72 } 74 }
73 spin_unlock_irq(&ha->state_lock); 75 spin_unlock_irq(&ha->state_lock);
74} 76}
@@ -151,7 +153,7 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
151 int i; 153 int i;
152 154
153 for (i = 0; i < HA_NUM_EVENTS; i++) { 155 for (i = 0; i < HA_NUM_EVENTS; i++) {
154 INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); 156 INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
155 sas_ha->ha_events[i].ha = sas_ha; 157 sas_ha->ha_events[i].ha = sas_ha;
156 } 158 }
157 159
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 05acd9e35fc4..caa0525d2523 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -202,6 +202,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
202 u8 sas_addr[SAS_ADDR_SIZE]; 202 u8 sas_addr[SAS_ADDR_SIZE];
203 struct smp_resp *resp = rsp; 203 struct smp_resp *resp = rsp;
204 struct discover_resp *dr = &resp->disc; 204 struct discover_resp *dr = &resp->disc;
205 struct sas_ha_struct *ha = dev->port->ha;
205 struct expander_device *ex = &dev->ex_dev; 206 struct expander_device *ex = &dev->ex_dev;
206 struct ex_phy *phy = &ex->ex_phy[phy_id]; 207 struct ex_phy *phy = &ex->ex_phy[phy_id];
207 struct sas_rphy *rphy = dev->rphy; 208 struct sas_rphy *rphy = dev->rphy;
@@ -209,6 +210,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
209 char *type; 210 char *type;
210 211
211 if (new_phy) { 212 if (new_phy) {
213 if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)))
214 return;
212 phy->phy = sas_phy_alloc(&rphy->dev, phy_id); 215 phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
213 216
214 /* FIXME: error_handling */ 217 /* FIXME: error_handling */
@@ -233,6 +236,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
233 memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); 236 memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
234 237
235 phy->attached_dev_type = to_dev_type(dr); 238 phy->attached_dev_type = to_dev_type(dr);
239 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
240 goto out;
236 phy->phy_id = phy_id; 241 phy->phy_id = phy_id;
237 phy->linkrate = dr->linkrate; 242 phy->linkrate = dr->linkrate;
238 phy->attached_sata_host = dr->attached_sata_host; 243 phy->attached_sata_host = dr->attached_sata_host;
@@ -240,7 +245,14 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
240 phy->attached_sata_ps = dr->attached_sata_ps; 245 phy->attached_sata_ps = dr->attached_sata_ps;
241 phy->attached_iproto = dr->iproto << 1; 246 phy->attached_iproto = dr->iproto << 1;
242 phy->attached_tproto = dr->tproto << 1; 247 phy->attached_tproto = dr->tproto << 1;
243 memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); 248 /* help some expanders that fail to zero sas_address in the 'no
249 * device' case
250 */
251 if (phy->attached_dev_type == NO_DEVICE ||
252 phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
253 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
254 else
255 memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
244 phy->attached_phy_id = dr->attached_phy_id; 256 phy->attached_phy_id = dr->attached_phy_id;
245 phy->phy_change_count = dr->change_count; 257 phy->phy_change_count = dr->change_count;
246 phy->routing_attr = dr->routing_attr; 258 phy->routing_attr = dr->routing_attr;
@@ -266,6 +278,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
266 return; 278 return;
267 } 279 }
268 280
281 out:
269 switch (phy->attached_dev_type) { 282 switch (phy->attached_dev_type) {
270 case SATA_PENDING: 283 case SATA_PENDING:
271 type = "stp pending"; 284 type = "stp pending";
@@ -304,7 +317,15 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
304 else 317 else
305 return; 318 return;
306 319
307 SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", 320 /* if the attached device type changed and ata_eh is active,
321 * make sure we run revalidation when eh completes (see:
322 * sas_enable_revalidation)
323 */
324 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
325 set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending);
326
327 SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
328 test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
308 SAS_ADDR(dev->sas_addr), phy->phy_id, 329 SAS_ADDR(dev->sas_addr), phy->phy_id,
309 sas_route_char(dev, phy), phy->linkrate, 330 sas_route_char(dev, phy), phy->linkrate,
310 SAS_ADDR(phy->attached_sas_addr), type); 331 SAS_ADDR(phy->attached_sas_addr), type);
@@ -776,13 +797,16 @@ static struct domain_device *sas_ex_discover_end_dev(
776 if (res) 797 if (res)
777 goto out_free; 798 goto out_free;
778 799
800 sas_init_dev(child);
801 res = sas_ata_init(child);
802 if (res)
803 goto out_free;
779 rphy = sas_end_device_alloc(phy->port); 804 rphy = sas_end_device_alloc(phy->port);
780 if (unlikely(!rphy)) 805 if (!rphy)
781 goto out_free; 806 goto out_free;
782 807
783 sas_init_dev(child);
784
785 child->rphy = rphy; 808 child->rphy = rphy;
809 get_device(&rphy->dev);
786 810
787 list_add_tail(&child->disco_list_node, &parent->port->disco_list); 811 list_add_tail(&child->disco_list_node, &parent->port->disco_list);
788 812
@@ -806,6 +830,7 @@ static struct domain_device *sas_ex_discover_end_dev(
806 sas_init_dev(child); 830 sas_init_dev(child);
807 831
808 child->rphy = rphy; 832 child->rphy = rphy;
833 get_device(&rphy->dev);
809 sas_fill_in_rphy(child, rphy); 834 sas_fill_in_rphy(child, rphy);
810 835
811 list_add_tail(&child->disco_list_node, &parent->port->disco_list); 836 list_add_tail(&child->disco_list_node, &parent->port->disco_list);
@@ -830,8 +855,6 @@ static struct domain_device *sas_ex_discover_end_dev(
830 855
831 out_list_del: 856 out_list_del:
832 sas_rphy_free(child->rphy); 857 sas_rphy_free(child->rphy);
833 child->rphy = NULL;
834
835 list_del(&child->disco_list_node); 858 list_del(&child->disco_list_node);
836 spin_lock_irq(&parent->port->dev_list_lock); 859 spin_lock_irq(&parent->port->dev_list_lock);
837 list_del(&child->dev_list_node); 860 list_del(&child->dev_list_node);
@@ -911,6 +934,7 @@ static struct domain_device *sas_ex_discover_expander(
911 } 934 }
912 port = parent->port; 935 port = parent->port;
913 child->rphy = rphy; 936 child->rphy = rphy;
937 get_device(&rphy->dev);
914 edev = rphy_to_expander_device(rphy); 938 edev = rphy_to_expander_device(rphy);
915 child->dev_type = phy->attached_dev_type; 939 child->dev_type = phy->attached_dev_type;
916 kref_get(&parent->kref); 940 kref_get(&parent->kref);
@@ -934,6 +958,7 @@ static struct domain_device *sas_ex_discover_expander(
934 958
935 res = sas_discover_expander(child); 959 res = sas_discover_expander(child);
936 if (res) { 960 if (res) {
961 sas_rphy_delete(rphy);
937 spin_lock_irq(&parent->port->dev_list_lock); 962 spin_lock_irq(&parent->port->dev_list_lock);
938 list_del(&child->dev_list_node); 963 list_del(&child->dev_list_node);
939 spin_unlock_irq(&parent->port->dev_list_lock); 964 spin_unlock_irq(&parent->port->dev_list_lock);
@@ -1718,9 +1743,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1718 int phy_change_count = 0; 1743 int phy_change_count = 0;
1719 1744
1720 res = sas_get_phy_change_count(dev, i, &phy_change_count); 1745 res = sas_get_phy_change_count(dev, i, &phy_change_count);
1721 if (res) 1746 switch (res) {
1722 goto out; 1747 case SMP_RESP_PHY_VACANT:
1723 else if (phy_change_count != ex->ex_phy[i].phy_change_count) { 1748 case SMP_RESP_NO_PHY:
1749 continue;
1750 case SMP_RESP_FUNC_ACC:
1751 break;
1752 default:
1753 return res;
1754 }
1755
1756 if (phy_change_count != ex->ex_phy[i].phy_change_count) {
1724 if (update) 1757 if (update)
1725 ex->ex_phy[i].phy_change_count = 1758 ex->ex_phy[i].phy_change_count =
1726 phy_change_count; 1759 phy_change_count;
@@ -1728,8 +1761,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1728 return 0; 1761 return 0;
1729 } 1762 }
1730 } 1763 }
1731out: 1764 return 0;
1732 return res;
1733} 1765}
1734 1766
1735static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) 1767static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 120bff64be30..10cb5ae30977 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -94,8 +94,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
94 94
95void sas_hae_reset(struct work_struct *work) 95void sas_hae_reset(struct work_struct *work)
96{ 96{
97 struct sas_ha_event *ev = 97 struct sas_ha_event *ev = to_sas_ha_event(work);
98 container_of(work, struct sas_ha_event, work);
99 struct sas_ha_struct *ha = ev->ha; 98 struct sas_ha_struct *ha = ev->ha;
100 99
101 clear_bit(HAE_RESET, &ha->pending); 100 clear_bit(HAE_RESET, &ha->pending);
@@ -369,14 +368,14 @@ static void sas_phy_release(struct sas_phy *phy)
369 368
370static void phy_reset_work(struct work_struct *work) 369static void phy_reset_work(struct work_struct *work)
371{ 370{
372 struct sas_phy_data *d = container_of(work, typeof(*d), reset_work); 371 struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);
373 372
374 d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset); 373 d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
375} 374}
376 375
377static void phy_enable_work(struct work_struct *work) 376static void phy_enable_work(struct work_struct *work)
378{ 377{
379 struct sas_phy_data *d = container_of(work, typeof(*d), enable_work); 378 struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);
380 379
381 d->enable_result = sas_phy_enable(d->phy, d->enable); 380 d->enable_result = sas_phy_enable(d->phy, d->enable);
382} 381}
@@ -389,8 +388,8 @@ static int sas_phy_setup(struct sas_phy *phy)
389 return -ENOMEM; 388 return -ENOMEM;
390 389
391 mutex_init(&d->event_lock); 390 mutex_init(&d->event_lock);
392 INIT_WORK(&d->reset_work, phy_reset_work); 391 INIT_SAS_WORK(&d->reset_work, phy_reset_work);
393 INIT_WORK(&d->enable_work, phy_enable_work); 392 INIT_SAS_WORK(&d->enable_work, phy_enable_work);
394 d->phy = phy; 393 d->phy = phy;
395 phy->hostdata = d; 394 phy->hostdata = d;
396 395
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index f05c63879949..507e4cf12e56 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -45,10 +45,10 @@ struct sas_phy_data {
45 struct mutex event_lock; 45 struct mutex event_lock;
46 int hard_reset; 46 int hard_reset;
47 int reset_result; 47 int reset_result;
48 struct work_struct reset_work; 48 struct sas_work reset_work;
49 int enable; 49 int enable;
50 int enable_result; 50 int enable_result;
51 struct work_struct enable_work; 51 struct sas_work enable_work;
52}; 52};
53 53
54void sas_scsi_recover_host(struct Scsi_Host *shost); 54void sas_scsi_recover_host(struct Scsi_Host *shost);
@@ -80,7 +80,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
80void sas_porte_link_reset_err(struct work_struct *work); 80void sas_porte_link_reset_err(struct work_struct *work);
81void sas_porte_timer_event(struct work_struct *work); 81void sas_porte_timer_event(struct work_struct *work);
82void sas_porte_hard_reset(struct work_struct *work); 82void sas_porte_hard_reset(struct work_struct *work);
83void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work); 83void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
84 84
85int sas_notify_lldd_dev_found(struct domain_device *); 85int sas_notify_lldd_dev_found(struct domain_device *);
86void sas_notify_lldd_dev_gone(struct domain_device *); 86void sas_notify_lldd_dev_gone(struct domain_device *);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index dcfd4a9105c5..521422e857ab 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -32,8 +32,7 @@
32 32
33static void sas_phye_loss_of_signal(struct work_struct *work) 33static void sas_phye_loss_of_signal(struct work_struct *work)
34{ 34{
35 struct asd_sas_event *ev = 35 struct asd_sas_event *ev = to_asd_sas_event(work);
36 container_of(work, struct asd_sas_event, work);
37 struct asd_sas_phy *phy = ev->phy; 36 struct asd_sas_phy *phy = ev->phy;
38 37
39 clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending); 38 clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
@@ -43,8 +42,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
43 42
44static void sas_phye_oob_done(struct work_struct *work) 43static void sas_phye_oob_done(struct work_struct *work)
45{ 44{
46 struct asd_sas_event *ev = 45 struct asd_sas_event *ev = to_asd_sas_event(work);
47 container_of(work, struct asd_sas_event, work);
48 struct asd_sas_phy *phy = ev->phy; 46 struct asd_sas_phy *phy = ev->phy;
49 47
50 clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending); 48 clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
@@ -53,8 +51,7 @@ static void sas_phye_oob_done(struct work_struct *work)
53 51
54static void sas_phye_oob_error(struct work_struct *work) 52static void sas_phye_oob_error(struct work_struct *work)
55{ 53{
56 struct asd_sas_event *ev = 54 struct asd_sas_event *ev = to_asd_sas_event(work);
57 container_of(work, struct asd_sas_event, work);
58 struct asd_sas_phy *phy = ev->phy; 55 struct asd_sas_phy *phy = ev->phy;
59 struct sas_ha_struct *sas_ha = phy->ha; 56 struct sas_ha_struct *sas_ha = phy->ha;
60 struct asd_sas_port *port = phy->port; 57 struct asd_sas_port *port = phy->port;
@@ -85,8 +82,7 @@ static void sas_phye_oob_error(struct work_struct *work)
85 82
86static void sas_phye_spinup_hold(struct work_struct *work) 83static void sas_phye_spinup_hold(struct work_struct *work)
87{ 84{
88 struct asd_sas_event *ev = 85 struct asd_sas_event *ev = to_asd_sas_event(work);
89 container_of(work, struct asd_sas_event, work);
90 struct asd_sas_phy *phy = ev->phy; 86 struct asd_sas_phy *phy = ev->phy;
91 struct sas_ha_struct *sas_ha = phy->ha; 87 struct sas_ha_struct *sas_ha = phy->ha;
92 struct sas_internal *i = 88 struct sas_internal *i =
@@ -127,14 +123,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
127 phy->error = 0; 123 phy->error = 0;
128 INIT_LIST_HEAD(&phy->port_phy_el); 124 INIT_LIST_HEAD(&phy->port_phy_el);
129 for (k = 0; k < PORT_NUM_EVENTS; k++) { 125 for (k = 0; k < PORT_NUM_EVENTS; k++) {
130 INIT_WORK(&phy->port_events[k].work, 126 INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
131 sas_port_event_fns[k]);
132 phy->port_events[k].phy = phy; 127 phy->port_events[k].phy = phy;
133 } 128 }
134 129
135 for (k = 0; k < PHY_NUM_EVENTS; k++) { 130 for (k = 0; k < PHY_NUM_EVENTS; k++) {
136 INIT_WORK(&phy->phy_events[k].work, 131 INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
137 sas_phy_event_fns[k]);
138 phy->phy_events[k].phy = phy; 132 phy->phy_events[k].phy = phy;
139 } 133 }
140 134
@@ -144,8 +138,7 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
144 spin_lock_init(&phy->sas_prim_lock); 138 spin_lock_init(&phy->sas_prim_lock);
145 phy->frame_rcvd_size = 0; 139 phy->frame_rcvd_size = 0;
146 140
147 phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, 141 phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i);
148 i);
149 if (!phy->phy) 142 if (!phy->phy)
150 return -ENOMEM; 143 return -ENOMEM;
151 144
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index eb19c016d500..e884a8c58a0c 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -123,7 +123,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
123 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); 123 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
124 124
125 if (!port->port) { 125 if (!port->port) {
126 port->port = sas_port_alloc(phy->phy->dev.parent, phy->id); 126 port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
127 BUG_ON(!port->port); 127 BUG_ON(!port->port);
128 sas_port_add(port->port); 128 sas_port_add(port->port);
129 } 129 }
@@ -208,8 +208,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
208 208
209void sas_porte_bytes_dmaed(struct work_struct *work) 209void sas_porte_bytes_dmaed(struct work_struct *work)
210{ 210{
211 struct asd_sas_event *ev = 211 struct asd_sas_event *ev = to_asd_sas_event(work);
212 container_of(work, struct asd_sas_event, work);
213 struct asd_sas_phy *phy = ev->phy; 212 struct asd_sas_phy *phy = ev->phy;
214 213
215 clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending); 214 clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
@@ -219,8 +218,7 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
219 218
220void sas_porte_broadcast_rcvd(struct work_struct *work) 219void sas_porte_broadcast_rcvd(struct work_struct *work)
221{ 220{
222 struct asd_sas_event *ev = 221 struct asd_sas_event *ev = to_asd_sas_event(work);
223 container_of(work, struct asd_sas_event, work);
224 struct asd_sas_phy *phy = ev->phy; 222 struct asd_sas_phy *phy = ev->phy;
225 unsigned long flags; 223 unsigned long flags;
226 u32 prim; 224 u32 prim;
@@ -237,8 +235,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
237 235
238void sas_porte_link_reset_err(struct work_struct *work) 236void sas_porte_link_reset_err(struct work_struct *work)
239{ 237{
240 struct asd_sas_event *ev = 238 struct asd_sas_event *ev = to_asd_sas_event(work);
241 container_of(work, struct asd_sas_event, work);
242 struct asd_sas_phy *phy = ev->phy; 239 struct asd_sas_phy *phy = ev->phy;
243 240
244 clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending); 241 clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
@@ -248,8 +245,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
248 245
249void sas_porte_timer_event(struct work_struct *work) 246void sas_porte_timer_event(struct work_struct *work)
250{ 247{
251 struct asd_sas_event *ev = 248 struct asd_sas_event *ev = to_asd_sas_event(work);
252 container_of(work, struct asd_sas_event, work);
253 struct asd_sas_phy *phy = ev->phy; 249 struct asd_sas_phy *phy = ev->phy;
254 250
255 clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending); 251 clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
@@ -259,8 +255,7 @@ void sas_porte_timer_event(struct work_struct *work)
259 255
260void sas_porte_hard_reset(struct work_struct *work) 256void sas_porte_hard_reset(struct work_struct *work)
261{ 257{
262 struct asd_sas_event *ev = 258 struct asd_sas_event *ev = to_asd_sas_event(work);
263 container_of(work, struct asd_sas_event, work);
264 struct asd_sas_phy *phy = ev->phy; 259 struct asd_sas_phy *phy = ev->phy;
265 260
266 clear_bit(PORTE_HARD_RESET, &phy->port_events_pending); 261 clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f74cc0602f3b..bc3cc6d91117 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1367,6 +1367,9 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1367 struct qla_hw_data *ha = vha->hw; 1367 struct qla_hw_data *ha = vha->hw;
1368 int rval = 0; 1368 int rval = 0;
1369 1369
1370 if (ha->flags.isp82xx_reset_hdlr_active)
1371 return -EBUSY;
1372
1370 rval = qla2x00_optrom_setup(bsg_job, vha, 0); 1373 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1371 if (rval) 1374 if (rval)
1372 return rval; 1375 return rval;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 897731b93df2..62324a1d5573 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,7 +15,7 @@
15 * | Mailbox commands | 0x113e | 0x112c-0x112e | 15 * | Mailbox commands | 0x113e | 0x112c-0x112e |
16 * | | | 0x113a | 16 * | | | 0x113a |
17 * | Device Discovery | 0x2086 | 0x2020-0x2022 | 17 * | Device Discovery | 0x2086 | 0x2020-0x2022 |
18 * | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 | 18 * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
19 * | | | 0x302d-0x302e | 19 * | | | 0x302d-0x302e |
20 * | DPC Thread | 0x401c | | 20 * | DPC Thread | 0x401c | |
21 * | Async Events | 0x505d | 0x502b-0x502f | 21 * | Async Events | 0x505d | 0x502b-0x502f |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index f79844ce7122..ce42288049b5 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1715,13 +1715,24 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1715 res = DID_ERROR << 16; 1715 res = DID_ERROR << 16;
1716 break; 1716 break;
1717 } 1717 }
1718 } else { 1718 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
1719 lscsi_status != SAM_STAT_BUSY) {
1720 /*
1721 * scsi status of task set and busy are considered to be
1722 * task not completed.
1723 */
1724
1719 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 1725 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
1720 "Dropped frame(s) detected (0x%x " 1726 "Dropped frame(s) detected (0x%x "
1721 "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); 1727 "of 0x%x bytes).\n", resid,
1728 scsi_bufflen(cp));
1722 1729
1723 res = DID_ERROR << 16 | lscsi_status; 1730 res = DID_ERROR << 16 | lscsi_status;
1724 goto check_scsi_status; 1731 goto check_scsi_status;
1732 } else {
1733 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
1734 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
1735 scsi_status, lscsi_status);
1725 } 1736 }
1726 1737
1727 res = DID_OK << 16 | lscsi_status; 1738 res = DID_OK << 16 | lscsi_status;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index f0528539bbbc..de722a933438 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3125,6 +3125,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3125 ql_log(ql_log_info, vha, 0x00b7, 3125 ql_log(ql_log_info, vha, 0x00b7,
3126 "HW State: COLD/RE-INIT.\n"); 3126 "HW State: COLD/RE-INIT.\n");
3127 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3127 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3128 qla82xx_set_rst_ready(ha);
3128 if (ql2xmdenable) { 3129 if (ql2xmdenable) {
3129 if (qla82xx_md_collect(vha)) 3130 if (qla82xx_md_collect(vha))
3130 ql_log(ql_log_warn, vha, 0xb02c, 3131 ql_log(ql_log_warn, vha, 0xb02c,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a2f999273a5f..7db803377c64 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3577,9 +3577,25 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3577 continue; 3577 continue;
3578 /* Attempt a retry. */ 3578 /* Attempt a retry. */
3579 status = 1; 3579 status = 1;
3580 } else 3580 } else {
3581 status = qla2x00_fabric_login(vha, 3581 status = qla2x00_fabric_login(vha,
3582 fcport, &next_loopid); 3582 fcport, &next_loopid);
3583 if (status == QLA_SUCCESS) {
3584 int status2;
3585 uint8_t opts;
3586
3587 opts = 0;
3588 if (fcport->flags &
3589 FCF_FCP2_DEVICE)
3590 opts |= BIT_1;
3591 status2 =
3592 qla2x00_get_port_database(
3593 vha, fcport,
3594 opts);
3595 if (status2 != QLA_SUCCESS)
3596 status = 1;
3597 }
3598 }
3583 } else 3599 } else
3584 status = qla2x00_local_device_login(vha, 3600 status = qla2x00_local_device_login(vha,
3585 fcport); 3601 fcport);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3c13c0a6be63..a683e766d1ae 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1017,6 +1017,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
1017 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) 1017 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
1018 return; 1018 return;
1019 1019
1020 if (ha->flags.isp82xx_reset_hdlr_active)
1021 return;
1022
1020 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, 1023 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
1021 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); 1024 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
1022 if (hdr.version == __constant_cpu_to_le16(0xffff)) 1025 if (hdr.version == __constant_cpu_to_le16(0xffff))
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 29d780c38040..f5fdb16bec9b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.07.13-k" 10#define QLA2XXX_VERSION "8.04.00.03-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 4
14#define QLA_DRIVER_PATCH_VER 7 14#define QLA_DRIVER_PATCH_VER 0
15#define QLA_DRIVER_BETA_VER 3 15#define QLA_DRIVER_BETA_VER 3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ead6405f3e51..5dfd7495d1a1 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1638,7 +1638,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1638 request_fn_proc *request_fn) 1638 request_fn_proc *request_fn)
1639{ 1639{
1640 struct request_queue *q; 1640 struct request_queue *q;
1641 struct device *dev = shost->shost_gendev.parent; 1641 struct device *dev = shost->dma_dev;
1642 1642
1643 q = blk_init_queue(request_fn, NULL); 1643 q = blk_init_queue(request_fn, NULL);
1644 if (!q) 1644 if (!q)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index efccd72c4a3e..1b3843117268 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -175,7 +175,8 @@ static void virtscsi_complete_free(void *buf)
175 175
176 if (cmd->comp) 176 if (cmd->comp)
177 complete_all(cmd->comp); 177 complete_all(cmd->comp);
178 mempool_free(cmd, virtscsi_cmd_pool); 178 else
179 mempool_free(cmd, virtscsi_cmd_pool);
179} 180}
180 181
181static void virtscsi_ctrl_done(struct virtqueue *vq) 182static void virtscsi_ctrl_done(struct virtqueue *vq)
@@ -311,21 +312,22 @@ out:
311static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) 312static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
312{ 313{
313 DECLARE_COMPLETION_ONSTACK(comp); 314 DECLARE_COMPLETION_ONSTACK(comp);
314 int ret; 315 int ret = FAILED;
315 316
316 cmd->comp = &comp; 317 cmd->comp = &comp;
317 ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd, 318 if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
318 sizeof cmd->req.tmf, sizeof cmd->resp.tmf, 319 sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
319 GFP_NOIO); 320 GFP_NOIO) < 0)
320 if (ret < 0) 321 goto out;
321 return FAILED;
322 322
323 wait_for_completion(&comp); 323 wait_for_completion(&comp);
324 if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK && 324 if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
325 cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) 325 cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
326 return FAILED; 326 ret = SUCCESS;
327 327
328 return SUCCESS; 328out:
329 mempool_free(cmd, virtscsi_cmd_pool);
330 return ret;
329} 331}
330 332
331static int virtscsi_device_reset(struct scsi_cmnd *sc) 333static int virtscsi_device_reset(struct scsi_cmnd *sc)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3ed748355b98..00c024039c97 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -74,7 +74,7 @@ config SPI_ATMEL
74 This selects a driver for the Atmel SPI Controller, present on 74 This selects a driver for the Atmel SPI Controller, present on
75 many AT32 (AVR32) and AT91 (ARM) chips. 75 many AT32 (AVR32) and AT91 (ARM) chips.
76 76
77config SPI_BFIN 77config SPI_BFIN5XX
78 tristate "SPI controller driver for ADI Blackfin5xx" 78 tristate "SPI controller driver for ADI Blackfin5xx"
79 depends on BLACKFIN 79 depends on BLACKFIN
80 help 80 help
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index a1d48e0ba3dc..9d75d2198ff5 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
15obj-$(CONFIG_SPI_ATH79) += spi-ath79.o 15obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
16obj-$(CONFIG_SPI_AU1550) += spi-au1550.o 16obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
17obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o 17obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
18obj-$(CONFIG_SPI_BFIN) += spi-bfin5xx.o 18obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
19obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o 19obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
20obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o 20obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
21obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o 21obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index f01b2648452e..7491971139a6 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Broadcom BCM63xx SPI controller support 2 * Broadcom BCM63xx SPI controller support
3 * 3 *
4 * Copyright (C) 2009-2011 Florian Fainelli <florian@openwrt.org> 4 * Copyright (C) 2009-2012 Florian Fainelli <florian@openwrt.org>
5 * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com> 5 * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -30,6 +30,8 @@
30#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
31#include <linux/completion.h> 31#include <linux/completion.h>
32#include <linux/err.h> 32#include <linux/err.h>
33#include <linux/workqueue.h>
34#include <linux/pm_runtime.h>
33 35
34#include <bcm63xx_dev_spi.h> 36#include <bcm63xx_dev_spi.h>
35 37
@@ -37,8 +39,6 @@
37#define DRV_VER "0.1.2" 39#define DRV_VER "0.1.2"
38 40
39struct bcm63xx_spi { 41struct bcm63xx_spi {
40 spinlock_t lock;
41 int stopping;
42 struct completion done; 42 struct completion done;
43 43
44 void __iomem *regs; 44 void __iomem *regs;
@@ -96,17 +96,12 @@ static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
96 { 391000, SPI_CLK_0_391MHZ } 96 { 391000, SPI_CLK_0_391MHZ }
97}; 97};
98 98
99static int bcm63xx_spi_setup_transfer(struct spi_device *spi, 99static int bcm63xx_spi_check_transfer(struct spi_device *spi,
100 struct spi_transfer *t) 100 struct spi_transfer *t)
101{ 101{
102 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
103 u8 bits_per_word; 102 u8 bits_per_word;
104 u8 clk_cfg, reg;
105 u32 hz;
106 int i;
107 103
108 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; 104 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
109 hz = (t) ? t->speed_hz : spi->max_speed_hz;
110 if (bits_per_word != 8) { 105 if (bits_per_word != 8) {
111 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", 106 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
112 __func__, bits_per_word); 107 __func__, bits_per_word);
@@ -119,6 +114,19 @@ static int bcm63xx_spi_setup_transfer(struct spi_device *spi,
119 return -EINVAL; 114 return -EINVAL;
120 } 115 }
121 116
117 return 0;
118}
119
120static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
121 struct spi_transfer *t)
122{
123 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
124 u32 hz;
125 u8 clk_cfg, reg;
126 int i;
127
128 hz = (t) ? t->speed_hz : spi->max_speed_hz;
129
122 /* Find the closest clock configuration */ 130 /* Find the closest clock configuration */
123 for (i = 0; i < SPI_CLK_MASK; i++) { 131 for (i = 0; i < SPI_CLK_MASK; i++) {
124 if (hz <= bcm63xx_spi_freq_table[i][0]) { 132 if (hz <= bcm63xx_spi_freq_table[i][0]) {
@@ -139,8 +147,6 @@ static int bcm63xx_spi_setup_transfer(struct spi_device *spi,
139 bcm_spi_writeb(bs, reg, SPI_CLK_CFG); 147 bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
140 dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n", 148 dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
141 clk_cfg, hz); 149 clk_cfg, hz);
142
143 return 0;
144} 150}
145 151
146/* the spi->mode bits understood by this driver: */ 152/* the spi->mode bits understood by this driver: */
@@ -153,9 +159,6 @@ static int bcm63xx_spi_setup(struct spi_device *spi)
153 159
154 bs = spi_master_get_devdata(spi->master); 160 bs = spi_master_get_devdata(spi->master);
155 161
156 if (bs->stopping)
157 return -ESHUTDOWN;
158
159 if (!spi->bits_per_word) 162 if (!spi->bits_per_word)
160 spi->bits_per_word = 8; 163 spi->bits_per_word = 8;
161 164
@@ -165,7 +168,7 @@ static int bcm63xx_spi_setup(struct spi_device *spi)
165 return -EINVAL; 168 return -EINVAL;
166 } 169 }
167 170
168 ret = bcm63xx_spi_setup_transfer(spi, NULL); 171 ret = bcm63xx_spi_check_transfer(spi, NULL);
169 if (ret < 0) { 172 if (ret < 0) {
170 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 173 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
171 spi->mode & ~MODEBITS); 174 spi->mode & ~MODEBITS);
@@ -190,28 +193,29 @@ static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs)
190 bs->remaining_bytes -= size; 193 bs->remaining_bytes -= size;
191} 194}
192 195
193static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) 196static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
197 struct spi_transfer *t)
194{ 198{
195 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); 199 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
196 u16 msg_ctl; 200 u16 msg_ctl;
197 u16 cmd; 201 u16 cmd;
198 202
203 /* Disable the CMD_DONE interrupt */
204 bcm_spi_writeb(bs, 0, SPI_INT_MASK);
205
199 dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", 206 dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
200 t->tx_buf, t->rx_buf, t->len); 207 t->tx_buf, t->rx_buf, t->len);
201 208
202 /* Transmitter is inhibited */ 209 /* Transmitter is inhibited */
203 bs->tx_ptr = t->tx_buf; 210 bs->tx_ptr = t->tx_buf;
204 bs->rx_ptr = t->rx_buf; 211 bs->rx_ptr = t->rx_buf;
205 init_completion(&bs->done);
206 212
207 if (t->tx_buf) { 213 if (t->tx_buf) {
208 bs->remaining_bytes = t->len; 214 bs->remaining_bytes = t->len;
209 bcm63xx_spi_fill_tx_fifo(bs); 215 bcm63xx_spi_fill_tx_fifo(bs);
210 } 216 }
211 217
212 /* Enable the command done interrupt which 218 init_completion(&bs->done);
213 * we use to determine completion of a command */
214 bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
215 219
216 /* Fill in the Message control register */ 220 /* Fill in the Message control register */
217 msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT); 221 msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
@@ -230,33 +234,76 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
230 cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT); 234 cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
231 cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT); 235 cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
232 bcm_spi_writew(bs, cmd, SPI_CMD); 236 bcm_spi_writew(bs, cmd, SPI_CMD);
233 wait_for_completion(&bs->done);
234 237
235 /* Disable the CMD_DONE interrupt */ 238 /* Enable the CMD_DONE interrupt */
236 bcm_spi_writeb(bs, 0, SPI_INT_MASK); 239 bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
237 240
238 return t->len - bs->remaining_bytes; 241 return t->len - bs->remaining_bytes;
239} 242}
240 243
241static int bcm63xx_transfer(struct spi_device *spi, struct spi_message *m) 244static int bcm63xx_spi_prepare_transfer(struct spi_master *master)
242{ 245{
243 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); 246 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
244 struct spi_transfer *t;
245 int ret = 0;
246 247
247 if (unlikely(list_empty(&m->transfers))) 248 pm_runtime_get_sync(&bs->pdev->dev);
248 return -EINVAL;
249 249
250 if (bs->stopping) 250 return 0;
251 return -ESHUTDOWN; 251}
252
253static int bcm63xx_spi_unprepare_transfer(struct spi_master *master)
254{
255 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
256
257 pm_runtime_put(&bs->pdev->dev);
258
259 return 0;
260}
261
262static int bcm63xx_spi_transfer_one(struct spi_master *master,
263 struct spi_message *m)
264{
265 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
266 struct spi_transfer *t;
267 struct spi_device *spi = m->spi;
268 int status = 0;
269 unsigned int timeout = 0;
252 270
253 list_for_each_entry(t, &m->transfers, transfer_list) { 271 list_for_each_entry(t, &m->transfers, transfer_list) {
254 ret += bcm63xx_txrx_bufs(spi, t); 272 unsigned int len = t->len;
255 } 273 u8 rx_tail;
256 274
257 m->complete(m->context); 275 status = bcm63xx_spi_check_transfer(spi, t);
276 if (status < 0)
277 goto exit;
258 278
259 return ret; 279 /* configure adapter for a new transfer */
280 bcm63xx_spi_setup_transfer(spi, t);
281
282 while (len) {
283 /* send the data */
284 len -= bcm63xx_txrx_bufs(spi, t);
285
286 timeout = wait_for_completion_timeout(&bs->done, HZ);
287 if (!timeout) {
288 status = -ETIMEDOUT;
289 goto exit;
290 }
291
292 /* read out all data */
293 rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
294
295 /* Read out all the data */
296 if (rx_tail)
297 memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
298 }
299
300 m->actual_length += t->len;
301 }
302exit:
303 m->status = status;
304 spi_finalize_current_message(master);
305
306 return 0;
260} 307}
261 308
262/* This driver supports single master mode only. Hence 309/* This driver supports single master mode only. Hence
@@ -267,39 +314,15 @@ static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
267 struct spi_master *master = (struct spi_master *)dev_id; 314 struct spi_master *master = (struct spi_master *)dev_id;
268 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 315 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
269 u8 intr; 316 u8 intr;
270 u16 cmd;
271 317
272 /* Read interupts and clear them immediately */ 318 /* Read interupts and clear them immediately */
273 intr = bcm_spi_readb(bs, SPI_INT_STATUS); 319 intr = bcm_spi_readb(bs, SPI_INT_STATUS);
274 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); 320 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
275 bcm_spi_writeb(bs, 0, SPI_INT_MASK); 321 bcm_spi_writeb(bs, 0, SPI_INT_MASK);
276 322
277 /* A tansfer completed */ 323 /* A transfer completed */
278 if (intr & SPI_INTR_CMD_DONE) { 324 if (intr & SPI_INTR_CMD_DONE)
279 u8 rx_tail; 325 complete(&bs->done);
280
281 rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
282
283 /* Read out all the data */
284 if (rx_tail)
285 memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
286
287 /* See if there is more data to send */
288 if (bs->remaining_bytes > 0) {
289 bcm63xx_spi_fill_tx_fifo(bs);
290
291 /* Start the transfer */
292 bcm_spi_writew(bs, SPI_HD_W << SPI_MSG_TYPE_SHIFT,
293 SPI_MSG_CTL);
294 cmd = bcm_spi_readw(bs, SPI_CMD);
295 cmd |= SPI_CMD_START_IMMEDIATE;
296 cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
297 bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
298 bcm_spi_writew(bs, cmd, SPI_CMD);
299 } else {
300 complete(&bs->done);
301 }
302 }
303 326
304 return IRQ_HANDLED; 327 return IRQ_HANDLED;
305} 328}
@@ -345,7 +368,6 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
345 } 368 }
346 369
347 bs = spi_master_get_devdata(master); 370 bs = spi_master_get_devdata(master);
348 init_completion(&bs->done);
349 371
350 platform_set_drvdata(pdev, master); 372 platform_set_drvdata(pdev, master);
351 bs->pdev = pdev; 373 bs->pdev = pdev;
@@ -379,12 +401,13 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
379 master->bus_num = pdata->bus_num; 401 master->bus_num = pdata->bus_num;
380 master->num_chipselect = pdata->num_chipselect; 402 master->num_chipselect = pdata->num_chipselect;
381 master->setup = bcm63xx_spi_setup; 403 master->setup = bcm63xx_spi_setup;
382 master->transfer = bcm63xx_transfer; 404 master->prepare_transfer_hardware = bcm63xx_spi_prepare_transfer;
405 master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer;
406 master->transfer_one_message = bcm63xx_spi_transfer_one;
407 master->mode_bits = MODEBITS;
383 bs->speed_hz = pdata->speed_hz; 408 bs->speed_hz = pdata->speed_hz;
384 bs->stopping = 0;
385 bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); 409 bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
386 bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA)); 410 bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
387 spin_lock_init(&bs->lock);
388 411
389 /* Initialize hardware */ 412 /* Initialize hardware */
390 clk_enable(bs->clk); 413 clk_enable(bs->clk);
@@ -418,18 +441,16 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
418 struct spi_master *master = platform_get_drvdata(pdev); 441 struct spi_master *master = platform_get_drvdata(pdev);
419 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 442 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
420 443
444 spi_unregister_master(master);
445
421 /* reset spi block */ 446 /* reset spi block */
422 bcm_spi_writeb(bs, 0, SPI_INT_MASK); 447 bcm_spi_writeb(bs, 0, SPI_INT_MASK);
423 spin_lock(&bs->lock);
424 bs->stopping = 1;
425 448
426 /* HW shutdown */ 449 /* HW shutdown */
427 clk_disable(bs->clk); 450 clk_disable(bs->clk);
428 clk_put(bs->clk); 451 clk_put(bs->clk);
429 452
430 spin_unlock(&bs->lock);
431 platform_set_drvdata(pdev, 0); 453 platform_set_drvdata(pdev, 0);
432 spi_unregister_master(master);
433 454
434 return 0; 455 return 0;
435} 456}
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 248a2cc671a9..1fe51198a622 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -252,19 +252,15 @@ static void
252bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data) 252bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
253{ 253{
254 struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; 254 struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
255 unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15);
256 255
257 bfin_sport_spi_disable(drv_data); 256 bfin_sport_spi_disable(drv_data);
258 dev_dbg(drv_data->dev, "restoring spi ctl state\n"); 257 dev_dbg(drv_data->dev, "restoring spi ctl state\n");
259 258
260 bfin_write(&drv_data->regs->tcr1, chip->ctl_reg); 259 bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
261 bfin_write(&drv_data->regs->tcr2, bits);
262 bfin_write(&drv_data->regs->tclkdiv, chip->baud); 260 bfin_write(&drv_data->regs->tclkdiv, chip->baud);
263 bfin_write(&drv_data->regs->tfsdiv, bits);
264 SSYNC(); 261 SSYNC();
265 262
266 bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS)); 263 bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
267 bfin_write(&drv_data->regs->rcr2, bits);
268 SSYNC(); 264 SSYNC();
269 265
270 bfin_sport_spi_cs_active(chip); 266 bfin_sport_spi_cs_active(chip);
@@ -420,11 +416,15 @@ bfin_sport_spi_pump_transfers(unsigned long data)
420 drv_data->cs_change = transfer->cs_change; 416 drv_data->cs_change = transfer->cs_change;
421 417
422 /* Bits per word setup */ 418 /* Bits per word setup */
423 bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; 419 bits_per_word = transfer->bits_per_word ? :
424 if (bits_per_word == 8) 420 message->spi->bits_per_word ? : 8;
425 drv_data->ops = &bfin_sport_transfer_ops_u8; 421 if (bits_per_word % 16 == 0)
426 else
427 drv_data->ops = &bfin_sport_transfer_ops_u16; 422 drv_data->ops = &bfin_sport_transfer_ops_u16;
423 else
424 drv_data->ops = &bfin_sport_transfer_ops_u8;
425 bfin_write(&drv_data->regs->tcr2, bits_per_word - 1);
426 bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1);
427 bfin_write(&drv_data->regs->rcr2, bits_per_word - 1);
428 428
429 drv_data->state = RUNNING_STATE; 429 drv_data->state = RUNNING_STATE;
430 430
@@ -598,11 +598,12 @@ bfin_sport_spi_setup(struct spi_device *spi)
598 } 598 }
599 chip->cs_chg_udelay = chip_info->cs_chg_udelay; 599 chip->cs_chg_udelay = chip_info->cs_chg_udelay;
600 chip->idle_tx_val = chip_info->idle_tx_val; 600 chip->idle_tx_val = chip_info->idle_tx_val;
601 spi->bits_per_word = chip_info->bits_per_word;
602 } 601 }
603 } 602 }
604 603
605 if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { 604 if (spi->bits_per_word % 8) {
605 dev_err(&spi->dev, "%d bits_per_word is not supported\n",
606 spi->bits_per_word);
606 ret = -EINVAL; 607 ret = -EINVAL;
607 goto error; 608 goto error;
608 } 609 }
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 3b83ff8b1e2b..9bb4d4af8547 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -396,7 +396,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
396 /* last read */ 396 /* last read */
397 if (drv_data->rx) { 397 if (drv_data->rx) {
398 dev_dbg(&drv_data->pdev->dev, "last read\n"); 398 dev_dbg(&drv_data->pdev->dev, "last read\n");
399 if (n_bytes % 2) { 399 if (!(n_bytes % 2)) {
400 u16 *buf = (u16 *)drv_data->rx; 400 u16 *buf = (u16 *)drv_data->rx;
401 for (loop = 0; loop < n_bytes / 2; loop++) 401 for (loop = 0; loop < n_bytes / 2; loop++)
402 *buf++ = bfin_read(&drv_data->regs->rdbr); 402 *buf++ = bfin_read(&drv_data->regs->rdbr);
@@ -424,7 +424,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
424 if (drv_data->rx && drv_data->tx) { 424 if (drv_data->rx && drv_data->tx) {
425 /* duplex */ 425 /* duplex */
426 dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); 426 dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n");
427 if (n_bytes % 2) { 427 if (!(n_bytes % 2)) {
428 u16 *buf = (u16 *)drv_data->rx; 428 u16 *buf = (u16 *)drv_data->rx;
429 u16 *buf2 = (u16 *)drv_data->tx; 429 u16 *buf2 = (u16 *)drv_data->tx;
430 for (loop = 0; loop < n_bytes / 2; loop++) { 430 for (loop = 0; loop < n_bytes / 2; loop++) {
@@ -442,7 +442,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
442 } else if (drv_data->rx) { 442 } else if (drv_data->rx) {
443 /* read */ 443 /* read */
444 dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); 444 dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n");
445 if (n_bytes % 2) { 445 if (!(n_bytes % 2)) {
446 u16 *buf = (u16 *)drv_data->rx; 446 u16 *buf = (u16 *)drv_data->rx;
447 for (loop = 0; loop < n_bytes / 2; loop++) { 447 for (loop = 0; loop < n_bytes / 2; loop++) {
448 *buf++ = bfin_read(&drv_data->regs->rdbr); 448 *buf++ = bfin_read(&drv_data->regs->rdbr);
@@ -458,7 +458,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
458 } else if (drv_data->tx) { 458 } else if (drv_data->tx) {
459 /* write */ 459 /* write */
460 dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); 460 dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n");
461 if (n_bytes % 2) { 461 if (!(n_bytes % 2)) {
462 u16 *buf = (u16 *)drv_data->tx; 462 u16 *buf = (u16 *)drv_data->tx;
463 for (loop = 0; loop < n_bytes / 2; loop++) { 463 for (loop = 0; loop < n_bytes / 2; loop++) {
464 bfin_read(&drv_data->regs->rdbr); 464 bfin_read(&drv_data->regs->rdbr);
@@ -587,6 +587,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
587 if (message->state == DONE_STATE) { 587 if (message->state == DONE_STATE) {
588 dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n"); 588 dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n");
589 message->status = 0; 589 message->status = 0;
590 bfin_spi_flush(drv_data);
590 bfin_spi_giveback(drv_data); 591 bfin_spi_giveback(drv_data);
591 return; 592 return;
592 } 593 }
@@ -870,8 +871,10 @@ static void bfin_spi_pump_transfers(unsigned long data)
870 message->actual_length += drv_data->len_in_bytes; 871 message->actual_length += drv_data->len_in_bytes;
871 /* Move to next transfer of this msg */ 872 /* Move to next transfer of this msg */
872 message->state = bfin_spi_next_transfer(drv_data); 873 message->state = bfin_spi_next_transfer(drv_data);
873 if (drv_data->cs_change) 874 if (drv_data->cs_change && message->state != DONE_STATE) {
875 bfin_spi_flush(drv_data);
874 bfin_spi_cs_deactive(drv_data, chip); 876 bfin_spi_cs_deactive(drv_data, chip);
877 }
875 } 878 }
876 879
877 /* Schedule next transfer tasklet */ 880 /* Schedule next transfer tasklet */
@@ -1026,7 +1029,6 @@ static int bfin_spi_setup(struct spi_device *spi)
1026 chip->cs_chg_udelay = chip_info->cs_chg_udelay; 1029 chip->cs_chg_udelay = chip_info->cs_chg_udelay;
1027 chip->idle_tx_val = chip_info->idle_tx_val; 1030 chip->idle_tx_val = chip_info->idle_tx_val;
1028 chip->pio_interrupt = chip_info->pio_interrupt; 1031 chip->pio_interrupt = chip_info->pio_interrupt;
1029 spi->bits_per_word = chip_info->bits_per_word;
1030 } else { 1032 } else {
1031 /* force a default base state */ 1033 /* force a default base state */
1032 chip->ctl_reg &= bfin_ctl_reg; 1034 chip->ctl_reg &= bfin_ctl_reg;
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 6db2887852d6..e8055073e84d 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -545,13 +545,12 @@ static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
545 * in case of failure. 545 * in case of failure.
546 */ 546 */
547static struct dma_async_tx_descriptor * 547static struct dma_async_tx_descriptor *
548ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) 548ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
549{ 549{
550 struct spi_transfer *t = espi->current_msg->state; 550 struct spi_transfer *t = espi->current_msg->state;
551 struct dma_async_tx_descriptor *txd; 551 struct dma_async_tx_descriptor *txd;
552 enum dma_slave_buswidth buswidth; 552 enum dma_slave_buswidth buswidth;
553 struct dma_slave_config conf; 553 struct dma_slave_config conf;
554 enum dma_transfer_direction slave_dirn;
555 struct scatterlist *sg; 554 struct scatterlist *sg;
556 struct sg_table *sgt; 555 struct sg_table *sgt;
557 struct dma_chan *chan; 556 struct dma_chan *chan;
@@ -567,14 +566,13 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
567 memset(&conf, 0, sizeof(conf)); 566 memset(&conf, 0, sizeof(conf));
568 conf.direction = dir; 567 conf.direction = dir;
569 568
570 if (dir == DMA_FROM_DEVICE) { 569 if (dir == DMA_DEV_TO_MEM) {
571 chan = espi->dma_rx; 570 chan = espi->dma_rx;
572 buf = t->rx_buf; 571 buf = t->rx_buf;
573 sgt = &espi->rx_sgt; 572 sgt = &espi->rx_sgt;
574 573
575 conf.src_addr = espi->sspdr_phys; 574 conf.src_addr = espi->sspdr_phys;
576 conf.src_addr_width = buswidth; 575 conf.src_addr_width = buswidth;
577 slave_dirn = DMA_DEV_TO_MEM;
578 } else { 576 } else {
579 chan = espi->dma_tx; 577 chan = espi->dma_tx;
580 buf = t->tx_buf; 578 buf = t->tx_buf;
@@ -582,7 +580,6 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
582 580
583 conf.dst_addr = espi->sspdr_phys; 581 conf.dst_addr = espi->sspdr_phys;
584 conf.dst_addr_width = buswidth; 582 conf.dst_addr_width = buswidth;
585 slave_dirn = DMA_MEM_TO_DEV;
586 } 583 }
587 584
588 ret = dmaengine_slave_config(chan, &conf); 585 ret = dmaengine_slave_config(chan, &conf);
@@ -633,8 +630,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
633 if (!nents) 630 if (!nents)
634 return ERR_PTR(-ENOMEM); 631 return ERR_PTR(-ENOMEM);
635 632
636 txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, 633 txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
637 slave_dirn, DMA_CTRL_ACK);
638 if (!txd) { 634 if (!txd) {
639 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); 635 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
640 return ERR_PTR(-ENOMEM); 636 return ERR_PTR(-ENOMEM);
@@ -651,12 +647,12 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
651 * unmapped. 647 * unmapped.
652 */ 648 */
653static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi, 649static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
654 enum dma_data_direction dir) 650 enum dma_transfer_direction dir)
655{ 651{
656 struct dma_chan *chan; 652 struct dma_chan *chan;
657 struct sg_table *sgt; 653 struct sg_table *sgt;
658 654
659 if (dir == DMA_FROM_DEVICE) { 655 if (dir == DMA_DEV_TO_MEM) {
660 chan = espi->dma_rx; 656 chan = espi->dma_rx;
661 sgt = &espi->rx_sgt; 657 sgt = &espi->rx_sgt;
662 } else { 658 } else {
@@ -677,16 +673,16 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
677 struct spi_message *msg = espi->current_msg; 673 struct spi_message *msg = espi->current_msg;
678 struct dma_async_tx_descriptor *rxd, *txd; 674 struct dma_async_tx_descriptor *rxd, *txd;
679 675
680 rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE); 676 rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
681 if (IS_ERR(rxd)) { 677 if (IS_ERR(rxd)) {
682 dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); 678 dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
683 msg->status = PTR_ERR(rxd); 679 msg->status = PTR_ERR(rxd);
684 return; 680 return;
685 } 681 }
686 682
687 txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE); 683 txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
688 if (IS_ERR(txd)) { 684 if (IS_ERR(txd)) {
689 ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); 685 ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
690 dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); 686 dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
691 msg->status = PTR_ERR(txd); 687 msg->status = PTR_ERR(txd);
692 return; 688 return;
@@ -705,8 +701,8 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
705 701
706 wait_for_completion(&espi->wait); 702 wait_for_completion(&espi->wait);
707 703
708 ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE); 704 ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
709 ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); 705 ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
710} 706}
711 707
712/** 708/**
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 09c925aaf320..400ae2121a2a 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1667,9 +1667,15 @@ static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
1667 /* cpsdvsr = 254 & scr = 255 */ 1667 /* cpsdvsr = 254 & scr = 255 */
1668 min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX); 1668 min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);
1669 1669
1670 if (!((freq <= max_tclk) && (freq >= min_tclk))) { 1670 if (freq > max_tclk)
1671 dev_warn(&pl022->adev->dev,
1672 "Max speed that can be programmed is %d Hz, you requested %d\n",
1673 max_tclk, freq);
1674
1675 if (freq < min_tclk) {
1671 dev_err(&pl022->adev->dev, 1676 dev_err(&pl022->adev->dev,
1672 "controller data is incorrect: out of range frequency"); 1677 "Requested frequency: %d Hz is less than minimum possible %d Hz\n",
1678 freq, min_tclk);
1673 return -EINVAL; 1679 return -EINVAL;
1674 } 1680 }
1675 1681
@@ -1681,26 +1687,37 @@ static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
1681 while (scr <= SCR_MAX) { 1687 while (scr <= SCR_MAX) {
1682 tmp = spi_rate(rate, cpsdvsr, scr); 1688 tmp = spi_rate(rate, cpsdvsr, scr);
1683 1689
1684 if (tmp > freq) 1690 if (tmp > freq) {
1691 /* we need lower freq */
1685 scr++; 1692 scr++;
1693 continue;
1694 }
1695
1686 /* 1696 /*
1687 * If found exact value, update and break. 1697 * If found exact value, mark found and break.
1688 * If found more closer value, update and continue. 1698 * If found more closer value, update and break.
1689 */ 1699 */
1690 else if ((tmp == freq) || (tmp > best_freq)) { 1700 if (tmp > best_freq) {
1691 best_freq = tmp; 1701 best_freq = tmp;
1692 best_cpsdvsr = cpsdvsr; 1702 best_cpsdvsr = cpsdvsr;
1693 best_scr = scr; 1703 best_scr = scr;
1694 1704
1695 if (tmp == freq) 1705 if (tmp == freq)
1696 break; 1706 found = 1;
1697 } 1707 }
1698 scr++; 1708 /*
1709 * increased scr will give lower rates, which are not
1710 * required
1711 */
1712 break;
1699 } 1713 }
1700 cpsdvsr += 2; 1714 cpsdvsr += 2;
1701 scr = SCR_MIN; 1715 scr = SCR_MIN;
1702 } 1716 }
1703 1717
1718 WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate \n",
1719 freq);
1720
1704 clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF); 1721 clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
1705 clk_freq->scr = (u8) (best_scr & 0xFF); 1722 clk_freq->scr = (u8) (best_scr & 0xFF);
1706 dev_dbg(&pl022->adev->dev, 1723 dev_dbg(&pl022->adev->dev,
@@ -1823,9 +1840,12 @@ static int pl022_setup(struct spi_device *spi)
1823 } else 1840 } else
1824 chip->cs_control = chip_info->cs_control; 1841 chip->cs_control = chip_info->cs_control;
1825 1842
1826 if (bits <= 3) { 1843 /* Check bits per word with vendor specific range */
1827 /* PL022 doesn't support less than 4-bits */ 1844 if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
1828 status = -ENOTSUPP; 1845 status = -ENOTSUPP;
1846 dev_err(&spi->dev, "illegal data size for this controller!\n");
1847 dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
1848 pl022->vendor->max_bpw);
1829 goto err_config_params; 1849 goto err_config_params;
1830 } else if (bits <= 8) { 1850 } else if (bits <= 8) {
1831 dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); 1851 dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
@@ -1838,20 +1858,10 @@ static int pl022_setup(struct spi_device *spi)
1838 chip->read = READING_U16; 1858 chip->read = READING_U16;
1839 chip->write = WRITING_U16; 1859 chip->write = WRITING_U16;
1840 } else { 1860 } else {
1841 if (pl022->vendor->max_bpw >= 32) { 1861 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
1842 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); 1862 chip->n_bytes = 4;
1843 chip->n_bytes = 4; 1863 chip->read = READING_U32;
1844 chip->read = READING_U32; 1864 chip->write = WRITING_U32;
1845 chip->write = WRITING_U32;
1846 } else {
1847 dev_err(&spi->dev,
1848 "illegal data size for this controller!\n");
1849 dev_err(&spi->dev,
1850 "a standard pl022 can only handle "
1851 "1 <= n <= 16 bit words\n");
1852 status = -ENOTSUPP;
1853 goto err_config_params;
1854 }
1855 } 1865 }
1856 1866
1857 /* Now Initialize all register settings required for this chip */ 1867 /* Now Initialize all register settings required for this chip */
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 400df8cbee53..d91751f9ffe8 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -36,6 +36,7 @@
36#include <linux/prefetch.h> 36#include <linux/prefetch.h>
37#include <linux/ratelimit.h> 37#include <linux/ratelimit.h>
38#include <linux/smp.h> 38#include <linux/smp.h>
39#include <linux/interrupt.h>
39#include <net/dst.h> 40#include <net/dst.h>
40#ifdef CONFIG_XFRM 41#ifdef CONFIG_XFRM
41#include <linux/xfrm.h> 42#include <linux/xfrm.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 56d74dc2fbd5..5877b2c64e2a 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -32,6 +32,7 @@
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <linux/ratelimit.h> 33#include <linux/ratelimit.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/interrupt.h>
35#include <net/dst.h> 36#include <net/dst.h>
36#ifdef CONFIG_XFRM 37#ifdef CONFIG_XFRM
37#include <linux/xfrm.h> 38#include <linux/xfrm.h>
@@ -344,7 +345,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
344 } 345 }
345 if (unlikely 346 if (unlikely
346 (skb->truesize != 347 (skb->truesize !=
347 sizeof(*skb) + skb_end_pointer(skb) - skb->head)) { 348 sizeof(*skb) + skb_end_offset(skb))) {
348 /* 349 /*
349 printk("TX buffer truesize has been changed\n"); 350 printk("TX buffer truesize has been changed\n");
350 */ 351 */
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 9112cd882154..60cba8194de3 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -31,6 +31,7 @@
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/phy.h> 32#include <linux/phy.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/interrupt.h>
34 35
35#include <net/dst.h> 36#include <net/dst.h>
36 37
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index 2b45d3d1800c..04cd57f2a6da 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -383,8 +383,6 @@ static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
383 pd->tx_pool = &f->link; 383 pd->tx_pool = &f->link;
384 pd->tx_pool_count++; 384 pd->tx_pool_count++;
385 f = 0; 385 f = 0;
386 } else {
387 kfree(f);
388 } 386 }
389 spin_unlock_bh(&pd->tx_frame_lock); 387 spin_unlock_bh(&pd->tx_frame_lock);
390 if (f) 388 if (f)
diff --git a/drivers/staging/ramster/cluster/tcp.c b/drivers/staging/ramster/cluster/tcp.c
index 3af1b2c51b78..b9721c1055b1 100644
--- a/drivers/staging/ramster/cluster/tcp.c
+++ b/drivers/staging/ramster/cluster/tcp.c
@@ -2106,7 +2106,7 @@ static int r2net_open_listening_sock(__be32 addr, __be16 port)
2106 r2net_listen_sock = sock; 2106 r2net_listen_sock = sock;
2107 INIT_WORK(&r2net_listen_work, r2net_accept_many); 2107 INIT_WORK(&r2net_listen_work, r2net_accept_many);
2108 2108
2109 sock->sk->sk_reuse = 1; 2109 sock->sk->sk_reuse = SK_CAN_REUSE;
2110 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); 2110 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
2111 if (ret < 0) { 2111 if (ret < 0) {
2112 printk(KERN_ERR "ramster: Error %d while binding socket at " 2112 printk(KERN_ERR "ramster: Error %d while binding socket at "
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 7862513cc295..9cf29fcea11e 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -79,10 +79,6 @@
79#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190) 79#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
80#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194) 80#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
81 81
82#define OMAP343X_CTRL_REGADDR(reg) \
83 OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
84
85
86/* Forward Declarations: */ 82/* Forward Declarations: */
87static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); 83static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
88static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, 84static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
@@ -418,19 +414,27 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
418 414
419 /* Assert RST1 i.e only the RST only for DSP megacell */ 415 /* Assert RST1 i.e only the RST only for DSP megacell */
420 if (!status) { 416 if (!status) {
417 /*
418 * XXX: ioremapping MUST be removed once ctrl
419 * function is made available.
420 */
421 void __iomem *ctrl = ioremap(OMAP343X_CTRL_BASE, SZ_4K);
422 if (!ctrl)
423 return -ENOMEM;
424
421 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 425 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
422 OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, 426 OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
423 OMAP2_RM_RSTCTRL); 427 OMAP2_RM_RSTCTRL);
424 /* Mask address with 1K for compatibility */ 428 /* Mask address with 1K for compatibility */
425 __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK, 429 __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
426 OMAP343X_CTRL_REGADDR( 430 ctrl + OMAP343X_CONTROL_IVA2_BOOTADDR);
427 OMAP343X_CONTROL_IVA2_BOOTADDR));
428 /* 431 /*
429 * Set bootmode to self loop if dsp_debug flag is true 432 * Set bootmode to self loop if dsp_debug flag is true
430 */ 433 */
431 __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0, 434 __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
432 OMAP343X_CTRL_REGADDR( 435 ctrl + OMAP343X_CONTROL_IVA2_BOOTMOD);
433 OMAP343X_CONTROL_IVA2_BOOTMOD)); 436
437 iounmap(ctrl);
434 } 438 }
435 } 439 }
436 if (!status) { 440 if (!status) {
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
index 70055c8111ed..870f934f4f3b 100644
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -53,7 +53,10 @@ int dsp_wdt_init(void)
53 int ret = 0; 53 int ret = 0;
54 54
55 dsp_wdt.sm_wdt = NULL; 55 dsp_wdt.sm_wdt = NULL;
56 dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE); 56 dsp_wdt.reg_base = ioremap(OMAP34XX_WDT3_BASE, SZ_4K);
57 if (!dsp_wdt.reg_base)
58 return -ENOMEM;
59
57 tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0); 60 tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0);
58 61
59 dsp_wdt.fclk = clk_get(NULL, "wdt3_fck"); 62 dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
@@ -99,6 +102,9 @@ void dsp_wdt_exit(void)
99 dsp_wdt.fclk = NULL; 102 dsp_wdt.fclk = NULL;
100 dsp_wdt.iclk = NULL; 103 dsp_wdt.iclk = NULL;
101 dsp_wdt.sm_wdt = NULL; 104 dsp_wdt.sm_wdt = NULL;
105
106 if (dsp_wdt.reg_base)
107 iounmap(dsp_wdt.reg_base);
102 dsp_wdt.reg_base = NULL; 108 dsp_wdt.reg_base = NULL;
103} 109}
104 110
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 3ed2c8f656a5..7048e01f0817 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -2,7 +2,7 @@ config ZCACHE
2 bool "Dynamic compression of swap pages and clean pagecache pages" 2 bool "Dynamic compression of swap pages and clean pagecache pages"
3 # X86 dependency is because zsmalloc uses non-portable pte/tlb 3 # X86 dependency is because zsmalloc uses non-portable pte/tlb
4 # functions 4 # functions
5 depends on (CLEANCACHE || FRONTSWAP) && CRYPTO && X86 5 depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y && X86
6 select ZSMALLOC 6 select ZSMALLOC
7 select CRYPTO_LZO 7 select CRYPTO_LZO
8 default n 8 default n
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 7ed58e2df791..f286955331a2 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -169,6 +169,7 @@ static struct se_device *fd_create_virtdevice(
169 inode = file->f_mapping->host; 169 inode = file->f_mapping->host;
170 if (S_ISBLK(inode->i_mode)) { 170 if (S_ISBLK(inode->i_mode)) {
171 struct request_queue *q; 171 struct request_queue *q;
172 unsigned long long dev_size;
172 /* 173 /*
173 * Setup the local scope queue_limits from struct request_queue->limits 174 * Setup the local scope queue_limits from struct request_queue->limits
174 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 175 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
@@ -183,13 +184,12 @@ static struct se_device *fd_create_virtdevice(
183 * one (1) logical sector from underlying struct block_device 184 * one (1) logical sector from underlying struct block_device
184 */ 185 */
185 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); 186 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
186 fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - 187 dev_size = (i_size_read(file->f_mapping->host) -
187 fd_dev->fd_block_size); 188 fd_dev->fd_block_size);
188 189
189 pr_debug("FILEIO: Using size: %llu bytes from struct" 190 pr_debug("FILEIO: Using size: %llu bytes from struct"
190 " block_device blocks: %llu logical_block_size: %d\n", 191 " block_device blocks: %llu logical_block_size: %d\n",
191 fd_dev->fd_dev_size, 192 dev_size, div_u64(dev_size, fd_dev->fd_block_size),
192 div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
193 fd_dev->fd_block_size); 193 fd_dev->fd_block_size);
194 } else { 194 } else {
195 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { 195 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
@@ -605,10 +605,20 @@ static u32 fd_get_device_type(struct se_device *dev)
605static sector_t fd_get_blocks(struct se_device *dev) 605static sector_t fd_get_blocks(struct se_device *dev)
606{ 606{
607 struct fd_dev *fd_dev = dev->dev_ptr; 607 struct fd_dev *fd_dev = dev->dev_ptr;
608 unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, 608 struct file *f = fd_dev->fd_file;
609 dev->se_sub_dev->se_dev_attrib.block_size); 609 struct inode *i = f->f_mapping->host;
610 unsigned long long dev_size;
611 /*
612 * When using a file that references an underlying struct block_device,
613 * ensure dev_size is always based on the current inode size in order
614 * to handle underlying block_device resize operations.
615 */
616 if (S_ISBLK(i->i_mode))
617 dev_size = (i_size_read(i) - fd_dev->fd_block_size);
618 else
619 dev_size = fd_dev->fd_dev_size;
610 620
611 return blocks_long; 621 return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
612} 622}
613 623
614static struct se_subsystem_api fileio_template = { 624static struct se_subsystem_api fileio_template = {
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 86f0c3b5d500..c3148b10b4b3 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -220,6 +220,9 @@ int target_scsi2_reservation_release(struct se_task *task)
220 if (dev->dev_reserved_node_acl != sess->se_node_acl) 220 if (dev->dev_reserved_node_acl != sess->se_node_acl)
221 goto out_unlock; 221 goto out_unlock;
222 222
223 if (dev->dev_res_bin_isid != sess->sess_bin_isid)
224 goto out_unlock;
225
223 dev->dev_reserved_node_acl = NULL; 226 dev->dev_reserved_node_acl = NULL;
224 dev->dev_flags &= ~DF_SPC2_RESERVATIONS; 227 dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
225 if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) { 228 if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 70c3ffb981e7..e320ec24aa1b 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -60,7 +60,6 @@ static void core_clear_initiator_node_from_tpg(
60 int i; 60 int i;
61 struct se_dev_entry *deve; 61 struct se_dev_entry *deve;
62 struct se_lun *lun; 62 struct se_lun *lun;
63 struct se_lun_acl *acl, *acl_tmp;
64 63
65 spin_lock_irq(&nacl->device_list_lock); 64 spin_lock_irq(&nacl->device_list_lock);
66 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 65 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -81,28 +80,7 @@ static void core_clear_initiator_node_from_tpg(
81 core_update_device_list_for_node(lun, NULL, deve->mapped_lun, 80 core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
82 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); 81 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
83 82
84 spin_lock(&lun->lun_acl_lock);
85 list_for_each_entry_safe(acl, acl_tmp,
86 &lun->lun_acl_list, lacl_list) {
87 if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
88 (acl->mapped_lun == deve->mapped_lun))
89 break;
90 }
91
92 if (!acl) {
93 pr_err("Unable to locate struct se_lun_acl for %s,"
94 " mapped_lun: %u\n", nacl->initiatorname,
95 deve->mapped_lun);
96 spin_unlock(&lun->lun_acl_lock);
97 spin_lock_irq(&nacl->device_list_lock);
98 continue;
99 }
100
101 list_del(&acl->lacl_list);
102 spin_unlock(&lun->lun_acl_lock);
103
104 spin_lock_irq(&nacl->device_list_lock); 83 spin_lock_irq(&nacl->device_list_lock);
105 kfree(acl);
106 } 84 }
107 spin_unlock_irq(&nacl->device_list_lock); 85 spin_unlock_irq(&nacl->device_list_lock);
108} 86}
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 08ebe901bb59..654755a990df 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -469,7 +469,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
469 tty = NULL; 469 tty = NULL;
470 if (r3 & (CHAEXT | CHATxIP | CHARxIP)) { 470 if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
471 if (!ZS_IS_OPEN(uap_a)) { 471 if (!ZS_IS_OPEN(uap_a)) {
472 pmz_debug("ChanA interrupt while open !\n"); 472 pmz_debug("ChanA interrupt while not open !\n");
473 goto skip_a; 473 goto skip_a;
474 } 474 }
475 write_zsreg(uap_a, R0, RES_H_IUS); 475 write_zsreg(uap_a, R0, RES_H_IUS);
@@ -493,8 +493,8 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
493 spin_lock(&uap_b->port.lock); 493 spin_lock(&uap_b->port.lock);
494 tty = NULL; 494 tty = NULL;
495 if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) { 495 if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
496 if (!ZS_IS_OPEN(uap_a)) { 496 if (!ZS_IS_OPEN(uap_b)) {
497 pmz_debug("ChanB interrupt while open !\n"); 497 pmz_debug("ChanB interrupt while not open !\n");
498 goto skip_b; 498 goto skip_b;
499 } 499 }
500 write_zsreg(uap_b, R0, RES_H_IUS); 500 write_zsreg(uap_b, R0, RES_H_IUS);
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 86dd1e302bb3..3b0c4e32ed7b 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1085,15 +1085,21 @@ void vt_set_led_state(int console, int leds)
1085 * 1085 *
1086 * Handle console start. This is a wrapper for the VT layer 1086 * Handle console start. This is a wrapper for the VT layer
1087 * so that we can keep kbd knowledge internal 1087 * so that we can keep kbd knowledge internal
1088 *
1089 * FIXME: We eventually need to hold the kbd lock here to protect
1090 * the LED updating. We can't do it yet because fn_hold calls stop_tty
1091 * and start_tty under the kbd_event_lock, while normal tty paths
1092 * don't hold the lock. We probably need to split out an LED lock
1093 * but not during an -rc release!
1088 */ 1094 */
1089void vt_kbd_con_start(int console) 1095void vt_kbd_con_start(int console)
1090{ 1096{
1091 struct kbd_struct * kbd = kbd_table + console; 1097 struct kbd_struct * kbd = kbd_table + console;
1092 unsigned long flags; 1098/* unsigned long flags; */
1093 spin_lock_irqsave(&kbd_event_lock, flags); 1099/* spin_lock_irqsave(&kbd_event_lock, flags); */
1094 clr_vc_kbd_led(kbd, VC_SCROLLOCK); 1100 clr_vc_kbd_led(kbd, VC_SCROLLOCK);
1095 set_leds(); 1101 set_leds();
1096 spin_unlock_irqrestore(&kbd_event_lock, flags); 1102/* spin_unlock_irqrestore(&kbd_event_lock, flags); */
1097} 1103}
1098 1104
1099/** 1105/**
@@ -1102,22 +1108,28 @@ void vt_kbd_con_start(int console)
1102 * 1108 *
1103 * Handle console stop. This is a wrapper for the VT layer 1109 * Handle console stop. This is a wrapper for the VT layer
1104 * so that we can keep kbd knowledge internal 1110 * so that we can keep kbd knowledge internal
1111 *
1112 * FIXME: We eventually need to hold the kbd lock here to protect
1113 * the LED updating. We can't do it yet because fn_hold calls stop_tty
1114 * and start_tty under the kbd_event_lock, while normal tty paths
1115 * don't hold the lock. We probably need to split out an LED lock
1116 * but not during an -rc release!
1105 */ 1117 */
1106void vt_kbd_con_stop(int console) 1118void vt_kbd_con_stop(int console)
1107{ 1119{
1108 struct kbd_struct * kbd = kbd_table + console; 1120 struct kbd_struct * kbd = kbd_table + console;
1109 unsigned long flags; 1121/* unsigned long flags; */
1110 spin_lock_irqsave(&kbd_event_lock, flags); 1122/* spin_lock_irqsave(&kbd_event_lock, flags); */
1111 set_vc_kbd_led(kbd, VC_SCROLLOCK); 1123 set_vc_kbd_led(kbd, VC_SCROLLOCK);
1112 set_leds(); 1124 set_leds();
1113 spin_unlock_irqrestore(&kbd_event_lock, flags); 1125/* spin_unlock_irqrestore(&kbd_event_lock, flags); */
1114} 1126}
1115 1127
1116/* 1128/*
1117 * This is the tasklet that updates LED state on all keyboards 1129 * This is the tasklet that updates LED state on all keyboards
1118 * attached to the box. The reason we use tasklet is that we 1130 * attached to the box. The reason we use tasklet is that we
1119 * need to handle the scenario when keyboard handler is not 1131 * need to handle the scenario when keyboard handler is not
1120 * registered yet but we already getting updates form VT to 1132 * registered yet but we already getting updates from the VT to
1121 * update led state. 1133 * update led state.
1122 */ 1134 */
1123static void kbd_bh(unsigned long dummy) 1135static void kbd_bh(unsigned long dummy)
@@ -2032,7 +2044,7 @@ int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm)
2032 kbd->default_ledflagstate = ((arg >> 4) & 7); 2044 kbd->default_ledflagstate = ((arg >> 4) & 7);
2033 set_leds(); 2045 set_leds();
2034 spin_unlock_irqrestore(&kbd_event_lock, flags); 2046 spin_unlock_irqrestore(&kbd_event_lock, flags);
2035 break; 2047 return 0;
2036 2048
2037 /* the ioctls below only set the lights, not the functions */ 2049 /* the ioctls below only set the lights, not the functions */
2038 /* for those, see KDGKBLED and KDSKBLED above */ 2050 /* for those, see KDGKBLED and KDSKBLED above */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c6f6560d436c..0bb2b3248dad 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -157,8 +157,9 @@ static void wdm_out_callback(struct urb *urb)
157 spin_lock(&desc->iuspin); 157 spin_lock(&desc->iuspin);
158 desc->werr = urb->status; 158 desc->werr = urb->status;
159 spin_unlock(&desc->iuspin); 159 spin_unlock(&desc->iuspin);
160 clear_bit(WDM_IN_USE, &desc->flags);
161 kfree(desc->outbuf); 160 kfree(desc->outbuf);
161 desc->outbuf = NULL;
162 clear_bit(WDM_IN_USE, &desc->flags);
162 wake_up(&desc->wait); 163 wake_up(&desc->wait);
163} 164}
164 165
@@ -338,7 +339,7 @@ static ssize_t wdm_write
338 if (we < 0) 339 if (we < 0)
339 return -EIO; 340 return -EIO;
340 341
341 desc->outbuf = buf = kmalloc(count, GFP_KERNEL); 342 buf = kmalloc(count, GFP_KERNEL);
342 if (!buf) { 343 if (!buf) {
343 rv = -ENOMEM; 344 rv = -ENOMEM;
344 goto outnl; 345 goto outnl;
@@ -406,10 +407,12 @@ static ssize_t wdm_write
406 req->wIndex = desc->inum; 407 req->wIndex = desc->inum;
407 req->wLength = cpu_to_le16(count); 408 req->wLength = cpu_to_le16(count);
408 set_bit(WDM_IN_USE, &desc->flags); 409 set_bit(WDM_IN_USE, &desc->flags);
410 desc->outbuf = buf;
409 411
410 rv = usb_submit_urb(desc->command, GFP_KERNEL); 412 rv = usb_submit_urb(desc->command, GFP_KERNEL);
411 if (rv < 0) { 413 if (rv < 0) {
412 kfree(buf); 414 kfree(buf);
415 desc->outbuf = NULL;
413 clear_bit(WDM_IN_USE, &desc->flags); 416 clear_bit(WDM_IN_USE, &desc->flags);
414 dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); 417 dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
415 } else { 418 } else {
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 622b4a48e732..57ed9e400c06 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -493,6 +493,15 @@ static int hcd_pci_suspend_noirq(struct device *dev)
493 493
494 pci_save_state(pci_dev); 494 pci_save_state(pci_dev);
495 495
496 /*
497 * Some systems crash if an EHCI controller is in D3 during
498 * a sleep transition. We have to leave such controllers in D0.
499 */
500 if (hcd->broken_pci_sleep) {
501 dev_dbg(dev, "Staying in PCI D0\n");
502 return retval;
503 }
504
496 /* If the root hub is dead rather than suspended, disallow remote 505 /* If the root hub is dead rather than suspended, disallow remote
497 * wakeup. usb_hc_died() should ensure that both hosts are marked as 506 * wakeup. usb_hc_died() should ensure that both hosts are marked as
498 * dying, so we only need to check the primary roothub. 507 * dying, so we only need to check the primary roothub.
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index a6dfd2164166..170cbe89d9f8 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -927,7 +927,6 @@ static int dummy_udc_stop(struct usb_gadget *g,
927 927
928 dum->driver = NULL; 928 dum->driver = NULL;
929 929
930 dummy_pullup(&dum->gadget, 0);
931 return 0; 930 return 0;
932} 931}
933 932
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a371e966425f..cb8c162cae5a 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2189,7 +2189,7 @@ unknown_cmnd:
2189 common->data_size_from_cmnd = 0; 2189 common->data_size_from_cmnd = 0;
2190 sprintf(unknown, "Unknown x%02x", common->cmnd[0]); 2190 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2191 reply = check_command(common, common->cmnd_size, 2191 reply = check_command(common, common->cmnd_size,
2192 DATA_DIR_UNKNOWN, 0xff, 0, unknown); 2192 DATA_DIR_UNKNOWN, ~0, 0, unknown);
2193 if (reply == 0) { 2193 if (reply == 0) {
2194 common->curlun->sense_data = SS_INVALID_COMMAND; 2194 common->curlun->sense_data = SS_INVALID_COMMAND;
2195 reply = -EINVAL; 2195 reply = -EINVAL;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 52343654f5df..d4f823f463e9 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -637,7 +637,7 @@ static void rndis_open(struct gether *geth)
637 637
638 DBG(cdev, "%s\n", __func__); 638 DBG(cdev, "%s\n", __func__);
639 639
640 rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 640 rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3,
641 bitrate(cdev->gadget) / 100); 641 bitrate(cdev->gadget) / 100);
642 rndis_signal_connect(rndis->config); 642 rndis_signal_connect(rndis->config);
643} 643}
@@ -648,7 +648,7 @@ static void rndis_close(struct gether *geth)
648 648
649 DBG(geth->func.config->cdev, "%s\n", __func__); 649 DBG(geth->func.config->cdev, "%s\n", __func__);
650 650
651 rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0); 651 rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
652 rndis_signal_disconnect(rndis->config); 652 rndis_signal_disconnect(rndis->config);
653} 653}
654 654
@@ -765,7 +765,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
765 goto fail; 765 goto fail;
766 rndis->config = status; 766 rndis->config = status;
767 767
768 rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0); 768 rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
769 rndis_set_host_mac(rndis->config, rndis->ethaddr); 769 rndis_set_host_mac(rndis->config, rndis->ethaddr);
770 770
771#if 0 771#if 0
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 4fac56927741..a896d73f7a93 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -2579,7 +2579,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2579 fsg->data_size_from_cmnd = 0; 2579 fsg->data_size_from_cmnd = 0;
2580 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]); 2580 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
2581 if ((reply = check_command(fsg, fsg->cmnd_size, 2581 if ((reply = check_command(fsg, fsg->cmnd_size,
2582 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) { 2582 DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
2583 fsg->curlun->sense_data = SS_INVALID_COMMAND; 2583 fsg->curlun->sense_data = SS_INVALID_COMMAND;
2584 reply = -EINVAL; 2584 reply = -EINVAL;
2585 } 2585 }
diff --git a/drivers/usb/gadget/ndis.h b/drivers/usb/gadget/ndis.h
index b0e52fc277b4..a19f72dec0cd 100644
--- a/drivers/usb/gadget/ndis.h
+++ b/drivers/usb/gadget/ndis.h
@@ -15,11 +15,6 @@
15#ifndef _LINUX_NDIS_H 15#ifndef _LINUX_NDIS_H
16#define _LINUX_NDIS_H 16#define _LINUX_NDIS_H
17 17
18
19#define NDIS_STATUS_MULTICAST_FULL 0xC0010009
20#define NDIS_STATUS_MULTICAST_EXISTS 0xC001000A
21#define NDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B
22
23enum NDIS_DEVICE_POWER_STATE { 18enum NDIS_DEVICE_POWER_STATE {
24 NdisDeviceStateUnspecified = 0, 19 NdisDeviceStateUnspecified = 0,
25 NdisDeviceStateD0, 20 NdisDeviceStateD0,
@@ -35,11 +30,6 @@ struct NDIS_PM_WAKE_UP_CAPABILITIES {
35 enum NDIS_DEVICE_POWER_STATE MinLinkChangeWakeUp; 30 enum NDIS_DEVICE_POWER_STATE MinLinkChangeWakeUp;
36}; 31};
37 32
38/* NDIS_PNP_CAPABILITIES.Flags constants */
39#define NDIS_DEVICE_WAKE_UP_ENABLE 0x00000001
40#define NDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE 0x00000002
41#define NDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE 0x00000004
42
43struct NDIS_PNP_CAPABILITIES { 33struct NDIS_PNP_CAPABILITIES {
44 __le32 Flags; 34 __le32 Flags;
45 struct NDIS_PM_WAKE_UP_CAPABILITIES WakeUpCapabilities; 35 struct NDIS_PM_WAKE_UP_CAPABILITIES WakeUpCapabilities;
@@ -54,158 +44,4 @@ struct NDIS_PM_PACKET_PATTERN {
54 __le32 PatternFlags; 44 __le32 PatternFlags;
55}; 45};
56 46
57
58/* Required Object IDs (OIDs) */
59#define OID_GEN_SUPPORTED_LIST 0x00010101
60#define OID_GEN_HARDWARE_STATUS 0x00010102
61#define OID_GEN_MEDIA_SUPPORTED 0x00010103
62#define OID_GEN_MEDIA_IN_USE 0x00010104
63#define OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
64#define OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
65#define OID_GEN_LINK_SPEED 0x00010107
66#define OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
67#define OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
68#define OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
69#define OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
70#define OID_GEN_VENDOR_ID 0x0001010C
71#define OID_GEN_VENDOR_DESCRIPTION 0x0001010D
72#define OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
73#define OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
74#define OID_GEN_DRIVER_VERSION 0x00010110
75#define OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
76#define OID_GEN_PROTOCOL_OPTIONS 0x00010112
77#define OID_GEN_MAC_OPTIONS 0x00010113
78#define OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
79#define OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
80#define OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
81#define OID_GEN_SUPPORTED_GUIDS 0x00010117
82#define OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
83#define OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
84#define OID_GEN_MACHINE_NAME 0x0001021A
85#define OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
86#define OID_GEN_VLAN_ID 0x0001021C
87
88/* Optional OIDs */
89#define OID_GEN_MEDIA_CAPABILITIES 0x00010201
90#define OID_GEN_PHYSICAL_MEDIUM 0x00010202
91
92/* Required statistics OIDs */
93#define OID_GEN_XMIT_OK 0x00020101
94#define OID_GEN_RCV_OK 0x00020102
95#define OID_GEN_XMIT_ERROR 0x00020103
96#define OID_GEN_RCV_ERROR 0x00020104
97#define OID_GEN_RCV_NO_BUFFER 0x00020105
98
99/* Optional statistics OIDs */
100#define OID_GEN_DIRECTED_BYTES_XMIT 0x00020201
101#define OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202
102#define OID_GEN_MULTICAST_BYTES_XMIT 0x00020203
103#define OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204
104#define OID_GEN_BROADCAST_BYTES_XMIT 0x00020205
105#define OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206
106#define OID_GEN_DIRECTED_BYTES_RCV 0x00020207
107#define OID_GEN_DIRECTED_FRAMES_RCV 0x00020208
108#define OID_GEN_MULTICAST_BYTES_RCV 0x00020209
109#define OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A
110#define OID_GEN_BROADCAST_BYTES_RCV 0x0002020B
111#define OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C
112#define OID_GEN_RCV_CRC_ERROR 0x0002020D
113#define OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E
114#define OID_GEN_GET_TIME_CAPS 0x0002020F
115#define OID_GEN_GET_NETCARD_TIME 0x00020210
116#define OID_GEN_NETCARD_LOAD 0x00020211
117#define OID_GEN_DEVICE_PROFILE 0x00020212
118#define OID_GEN_INIT_TIME_MS 0x00020213
119#define OID_GEN_RESET_COUNTS 0x00020214
120#define OID_GEN_MEDIA_SENSE_COUNTS 0x00020215
121#define OID_GEN_FRIENDLY_NAME 0x00020216
122#define OID_GEN_MINIPORT_INFO 0x00020217
123#define OID_GEN_RESET_VERIFY_PARAMETERS 0x00020218
124
125/* IEEE 802.3 (Ethernet) OIDs */
126#define NDIS_802_3_MAC_OPTION_PRIORITY 0x00000001
127
128#define OID_802_3_PERMANENT_ADDRESS 0x01010101
129#define OID_802_3_CURRENT_ADDRESS 0x01010102
130#define OID_802_3_MULTICAST_LIST 0x01010103
131#define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
132#define OID_802_3_MAC_OPTIONS 0x01010105
133#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
134#define OID_802_3_XMIT_ONE_COLLISION 0x01020102
135#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
136#define OID_802_3_XMIT_DEFERRED 0x01020201
137#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
138#define OID_802_3_RCV_OVERRUN 0x01020203
139#define OID_802_3_XMIT_UNDERRUN 0x01020204
140#define OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
141#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
142#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
143
144/* OID_GEN_MINIPORT_INFO constants */
145#define NDIS_MINIPORT_BUS_MASTER 0x00000001
146#define NDIS_MINIPORT_WDM_DRIVER 0x00000002
147#define NDIS_MINIPORT_SG_LIST 0x00000004
148#define NDIS_MINIPORT_SUPPORTS_MEDIA_QUERY 0x00000008
149#define NDIS_MINIPORT_INDICATES_PACKETS 0x00000010
150#define NDIS_MINIPORT_IGNORE_PACKET_QUEUE 0x00000020
151#define NDIS_MINIPORT_IGNORE_REQUEST_QUEUE 0x00000040
152#define NDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS 0x00000080
153#define NDIS_MINIPORT_INTERMEDIATE_DRIVER 0x00000100
154#define NDIS_MINIPORT_IS_NDIS_5 0x00000200
155#define NDIS_MINIPORT_IS_CO 0x00000400
156#define NDIS_MINIPORT_DESERIALIZE 0x00000800
157#define NDIS_MINIPORT_REQUIRES_MEDIA_POLLING 0x00001000
158#define NDIS_MINIPORT_SUPPORTS_MEDIA_SENSE 0x00002000
159#define NDIS_MINIPORT_NETBOOT_CARD 0x00004000
160#define NDIS_MINIPORT_PM_SUPPORTED 0x00008000
161#define NDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE 0x00010000
162#define NDIS_MINIPORT_USES_SAFE_BUFFER_APIS 0x00020000
163#define NDIS_MINIPORT_HIDDEN 0x00040000
164#define NDIS_MINIPORT_SWENUM 0x00080000
165#define NDIS_MINIPORT_SURPRISE_REMOVE_OK 0x00100000
166#define NDIS_MINIPORT_NO_HALT_ON_SUSPEND 0x00200000
167#define NDIS_MINIPORT_HARDWARE_DEVICE 0x00400000
168#define NDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS 0x00800000
169#define NDIS_MINIPORT_64BITS_DMA 0x01000000
170
171#define NDIS_MEDIUM_802_3 0x00000000
172#define NDIS_MEDIUM_802_5 0x00000001
173#define NDIS_MEDIUM_FDDI 0x00000002
174#define NDIS_MEDIUM_WAN 0x00000003
175#define NDIS_MEDIUM_LOCAL_TALK 0x00000004
176#define NDIS_MEDIUM_DIX 0x00000005
177#define NDIS_MEDIUM_ARCENT_RAW 0x00000006
178#define NDIS_MEDIUM_ARCENT_878_2 0x00000007
179#define NDIS_MEDIUM_ATM 0x00000008
180#define NDIS_MEDIUM_WIRELESS_LAN 0x00000009
181#define NDIS_MEDIUM_IRDA 0x0000000A
182#define NDIS_MEDIUM_BPC 0x0000000B
183#define NDIS_MEDIUM_CO_WAN 0x0000000C
184#define NDIS_MEDIUM_1394 0x0000000D
185
186#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
187#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
188#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
189#define NDIS_PACKET_TYPE_BROADCAST 0x00000008
190#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
191#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
192#define NDIS_PACKET_TYPE_SMT 0x00000040
193#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
194#define NDIS_PACKET_TYPE_GROUP 0x00000100
195#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00000200
196#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
197#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
198
199#define NDIS_MEDIA_STATE_CONNECTED 0x00000000
200#define NDIS_MEDIA_STATE_DISCONNECTED 0x00000001
201
202#define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA 0x00000001
203#define NDIS_MAC_OPTION_RECEIVE_SERIALIZED 0x00000002
204#define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND 0x00000004
205#define NDIS_MAC_OPTION_NO_LOOPBACK 0x00000008
206#define NDIS_MAC_OPTION_FULL_DUPLEX 0x00000010
207#define NDIS_MAC_OPTION_EOTX_INDICATION 0x00000020
208#define NDIS_MAC_OPTION_8021P_PRIORITY 0x00000040
209#define NDIS_MAC_OPTION_RESERVED 0x80000000
210
211#endif /* _LINUX_NDIS_H */ 47#endif /* _LINUX_NDIS_H */
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 73a934a170d1..b35babed6fcb 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -73,65 +73,65 @@ static rndis_resp_t *rndis_add_response(int configNr, u32 length);
73static const u32 oid_supported_list[] = 73static const u32 oid_supported_list[] =
74{ 74{
75 /* the general stuff */ 75 /* the general stuff */
76 OID_GEN_SUPPORTED_LIST, 76 RNDIS_OID_GEN_SUPPORTED_LIST,
77 OID_GEN_HARDWARE_STATUS, 77 RNDIS_OID_GEN_HARDWARE_STATUS,
78 OID_GEN_MEDIA_SUPPORTED, 78 RNDIS_OID_GEN_MEDIA_SUPPORTED,
79 OID_GEN_MEDIA_IN_USE, 79 RNDIS_OID_GEN_MEDIA_IN_USE,
80 OID_GEN_MAXIMUM_FRAME_SIZE, 80 RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
81 OID_GEN_LINK_SPEED, 81 RNDIS_OID_GEN_LINK_SPEED,
82 OID_GEN_TRANSMIT_BLOCK_SIZE, 82 RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE,
83 OID_GEN_RECEIVE_BLOCK_SIZE, 83 RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE,
84 OID_GEN_VENDOR_ID, 84 RNDIS_OID_GEN_VENDOR_ID,
85 OID_GEN_VENDOR_DESCRIPTION, 85 RNDIS_OID_GEN_VENDOR_DESCRIPTION,
86 OID_GEN_VENDOR_DRIVER_VERSION, 86 RNDIS_OID_GEN_VENDOR_DRIVER_VERSION,
87 OID_GEN_CURRENT_PACKET_FILTER, 87 RNDIS_OID_GEN_CURRENT_PACKET_FILTER,
88 OID_GEN_MAXIMUM_TOTAL_SIZE, 88 RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE,
89 OID_GEN_MEDIA_CONNECT_STATUS, 89 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
90 OID_GEN_PHYSICAL_MEDIUM, 90 RNDIS_OID_GEN_PHYSICAL_MEDIUM,
91 91
92 /* the statistical stuff */ 92 /* the statistical stuff */
93 OID_GEN_XMIT_OK, 93 RNDIS_OID_GEN_XMIT_OK,
94 OID_GEN_RCV_OK, 94 RNDIS_OID_GEN_RCV_OK,
95 OID_GEN_XMIT_ERROR, 95 RNDIS_OID_GEN_XMIT_ERROR,
96 OID_GEN_RCV_ERROR, 96 RNDIS_OID_GEN_RCV_ERROR,
97 OID_GEN_RCV_NO_BUFFER, 97 RNDIS_OID_GEN_RCV_NO_BUFFER,
98#ifdef RNDIS_OPTIONAL_STATS 98#ifdef RNDIS_OPTIONAL_STATS
99 OID_GEN_DIRECTED_BYTES_XMIT, 99 RNDIS_OID_GEN_DIRECTED_BYTES_XMIT,
100 OID_GEN_DIRECTED_FRAMES_XMIT, 100 RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT,
101 OID_GEN_MULTICAST_BYTES_XMIT, 101 RNDIS_OID_GEN_MULTICAST_BYTES_XMIT,
102 OID_GEN_MULTICAST_FRAMES_XMIT, 102 RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT,
103 OID_GEN_BROADCAST_BYTES_XMIT, 103 RNDIS_OID_GEN_BROADCAST_BYTES_XMIT,
104 OID_GEN_BROADCAST_FRAMES_XMIT, 104 RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT,
105 OID_GEN_DIRECTED_BYTES_RCV, 105 RNDIS_OID_GEN_DIRECTED_BYTES_RCV,
106 OID_GEN_DIRECTED_FRAMES_RCV, 106 RNDIS_OID_GEN_DIRECTED_FRAMES_RCV,
107 OID_GEN_MULTICAST_BYTES_RCV, 107 RNDIS_OID_GEN_MULTICAST_BYTES_RCV,
108 OID_GEN_MULTICAST_FRAMES_RCV, 108 RNDIS_OID_GEN_MULTICAST_FRAMES_RCV,
109 OID_GEN_BROADCAST_BYTES_RCV, 109 RNDIS_OID_GEN_BROADCAST_BYTES_RCV,
110 OID_GEN_BROADCAST_FRAMES_RCV, 110 RNDIS_OID_GEN_BROADCAST_FRAMES_RCV,
111 OID_GEN_RCV_CRC_ERROR, 111 RNDIS_OID_GEN_RCV_CRC_ERROR,
112 OID_GEN_TRANSMIT_QUEUE_LENGTH, 112 RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH,
113#endif /* RNDIS_OPTIONAL_STATS */ 113#endif /* RNDIS_OPTIONAL_STATS */
114 114
115 /* mandatory 802.3 */ 115 /* mandatory 802.3 */
116 /* the general stuff */ 116 /* the general stuff */
117 OID_802_3_PERMANENT_ADDRESS, 117 RNDIS_OID_802_3_PERMANENT_ADDRESS,
118 OID_802_3_CURRENT_ADDRESS, 118 RNDIS_OID_802_3_CURRENT_ADDRESS,
119 OID_802_3_MULTICAST_LIST, 119 RNDIS_OID_802_3_MULTICAST_LIST,
120 OID_802_3_MAC_OPTIONS, 120 RNDIS_OID_802_3_MAC_OPTIONS,
121 OID_802_3_MAXIMUM_LIST_SIZE, 121 RNDIS_OID_802_3_MAXIMUM_LIST_SIZE,
122 122
123 /* the statistical stuff */ 123 /* the statistical stuff */
124 OID_802_3_RCV_ERROR_ALIGNMENT, 124 RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT,
125 OID_802_3_XMIT_ONE_COLLISION, 125 RNDIS_OID_802_3_XMIT_ONE_COLLISION,
126 OID_802_3_XMIT_MORE_COLLISIONS, 126 RNDIS_OID_802_3_XMIT_MORE_COLLISIONS,
127#ifdef RNDIS_OPTIONAL_STATS 127#ifdef RNDIS_OPTIONAL_STATS
128 OID_802_3_XMIT_DEFERRED, 128 RNDIS_OID_802_3_XMIT_DEFERRED,
129 OID_802_3_XMIT_MAX_COLLISIONS, 129 RNDIS_OID_802_3_XMIT_MAX_COLLISIONS,
130 OID_802_3_RCV_OVERRUN, 130 RNDIS_OID_802_3_RCV_OVERRUN,
131 OID_802_3_XMIT_UNDERRUN, 131 RNDIS_OID_802_3_XMIT_UNDERRUN,
132 OID_802_3_XMIT_HEARTBEAT_FAILURE, 132 RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE,
133 OID_802_3_XMIT_TIMES_CRS_LOST, 133 RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST,
134 OID_802_3_XMIT_LATE_COLLISIONS, 134 RNDIS_OID_802_3_XMIT_LATE_COLLISIONS,
135#endif /* RNDIS_OPTIONAL_STATS */ 135#endif /* RNDIS_OPTIONAL_STATS */
136 136
137#ifdef RNDIS_PM 137#ifdef RNDIS_PM
@@ -200,8 +200,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
200 /* general oids (table 4-1) */ 200 /* general oids (table 4-1) */
201 201
202 /* mandatory */ 202 /* mandatory */
203 case OID_GEN_SUPPORTED_LIST: 203 case RNDIS_OID_GEN_SUPPORTED_LIST:
204 pr_debug("%s: OID_GEN_SUPPORTED_LIST\n", __func__); 204 pr_debug("%s: RNDIS_OID_GEN_SUPPORTED_LIST\n", __func__);
205 length = sizeof(oid_supported_list); 205 length = sizeof(oid_supported_list);
206 count = length / sizeof(u32); 206 count = length / sizeof(u32);
207 for (i = 0; i < count; i++) 207 for (i = 0; i < count; i++)
@@ -210,8 +210,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
210 break; 210 break;
211 211
212 /* mandatory */ 212 /* mandatory */
213 case OID_GEN_HARDWARE_STATUS: 213 case RNDIS_OID_GEN_HARDWARE_STATUS:
214 pr_debug("%s: OID_GEN_HARDWARE_STATUS\n", __func__); 214 pr_debug("%s: RNDIS_OID_GEN_HARDWARE_STATUS\n", __func__);
215 /* Bogus question! 215 /* Bogus question!
216 * Hardware must be ready to receive high level protocols. 216 * Hardware must be ready to receive high level protocols.
217 * BTW: 217 * BTW:
@@ -223,23 +223,23 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
223 break; 223 break;
224 224
225 /* mandatory */ 225 /* mandatory */
226 case OID_GEN_MEDIA_SUPPORTED: 226 case RNDIS_OID_GEN_MEDIA_SUPPORTED:
227 pr_debug("%s: OID_GEN_MEDIA_SUPPORTED\n", __func__); 227 pr_debug("%s: RNDIS_OID_GEN_MEDIA_SUPPORTED\n", __func__);
228 *outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium); 228 *outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
229 retval = 0; 229 retval = 0;
230 break; 230 break;
231 231
232 /* mandatory */ 232 /* mandatory */
233 case OID_GEN_MEDIA_IN_USE: 233 case RNDIS_OID_GEN_MEDIA_IN_USE:
234 pr_debug("%s: OID_GEN_MEDIA_IN_USE\n", __func__); 234 pr_debug("%s: RNDIS_OID_GEN_MEDIA_IN_USE\n", __func__);
235 /* one medium, one transport... (maybe you do it better) */ 235 /* one medium, one transport... (maybe you do it better) */
236 *outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium); 236 *outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
237 retval = 0; 237 retval = 0;
238 break; 238 break;
239 239
240 /* mandatory */ 240 /* mandatory */
241 case OID_GEN_MAXIMUM_FRAME_SIZE: 241 case RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE:
242 pr_debug("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__); 242 pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__);
243 if (rndis_per_dev_params[configNr].dev) { 243 if (rndis_per_dev_params[configNr].dev) {
244 *outbuf = cpu_to_le32( 244 *outbuf = cpu_to_le32(
245 rndis_per_dev_params[configNr].dev->mtu); 245 rndis_per_dev_params[configNr].dev->mtu);
@@ -248,11 +248,11 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
248 break; 248 break;
249 249
250 /* mandatory */ 250 /* mandatory */
251 case OID_GEN_LINK_SPEED: 251 case RNDIS_OID_GEN_LINK_SPEED:
252 if (rndis_debug > 1) 252 if (rndis_debug > 1)
253 pr_debug("%s: OID_GEN_LINK_SPEED\n", __func__); 253 pr_debug("%s: RNDIS_OID_GEN_LINK_SPEED\n", __func__);
254 if (rndis_per_dev_params[configNr].media_state 254 if (rndis_per_dev_params[configNr].media_state
255 == NDIS_MEDIA_STATE_DISCONNECTED) 255 == RNDIS_MEDIA_STATE_DISCONNECTED)
256 *outbuf = cpu_to_le32(0); 256 *outbuf = cpu_to_le32(0);
257 else 257 else
258 *outbuf = cpu_to_le32( 258 *outbuf = cpu_to_le32(
@@ -261,8 +261,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
261 break; 261 break;
262 262
263 /* mandatory */ 263 /* mandatory */
264 case OID_GEN_TRANSMIT_BLOCK_SIZE: 264 case RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE:
265 pr_debug("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__); 265 pr_debug("%s: RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__);
266 if (rndis_per_dev_params[configNr].dev) { 266 if (rndis_per_dev_params[configNr].dev) {
267 *outbuf = cpu_to_le32( 267 *outbuf = cpu_to_le32(
268 rndis_per_dev_params[configNr].dev->mtu); 268 rndis_per_dev_params[configNr].dev->mtu);
@@ -271,8 +271,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
271 break; 271 break;
272 272
273 /* mandatory */ 273 /* mandatory */
274 case OID_GEN_RECEIVE_BLOCK_SIZE: 274 case RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE:
275 pr_debug("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__); 275 pr_debug("%s: RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__);
276 if (rndis_per_dev_params[configNr].dev) { 276 if (rndis_per_dev_params[configNr].dev) {
277 *outbuf = cpu_to_le32( 277 *outbuf = cpu_to_le32(
278 rndis_per_dev_params[configNr].dev->mtu); 278 rndis_per_dev_params[configNr].dev->mtu);
@@ -281,16 +281,16 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
281 break; 281 break;
282 282
283 /* mandatory */ 283 /* mandatory */
284 case OID_GEN_VENDOR_ID: 284 case RNDIS_OID_GEN_VENDOR_ID:
285 pr_debug("%s: OID_GEN_VENDOR_ID\n", __func__); 285 pr_debug("%s: RNDIS_OID_GEN_VENDOR_ID\n", __func__);
286 *outbuf = cpu_to_le32( 286 *outbuf = cpu_to_le32(
287 rndis_per_dev_params[configNr].vendorID); 287 rndis_per_dev_params[configNr].vendorID);
288 retval = 0; 288 retval = 0;
289 break; 289 break;
290 290
291 /* mandatory */ 291 /* mandatory */
292 case OID_GEN_VENDOR_DESCRIPTION: 292 case RNDIS_OID_GEN_VENDOR_DESCRIPTION:
293 pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); 293 pr_debug("%s: RNDIS_OID_GEN_VENDOR_DESCRIPTION\n", __func__);
294 if (rndis_per_dev_params[configNr].vendorDescr) { 294 if (rndis_per_dev_params[configNr].vendorDescr) {
295 length = strlen(rndis_per_dev_params[configNr]. 295 length = strlen(rndis_per_dev_params[configNr].
296 vendorDescr); 296 vendorDescr);
@@ -303,38 +303,38 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
303 retval = 0; 303 retval = 0;
304 break; 304 break;
305 305
306 case OID_GEN_VENDOR_DRIVER_VERSION: 306 case RNDIS_OID_GEN_VENDOR_DRIVER_VERSION:
307 pr_debug("%s: OID_GEN_VENDOR_DRIVER_VERSION\n", __func__); 307 pr_debug("%s: RNDIS_OID_GEN_VENDOR_DRIVER_VERSION\n", __func__);
308 /* Created as LE */ 308 /* Created as LE */
309 *outbuf = rndis_driver_version; 309 *outbuf = rndis_driver_version;
310 retval = 0; 310 retval = 0;
311 break; 311 break;
312 312
313 /* mandatory */ 313 /* mandatory */
314 case OID_GEN_CURRENT_PACKET_FILTER: 314 case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:
315 pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __func__); 315 pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER\n", __func__);
316 *outbuf = cpu_to_le32(*rndis_per_dev_params[configNr].filter); 316 *outbuf = cpu_to_le32(*rndis_per_dev_params[configNr].filter);
317 retval = 0; 317 retval = 0;
318 break; 318 break;
319 319
320 /* mandatory */ 320 /* mandatory */
321 case OID_GEN_MAXIMUM_TOTAL_SIZE: 321 case RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE:
322 pr_debug("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__); 322 pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__);
323 *outbuf = cpu_to_le32(RNDIS_MAX_TOTAL_SIZE); 323 *outbuf = cpu_to_le32(RNDIS_MAX_TOTAL_SIZE);
324 retval = 0; 324 retval = 0;
325 break; 325 break;
326 326
327 /* mandatory */ 327 /* mandatory */
328 case OID_GEN_MEDIA_CONNECT_STATUS: 328 case RNDIS_OID_GEN_MEDIA_CONNECT_STATUS:
329 if (rndis_debug > 1) 329 if (rndis_debug > 1)
330 pr_debug("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __func__); 330 pr_debug("%s: RNDIS_OID_GEN_MEDIA_CONNECT_STATUS\n", __func__);
331 *outbuf = cpu_to_le32(rndis_per_dev_params[configNr] 331 *outbuf = cpu_to_le32(rndis_per_dev_params[configNr]
332 .media_state); 332 .media_state);
333 retval = 0; 333 retval = 0;
334 break; 334 break;
335 335
336 case OID_GEN_PHYSICAL_MEDIUM: 336 case RNDIS_OID_GEN_PHYSICAL_MEDIUM:
337 pr_debug("%s: OID_GEN_PHYSICAL_MEDIUM\n", __func__); 337 pr_debug("%s: RNDIS_OID_GEN_PHYSICAL_MEDIUM\n", __func__);
338 *outbuf = cpu_to_le32(0); 338 *outbuf = cpu_to_le32(0);
339 retval = 0; 339 retval = 0;
340 break; 340 break;
@@ -343,20 +343,20 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
343 * of MS-Windows expect OIDs that aren't specified there. Other 343 * of MS-Windows expect OIDs that aren't specified there. Other
344 * versions emit undefined RNDIS messages. DOCUMENT ALL THESE! 344 * versions emit undefined RNDIS messages. DOCUMENT ALL THESE!
345 */ 345 */
346 case OID_GEN_MAC_OPTIONS: /* from WinME */ 346 case RNDIS_OID_GEN_MAC_OPTIONS: /* from WinME */
347 pr_debug("%s: OID_GEN_MAC_OPTIONS\n", __func__); 347 pr_debug("%s: RNDIS_OID_GEN_MAC_OPTIONS\n", __func__);
348 *outbuf = cpu_to_le32( 348 *outbuf = cpu_to_le32(
349 NDIS_MAC_OPTION_RECEIVE_SERIALIZED 349 RNDIS_MAC_OPTION_RECEIVE_SERIALIZED
350 | NDIS_MAC_OPTION_FULL_DUPLEX); 350 | RNDIS_MAC_OPTION_FULL_DUPLEX);
351 retval = 0; 351 retval = 0;
352 break; 352 break;
353 353
354 /* statistics OIDs (table 4-2) */ 354 /* statistics OIDs (table 4-2) */
355 355
356 /* mandatory */ 356 /* mandatory */
357 case OID_GEN_XMIT_OK: 357 case RNDIS_OID_GEN_XMIT_OK:
358 if (rndis_debug > 1) 358 if (rndis_debug > 1)
359 pr_debug("%s: OID_GEN_XMIT_OK\n", __func__); 359 pr_debug("%s: RNDIS_OID_GEN_XMIT_OK\n", __func__);
360 if (stats) { 360 if (stats) {
361 *outbuf = cpu_to_le32(stats->tx_packets 361 *outbuf = cpu_to_le32(stats->tx_packets
362 - stats->tx_errors - stats->tx_dropped); 362 - stats->tx_errors - stats->tx_dropped);
@@ -365,9 +365,9 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
365 break; 365 break;
366 366
367 /* mandatory */ 367 /* mandatory */
368 case OID_GEN_RCV_OK: 368 case RNDIS_OID_GEN_RCV_OK:
369 if (rndis_debug > 1) 369 if (rndis_debug > 1)
370 pr_debug("%s: OID_GEN_RCV_OK\n", __func__); 370 pr_debug("%s: RNDIS_OID_GEN_RCV_OK\n", __func__);
371 if (stats) { 371 if (stats) {
372 *outbuf = cpu_to_le32(stats->rx_packets 372 *outbuf = cpu_to_le32(stats->rx_packets
373 - stats->rx_errors - stats->rx_dropped); 373 - stats->rx_errors - stats->rx_dropped);
@@ -376,9 +376,9 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
376 break; 376 break;
377 377
378 /* mandatory */ 378 /* mandatory */
379 case OID_GEN_XMIT_ERROR: 379 case RNDIS_OID_GEN_XMIT_ERROR:
380 if (rndis_debug > 1) 380 if (rndis_debug > 1)
381 pr_debug("%s: OID_GEN_XMIT_ERROR\n", __func__); 381 pr_debug("%s: RNDIS_OID_GEN_XMIT_ERROR\n", __func__);
382 if (stats) { 382 if (stats) {
383 *outbuf = cpu_to_le32(stats->tx_errors); 383 *outbuf = cpu_to_le32(stats->tx_errors);
384 retval = 0; 384 retval = 0;
@@ -386,9 +386,9 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
386 break; 386 break;
387 387
388 /* mandatory */ 388 /* mandatory */
389 case OID_GEN_RCV_ERROR: 389 case RNDIS_OID_GEN_RCV_ERROR:
390 if (rndis_debug > 1) 390 if (rndis_debug > 1)
391 pr_debug("%s: OID_GEN_RCV_ERROR\n", __func__); 391 pr_debug("%s: RNDIS_OID_GEN_RCV_ERROR\n", __func__);
392 if (stats) { 392 if (stats) {
393 *outbuf = cpu_to_le32(stats->rx_errors); 393 *outbuf = cpu_to_le32(stats->rx_errors);
394 retval = 0; 394 retval = 0;
@@ -396,8 +396,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
396 break; 396 break;
397 397
398 /* mandatory */ 398 /* mandatory */
399 case OID_GEN_RCV_NO_BUFFER: 399 case RNDIS_OID_GEN_RCV_NO_BUFFER:
400 pr_debug("%s: OID_GEN_RCV_NO_BUFFER\n", __func__); 400 pr_debug("%s: RNDIS_OID_GEN_RCV_NO_BUFFER\n", __func__);
401 if (stats) { 401 if (stats) {
402 *outbuf = cpu_to_le32(stats->rx_dropped); 402 *outbuf = cpu_to_le32(stats->rx_dropped);
403 retval = 0; 403 retval = 0;
@@ -407,8 +407,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
407 /* ieee802.3 OIDs (table 4-3) */ 407 /* ieee802.3 OIDs (table 4-3) */
408 408
409 /* mandatory */ 409 /* mandatory */
410 case OID_802_3_PERMANENT_ADDRESS: 410 case RNDIS_OID_802_3_PERMANENT_ADDRESS:
411 pr_debug("%s: OID_802_3_PERMANENT_ADDRESS\n", __func__); 411 pr_debug("%s: RNDIS_OID_802_3_PERMANENT_ADDRESS\n", __func__);
412 if (rndis_per_dev_params[configNr].dev) { 412 if (rndis_per_dev_params[configNr].dev) {
413 length = ETH_ALEN; 413 length = ETH_ALEN;
414 memcpy(outbuf, 414 memcpy(outbuf,
@@ -419,8 +419,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
419 break; 419 break;
420 420
421 /* mandatory */ 421 /* mandatory */
422 case OID_802_3_CURRENT_ADDRESS: 422 case RNDIS_OID_802_3_CURRENT_ADDRESS:
423 pr_debug("%s: OID_802_3_CURRENT_ADDRESS\n", __func__); 423 pr_debug("%s: RNDIS_OID_802_3_CURRENT_ADDRESS\n", __func__);
424 if (rndis_per_dev_params[configNr].dev) { 424 if (rndis_per_dev_params[configNr].dev) {
425 length = ETH_ALEN; 425 length = ETH_ALEN;
426 memcpy(outbuf, 426 memcpy(outbuf,
@@ -431,23 +431,23 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
431 break; 431 break;
432 432
433 /* mandatory */ 433 /* mandatory */
434 case OID_802_3_MULTICAST_LIST: 434 case RNDIS_OID_802_3_MULTICAST_LIST:
435 pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__); 435 pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__);
436 /* Multicast base address only */ 436 /* Multicast base address only */
437 *outbuf = cpu_to_le32(0xE0000000); 437 *outbuf = cpu_to_le32(0xE0000000);
438 retval = 0; 438 retval = 0;
439 break; 439 break;
440 440
441 /* mandatory */ 441 /* mandatory */
442 case OID_802_3_MAXIMUM_LIST_SIZE: 442 case RNDIS_OID_802_3_MAXIMUM_LIST_SIZE:
443 pr_debug("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __func__); 443 pr_debug("%s: RNDIS_OID_802_3_MAXIMUM_LIST_SIZE\n", __func__);
444 /* Multicast base address only */ 444 /* Multicast base address only */
445 *outbuf = cpu_to_le32(1); 445 *outbuf = cpu_to_le32(1);
446 retval = 0; 446 retval = 0;
447 break; 447 break;
448 448
449 case OID_802_3_MAC_OPTIONS: 449 case RNDIS_OID_802_3_MAC_OPTIONS:
450 pr_debug("%s: OID_802_3_MAC_OPTIONS\n", __func__); 450 pr_debug("%s: RNDIS_OID_802_3_MAC_OPTIONS\n", __func__);
451 *outbuf = cpu_to_le32(0); 451 *outbuf = cpu_to_le32(0);
452 retval = 0; 452 retval = 0;
453 break; 453 break;
@@ -455,8 +455,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
455 /* ieee802.3 statistics OIDs (table 4-4) */ 455 /* ieee802.3 statistics OIDs (table 4-4) */
456 456
457 /* mandatory */ 457 /* mandatory */
458 case OID_802_3_RCV_ERROR_ALIGNMENT: 458 case RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT:
459 pr_debug("%s: OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__); 459 pr_debug("%s: RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__);
460 if (stats) { 460 if (stats) {
461 *outbuf = cpu_to_le32(stats->rx_frame_errors); 461 *outbuf = cpu_to_le32(stats->rx_frame_errors);
462 retval = 0; 462 retval = 0;
@@ -464,15 +464,15 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
464 break; 464 break;
465 465
466 /* mandatory */ 466 /* mandatory */
467 case OID_802_3_XMIT_ONE_COLLISION: 467 case RNDIS_OID_802_3_XMIT_ONE_COLLISION:
468 pr_debug("%s: OID_802_3_XMIT_ONE_COLLISION\n", __func__); 468 pr_debug("%s: RNDIS_OID_802_3_XMIT_ONE_COLLISION\n", __func__);
469 *outbuf = cpu_to_le32(0); 469 *outbuf = cpu_to_le32(0);
470 retval = 0; 470 retval = 0;
471 break; 471 break;
472 472
473 /* mandatory */ 473 /* mandatory */
474 case OID_802_3_XMIT_MORE_COLLISIONS: 474 case RNDIS_OID_802_3_XMIT_MORE_COLLISIONS:
475 pr_debug("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __func__); 475 pr_debug("%s: RNDIS_OID_802_3_XMIT_MORE_COLLISIONS\n", __func__);
476 *outbuf = cpu_to_le32(0); 476 *outbuf = cpu_to_le32(0);
477 retval = 0; 477 retval = 0;
478 break; 478 break;
@@ -516,7 +516,7 @@ static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len,
516 516
517 params = &rndis_per_dev_params[configNr]; 517 params = &rndis_per_dev_params[configNr];
518 switch (OID) { 518 switch (OID) {
519 case OID_GEN_CURRENT_PACKET_FILTER: 519 case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:
520 520
521 /* these NDIS_PACKET_TYPE_* bitflags are shared with 521 /* these NDIS_PACKET_TYPE_* bitflags are shared with
522 * cdc_filter; it's not RNDIS-specific 522 * cdc_filter; it's not RNDIS-specific
@@ -525,7 +525,7 @@ static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len,
525 * MULTICAST, ALL_MULTICAST, BROADCAST 525 * MULTICAST, ALL_MULTICAST, BROADCAST
526 */ 526 */
527 *params->filter = (u16)get_unaligned_le32(buf); 527 *params->filter = (u16)get_unaligned_le32(buf);
528 pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n", 528 pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER %08x\n",
529 __func__, *params->filter); 529 __func__, *params->filter);
530 530
531 /* this call has a significant side effect: it's 531 /* this call has a significant side effect: it's
@@ -545,9 +545,9 @@ static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len,
545 } 545 }
546 break; 546 break;
547 547
548 case OID_802_3_MULTICAST_LIST: 548 case RNDIS_OID_802_3_MULTICAST_LIST:
549 /* I think we can ignore this */ 549 /* I think we can ignore this */
550 pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__); 550 pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__);
551 retval = 0; 551 retval = 0;
552 break; 552 break;
553 553
@@ -577,7 +577,7 @@ static int rndis_init_response(int configNr, rndis_init_msg_type *buf)
577 return -ENOMEM; 577 return -ENOMEM;
578 resp = (rndis_init_cmplt_type *)r->buf; 578 resp = (rndis_init_cmplt_type *)r->buf;
579 579
580 resp->MessageType = cpu_to_le32(REMOTE_NDIS_INITIALIZE_CMPLT); 580 resp->MessageType = cpu_to_le32(RNDIS_MSG_INIT_C);
581 resp->MessageLength = cpu_to_le32(52); 581 resp->MessageLength = cpu_to_le32(52);
582 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 582 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
583 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); 583 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
@@ -621,7 +621,7 @@ static int rndis_query_response(int configNr, rndis_query_msg_type *buf)
621 return -ENOMEM; 621 return -ENOMEM;
622 resp = (rndis_query_cmplt_type *)r->buf; 622 resp = (rndis_query_cmplt_type *)r->buf;
623 623
624 resp->MessageType = cpu_to_le32(REMOTE_NDIS_QUERY_CMPLT); 624 resp->MessageType = cpu_to_le32(RNDIS_MSG_QUERY_C);
625 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 625 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
626 626
627 if (gen_ndis_query_resp(configNr, le32_to_cpu(buf->OID), 627 if (gen_ndis_query_resp(configNr, le32_to_cpu(buf->OID),
@@ -668,7 +668,7 @@ static int rndis_set_response(int configNr, rndis_set_msg_type *buf)
668 pr_debug("\n"); 668 pr_debug("\n");
669#endif 669#endif
670 670
671 resp->MessageType = cpu_to_le32(REMOTE_NDIS_SET_CMPLT); 671 resp->MessageType = cpu_to_le32(RNDIS_MSG_SET_C);
672 resp->MessageLength = cpu_to_le32(16); 672 resp->MessageLength = cpu_to_le32(16);
673 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 673 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
674 if (gen_ndis_set_resp(configNr, le32_to_cpu(buf->OID), 674 if (gen_ndis_set_resp(configNr, le32_to_cpu(buf->OID),
@@ -692,7 +692,7 @@ static int rndis_reset_response(int configNr, rndis_reset_msg_type *buf)
692 return -ENOMEM; 692 return -ENOMEM;
693 resp = (rndis_reset_cmplt_type *)r->buf; 693 resp = (rndis_reset_cmplt_type *)r->buf;
694 694
695 resp->MessageType = cpu_to_le32(REMOTE_NDIS_RESET_CMPLT); 695 resp->MessageType = cpu_to_le32(RNDIS_MSG_RESET_C);
696 resp->MessageLength = cpu_to_le32(16); 696 resp->MessageLength = cpu_to_le32(16);
697 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); 697 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
698 /* resent information */ 698 /* resent information */
@@ -716,8 +716,7 @@ static int rndis_keepalive_response(int configNr,
716 return -ENOMEM; 716 return -ENOMEM;
717 resp = (rndis_keepalive_cmplt_type *)r->buf; 717 resp = (rndis_keepalive_cmplt_type *)r->buf;
718 718
719 resp->MessageType = cpu_to_le32( 719 resp->MessageType = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C);
720 REMOTE_NDIS_KEEPALIVE_CMPLT);
721 resp->MessageLength = cpu_to_le32(16); 720 resp->MessageLength = cpu_to_le32(16);
722 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ 721 resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
723 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); 722 resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
@@ -745,7 +744,7 @@ static int rndis_indicate_status_msg(int configNr, u32 status)
745 return -ENOMEM; 744 return -ENOMEM;
746 resp = (rndis_indicate_status_msg_type *)r->buf; 745 resp = (rndis_indicate_status_msg_type *)r->buf;
747 746
748 resp->MessageType = cpu_to_le32(REMOTE_NDIS_INDICATE_STATUS_MSG); 747 resp->MessageType = cpu_to_le32(RNDIS_MSG_INDICATE);
749 resp->MessageLength = cpu_to_le32(20); 748 resp->MessageLength = cpu_to_le32(20);
750 resp->Status = cpu_to_le32(status); 749 resp->Status = cpu_to_le32(status);
751 resp->StatusBufferLength = cpu_to_le32(0); 750 resp->StatusBufferLength = cpu_to_le32(0);
@@ -758,7 +757,7 @@ static int rndis_indicate_status_msg(int configNr, u32 status)
758int rndis_signal_connect(int configNr) 757int rndis_signal_connect(int configNr)
759{ 758{
760 rndis_per_dev_params[configNr].media_state 759 rndis_per_dev_params[configNr].media_state
761 = NDIS_MEDIA_STATE_CONNECTED; 760 = RNDIS_MEDIA_STATE_CONNECTED;
762 return rndis_indicate_status_msg(configNr, 761 return rndis_indicate_status_msg(configNr,
763 RNDIS_STATUS_MEDIA_CONNECT); 762 RNDIS_STATUS_MEDIA_CONNECT);
764} 763}
@@ -766,7 +765,7 @@ int rndis_signal_connect(int configNr)
766int rndis_signal_disconnect(int configNr) 765int rndis_signal_disconnect(int configNr)
767{ 766{
768 rndis_per_dev_params[configNr].media_state 767 rndis_per_dev_params[configNr].media_state
769 = NDIS_MEDIA_STATE_DISCONNECTED; 768 = RNDIS_MEDIA_STATE_DISCONNECTED;
770 return rndis_indicate_status_msg(configNr, 769 return rndis_indicate_status_msg(configNr,
771 RNDIS_STATUS_MEDIA_DISCONNECT); 770 RNDIS_STATUS_MEDIA_DISCONNECT);
772} 771}
@@ -817,15 +816,15 @@ int rndis_msg_parser(u8 configNr, u8 *buf)
817 816
818 /* For USB: responses may take up to 10 seconds */ 817 /* For USB: responses may take up to 10 seconds */
819 switch (MsgType) { 818 switch (MsgType) {
820 case REMOTE_NDIS_INITIALIZE_MSG: 819 case RNDIS_MSG_INIT:
821 pr_debug("%s: REMOTE_NDIS_INITIALIZE_MSG\n", 820 pr_debug("%s: RNDIS_MSG_INIT\n",
822 __func__); 821 __func__);
823 params->state = RNDIS_INITIALIZED; 822 params->state = RNDIS_INITIALIZED;
824 return rndis_init_response(configNr, 823 return rndis_init_response(configNr,
825 (rndis_init_msg_type *)buf); 824 (rndis_init_msg_type *)buf);
826 825
827 case REMOTE_NDIS_HALT_MSG: 826 case RNDIS_MSG_HALT:
828 pr_debug("%s: REMOTE_NDIS_HALT_MSG\n", 827 pr_debug("%s: RNDIS_MSG_HALT\n",
829 __func__); 828 __func__);
830 params->state = RNDIS_UNINITIALIZED; 829 params->state = RNDIS_UNINITIALIZED;
831 if (params->dev) { 830 if (params->dev) {
@@ -834,24 +833,24 @@ int rndis_msg_parser(u8 configNr, u8 *buf)
834 } 833 }
835 return 0; 834 return 0;
836 835
837 case REMOTE_NDIS_QUERY_MSG: 836 case RNDIS_MSG_QUERY:
838 return rndis_query_response(configNr, 837 return rndis_query_response(configNr,
839 (rndis_query_msg_type *)buf); 838 (rndis_query_msg_type *)buf);
840 839
841 case REMOTE_NDIS_SET_MSG: 840 case RNDIS_MSG_SET:
842 return rndis_set_response(configNr, 841 return rndis_set_response(configNr,
843 (rndis_set_msg_type *)buf); 842 (rndis_set_msg_type *)buf);
844 843
845 case REMOTE_NDIS_RESET_MSG: 844 case RNDIS_MSG_RESET:
846 pr_debug("%s: REMOTE_NDIS_RESET_MSG\n", 845 pr_debug("%s: RNDIS_MSG_RESET\n",
847 __func__); 846 __func__);
848 return rndis_reset_response(configNr, 847 return rndis_reset_response(configNr,
849 (rndis_reset_msg_type *)buf); 848 (rndis_reset_msg_type *)buf);
850 849
851 case REMOTE_NDIS_KEEPALIVE_MSG: 850 case RNDIS_MSG_KEEPALIVE:
852 /* For USB: host does this every 5 seconds */ 851 /* For USB: host does this every 5 seconds */
853 if (rndis_debug > 1) 852 if (rndis_debug > 1)
854 pr_debug("%s: REMOTE_NDIS_KEEPALIVE_MSG\n", 853 pr_debug("%s: RNDIS_MSG_KEEPALIVE\n",
855 __func__); 854 __func__);
856 return rndis_keepalive_response(configNr, 855 return rndis_keepalive_response(configNr,
857 (rndis_keepalive_msg_type *) 856 (rndis_keepalive_msg_type *)
@@ -963,7 +962,7 @@ void rndis_add_hdr(struct sk_buff *skb)
963 return; 962 return;
964 header = (void *)skb_push(skb, sizeof(*header)); 963 header = (void *)skb_push(skb, sizeof(*header));
965 memset(header, 0, sizeof *header); 964 memset(header, 0, sizeof *header);
966 header->MessageType = cpu_to_le32(REMOTE_NDIS_PACKET_MSG); 965 header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET);
967 header->MessageLength = cpu_to_le32(skb->len); 966 header->MessageLength = cpu_to_le32(skb->len);
968 header->DataOffset = cpu_to_le32(36); 967 header->DataOffset = cpu_to_le32(36);
969 header->DataLength = cpu_to_le32(skb->len - sizeof(*header)); 968 header->DataLength = cpu_to_le32(skb->len - sizeof(*header));
@@ -1031,7 +1030,7 @@ int rndis_rm_hdr(struct gether *port,
1031 __le32 *tmp = (void *)skb->data; 1030 __le32 *tmp = (void *)skb->data;
1032 1031
1033 /* MessageType, MessageLength */ 1032 /* MessageType, MessageLength */
1034 if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG) 1033 if (cpu_to_le32(RNDIS_MSG_PACKET)
1035 != get_unaligned(tmp++)) { 1034 != get_unaligned(tmp++)) {
1036 dev_kfree_skb_any(skb); 1035 dev_kfree_skb_any(skb);
1037 return -EINVAL; 1036 return -EINVAL;
@@ -1173,7 +1172,7 @@ int rndis_init(void)
1173 rndis_per_dev_params[i].used = 0; 1172 rndis_per_dev_params[i].used = 0;
1174 rndis_per_dev_params[i].state = RNDIS_UNINITIALIZED; 1173 rndis_per_dev_params[i].state = RNDIS_UNINITIALIZED;
1175 rndis_per_dev_params[i].media_state 1174 rndis_per_dev_params[i].media_state
1176 = NDIS_MEDIA_STATE_DISCONNECTED; 1175 = RNDIS_MEDIA_STATE_DISCONNECTED;
1177 INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue)); 1176 INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
1178 } 1177 }
1179 1178
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index 907c33008118..0647f2f34e89 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -15,58 +15,12 @@
15#ifndef _LINUX_RNDIS_H 15#ifndef _LINUX_RNDIS_H
16#define _LINUX_RNDIS_H 16#define _LINUX_RNDIS_H
17 17
18#include <linux/rndis.h>
18#include "ndis.h" 19#include "ndis.h"
19 20
20#define RNDIS_MAXIMUM_FRAME_SIZE 1518 21#define RNDIS_MAXIMUM_FRAME_SIZE 1518
21#define RNDIS_MAX_TOTAL_SIZE 1558 22#define RNDIS_MAX_TOTAL_SIZE 1558
22 23
23/* Remote NDIS Versions */
24#define RNDIS_MAJOR_VERSION 1
25#define RNDIS_MINOR_VERSION 0
26
27/* Status Values */
28#define RNDIS_STATUS_SUCCESS 0x00000000U /* Success */
29#define RNDIS_STATUS_FAILURE 0xC0000001U /* Unspecified error */
30#define RNDIS_STATUS_INVALID_DATA 0xC0010015U /* Invalid data */
31#define RNDIS_STATUS_NOT_SUPPORTED 0xC00000BBU /* Unsupported request */
32#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000BU /* Device connected */
33#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000CU /* Device disconnected */
34/* For all not specified status messages:
35 * RNDIS_STATUS_Xxx -> NDIS_STATUS_Xxx
36 */
37
38/* Message Set for Connectionless (802.3) Devices */
39#define REMOTE_NDIS_PACKET_MSG 0x00000001U
40#define REMOTE_NDIS_INITIALIZE_MSG 0x00000002U /* Initialize device */
41#define REMOTE_NDIS_HALT_MSG 0x00000003U
42#define REMOTE_NDIS_QUERY_MSG 0x00000004U
43#define REMOTE_NDIS_SET_MSG 0x00000005U
44#define REMOTE_NDIS_RESET_MSG 0x00000006U
45#define REMOTE_NDIS_INDICATE_STATUS_MSG 0x00000007U
46#define REMOTE_NDIS_KEEPALIVE_MSG 0x00000008U
47
48/* Message completion */
49#define REMOTE_NDIS_INITIALIZE_CMPLT 0x80000002U
50#define REMOTE_NDIS_QUERY_CMPLT 0x80000004U
51#define REMOTE_NDIS_SET_CMPLT 0x80000005U
52#define REMOTE_NDIS_RESET_CMPLT 0x80000006U
53#define REMOTE_NDIS_KEEPALIVE_CMPLT 0x80000008U
54
55/* Device Flags */
56#define RNDIS_DF_CONNECTIONLESS 0x00000001U
57#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002U
58
59#define RNDIS_MEDIUM_802_3 0x00000000U
60
61/* from drivers/net/sk98lin/h/skgepnmi.h */
62#define OID_PNP_CAPABILITIES 0xFD010100
63#define OID_PNP_SET_POWER 0xFD010101
64#define OID_PNP_QUERY_POWER 0xFD010102
65#define OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103
66#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104
67#define OID_PNP_ENABLE_WAKE_UP 0xFD010106
68
69
70typedef struct rndis_init_msg_type 24typedef struct rndis_init_msg_type
71{ 25{
72 __le32 MessageType; 26 __le32 MessageType;
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 2fa9865babed..e5e44f8cde9a 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -263,8 +263,8 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
263 263
264 if (udc_is_newstyle(udc)) { 264 if (udc_is_newstyle(udc)) {
265 udc->driver->disconnect(udc->gadget); 265 udc->driver->disconnect(udc->gadget);
266 udc->driver->unbind(udc->gadget);
267 usb_gadget_disconnect(udc->gadget); 266 usb_gadget_disconnect(udc->gadget);
267 udc->driver->unbind(udc->gadget);
268 usb_gadget_udc_stop(udc->gadget, udc->driver); 268 usb_gadget_udc_stop(udc->gadget, udc->driver);
269 } else { 269 } else {
270 usb_gadget_stop(udc->gadget, udc->driver); 270 usb_gadget_stop(udc->gadget, udc->driver);
@@ -415,9 +415,9 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
415 usb_gadget_udc_start(udc->gadget, udc->driver); 415 usb_gadget_udc_start(udc->gadget, udc->driver);
416 usb_gadget_connect(udc->gadget); 416 usb_gadget_connect(udc->gadget);
417 } else if (sysfs_streq(buf, "disconnect")) { 417 } else if (sysfs_streq(buf, "disconnect")) {
418 usb_gadget_disconnect(udc->gadget);
418 if (udc_is_newstyle(udc)) 419 if (udc_is_newstyle(udc))
419 usb_gadget_udc_stop(udc->gadget, udc->driver); 420 usb_gadget_udc_stop(udc->gadget, udc->driver);
420 usb_gadget_disconnect(udc->gadget);
421 } else { 421 } else {
422 dev_err(dev, "unsupported command '%s'\n", buf); 422 dev_err(dev, "unsupported command '%s'\n", buf);
423 return -EINVAL; 423 return -EINVAL;
diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
index bc78c606c12b..ca4e03a1c73a 100644
--- a/drivers/usb/gadget/uvc.h
+++ b/drivers/usb/gadget/uvc.h
@@ -28,7 +28,7 @@
28 28
29struct uvc_request_data 29struct uvc_request_data
30{ 30{
31 unsigned int length; 31 __s32 length;
32 __u8 data[60]; 32 __u8 data[60];
33}; 33};
34 34
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index f6e083b50191..54d7ca559cb2 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -39,7 +39,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
39 if (data->length < 0) 39 if (data->length < 0)
40 return usb_ep_set_halt(cdev->gadget->ep0); 40 return usb_ep_set_halt(cdev->gadget->ep0);
41 41
42 req->length = min(uvc->event_length, data->length); 42 req->length = min_t(unsigned int, uvc->event_length, data->length);
43 req->zero = data->length < uvc->event_length; 43 req->zero = data->length < uvc->event_length;
44 req->dma = DMA_ADDR_INVALID; 44 req->dma = DMA_ADDR_INVALID;
45 45
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 01bb7241d6ef..fe8dc069164e 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -144,6 +144,14 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
144 hcd->has_tt = 1; 144 hcd->has_tt = 1;
145 tdi_reset(ehci); 145 tdi_reset(ehci);
146 } 146 }
147 if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
148 /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
149 if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
150 ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
151 hcd->broken_pci_sleep = 1;
152 device_set_wakeup_capable(&pdev->dev, false);
153 }
154 }
147 break; 155 break;
148 case PCI_VENDOR_ID_TDI: 156 case PCI_VENDOR_ID_TDI:
149 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { 157 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 86183366647f..f214a80cdee2 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -24,6 +24,7 @@
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/of_gpio.h> 26#include <linux/of_gpio.h>
27#include <linux/pm_runtime.h>
27 28
28#include <mach/usb_phy.h> 29#include <mach/usb_phy.h>
29#include <mach/iomap.h> 30#include <mach/iomap.h>
@@ -37,9 +38,7 @@ struct tegra_ehci_hcd {
37 struct clk *emc_clk; 38 struct clk *emc_clk;
38 struct usb_phy *transceiver; 39 struct usb_phy *transceiver;
39 int host_resumed; 40 int host_resumed;
40 int bus_suspended;
41 int port_resuming; 41 int port_resuming;
42 int power_down_on_bus_suspend;
43 enum tegra_usb_phy_port_speed port_speed; 42 enum tegra_usb_phy_port_speed port_speed;
44}; 43};
45 44
@@ -273,120 +272,6 @@ static void tegra_ehci_restart(struct usb_hcd *hcd)
273 up_write(&ehci_cf_port_reset_rwsem); 272 up_write(&ehci_cf_port_reset_rwsem);
274} 273}
275 274
276static int tegra_usb_suspend(struct usb_hcd *hcd)
277{
278 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
279 struct ehci_regs __iomem *hw = tegra->ehci->regs;
280 unsigned long flags;
281
282 spin_lock_irqsave(&tegra->ehci->lock, flags);
283
284 tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
285 ehci_halt(tegra->ehci);
286 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
287
288 spin_unlock_irqrestore(&tegra->ehci->lock, flags);
289
290 tegra_ehci_power_down(hcd);
291 return 0;
292}
293
294static int tegra_usb_resume(struct usb_hcd *hcd)
295{
296 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
297 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
298 struct ehci_regs __iomem *hw = ehci->regs;
299 unsigned long val;
300
301 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
302 tegra_ehci_power_up(hcd);
303
304 if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
305 /* Wait for the phy to detect new devices
306 * before we restart the controller */
307 msleep(10);
308 goto restart;
309 }
310
311 /* Force the phy to keep data lines in suspend state */
312 tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
313
314 /* Enable host mode */
315 tdi_reset(ehci);
316
317 /* Enable Port Power */
318 val = readl(&hw->port_status[0]);
319 val |= PORT_POWER;
320 writel(val, &hw->port_status[0]);
321 udelay(10);
322
323 /* Check if the phy resume from LP0. When the phy resume from LP0
324 * USB register will be reset. */
325 if (!readl(&hw->async_next)) {
326 /* Program the field PTC based on the saved speed mode */
327 val = readl(&hw->port_status[0]);
328 val &= ~PORT_TEST(~0);
329 if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
330 val |= PORT_TEST_FORCE;
331 else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
332 val |= PORT_TEST(6);
333 else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
334 val |= PORT_TEST(7);
335 writel(val, &hw->port_status[0]);
336 udelay(10);
337
338 /* Disable test mode by setting PTC field to NORMAL_OP */
339 val = readl(&hw->port_status[0]);
340 val &= ~PORT_TEST(~0);
341 writel(val, &hw->port_status[0]);
342 udelay(10);
343 }
344
345 /* Poll until CCS is enabled */
346 if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
347 PORT_CONNECT, 2000)) {
348 pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
349 goto restart;
350 }
351
352 /* Poll until PE is enabled */
353 if (handshake(ehci, &hw->port_status[0], PORT_PE,
354 PORT_PE, 2000)) {
355 pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
356 goto restart;
357 }
358
359 /* Clear the PCI status, to avoid an interrupt taken upon resume */
360 val = readl(&hw->status);
361 val |= STS_PCD;
362 writel(val, &hw->status);
363
364 /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
365 val = readl(&hw->port_status[0]);
366 if ((val & PORT_POWER) && (val & PORT_PE)) {
367 val |= PORT_SUSPEND;
368 writel(val, &hw->port_status[0]);
369
370 /* Wait until port suspend completes */
371 if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
372 PORT_SUSPEND, 1000)) {
373 pr_err("%s: timeout waiting for PORT_SUSPEND\n",
374 __func__);
375 goto restart;
376 }
377 }
378
379 tegra_ehci_phy_restore_end(tegra->phy);
380 return 0;
381
382restart:
383 if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
384 tegra_ehci_phy_restore_end(tegra->phy);
385
386 tegra_ehci_restart(hcd);
387 return 0;
388}
389
390static void tegra_ehci_shutdown(struct usb_hcd *hcd) 275static void tegra_ehci_shutdown(struct usb_hcd *hcd)
391{ 276{
392 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); 277 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
@@ -434,36 +319,6 @@ static int tegra_ehci_setup(struct usb_hcd *hcd)
434 return retval; 319 return retval;
435} 320}
436 321
437#ifdef CONFIG_PM
438static int tegra_ehci_bus_suspend(struct usb_hcd *hcd)
439{
440 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
441 int error_status = 0;
442
443 error_status = ehci_bus_suspend(hcd);
444 if (!error_status && tegra->power_down_on_bus_suspend) {
445 tegra_usb_suspend(hcd);
446 tegra->bus_suspended = 1;
447 }
448
449 return error_status;
450}
451
452static int tegra_ehci_bus_resume(struct usb_hcd *hcd)
453{
454 struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
455
456 if (tegra->bus_suspended && tegra->power_down_on_bus_suspend) {
457 tegra_usb_resume(hcd);
458 tegra->bus_suspended = 0;
459 }
460
461 tegra_usb_phy_preresume(tegra->phy);
462 tegra->port_resuming = 1;
463 return ehci_bus_resume(hcd);
464}
465#endif
466
467struct temp_buffer { 322struct temp_buffer {
468 void *kmalloc_ptr; 323 void *kmalloc_ptr;
469 void *old_xfer_buffer; 324 void *old_xfer_buffer;
@@ -574,8 +429,8 @@ static const struct hc_driver tegra_ehci_hc_driver = {
574 .hub_control = tegra_ehci_hub_control, 429 .hub_control = tegra_ehci_hub_control,
575 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, 430 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
576#ifdef CONFIG_PM 431#ifdef CONFIG_PM
577 .bus_suspend = tegra_ehci_bus_suspend, 432 .bus_suspend = ehci_bus_suspend,
578 .bus_resume = tegra_ehci_bus_resume, 433 .bus_resume = ehci_bus_resume,
579#endif 434#endif
580 .relinquish_port = ehci_relinquish_port, 435 .relinquish_port = ehci_relinquish_port,
581 .port_handed_over = ehci_port_handed_over, 436 .port_handed_over = ehci_port_handed_over,
@@ -603,11 +458,187 @@ static int setup_vbus_gpio(struct platform_device *pdev)
603 dev_err(&pdev->dev, "can't enable vbus\n"); 458 dev_err(&pdev->dev, "can't enable vbus\n");
604 return err; 459 return err;
605 } 460 }
606 gpio_set_value(gpio, 1);
607 461
608 return err; 462 return err;
609} 463}
610 464
465#ifdef CONFIG_PM
466
467static int controller_suspend(struct device *dev)
468{
469 struct tegra_ehci_hcd *tegra =
470 platform_get_drvdata(to_platform_device(dev));
471 struct ehci_hcd *ehci = tegra->ehci;
472 struct usb_hcd *hcd = ehci_to_hcd(ehci);
473 struct ehci_regs __iomem *hw = ehci->regs;
474 unsigned long flags;
475
476 if (time_before(jiffies, ehci->next_statechange))
477 msleep(10);
478
479 spin_lock_irqsave(&ehci->lock, flags);
480
481 tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
482 ehci_halt(ehci);
483 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
484
485 spin_unlock_irqrestore(&ehci->lock, flags);
486
487 tegra_ehci_power_down(hcd);
488 return 0;
489}
490
491static int controller_resume(struct device *dev)
492{
493 struct tegra_ehci_hcd *tegra =
494 platform_get_drvdata(to_platform_device(dev));
495 struct ehci_hcd *ehci = tegra->ehci;
496 struct usb_hcd *hcd = ehci_to_hcd(ehci);
497 struct ehci_regs __iomem *hw = ehci->regs;
498 unsigned long val;
499
500 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
501 tegra_ehci_power_up(hcd);
502
503 if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
504 /* Wait for the phy to detect new devices
505 * before we restart the controller */
506 msleep(10);
507 goto restart;
508 }
509
510 /* Force the phy to keep data lines in suspend state */
511 tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
512
513 /* Enable host mode */
514 tdi_reset(ehci);
515
516 /* Enable Port Power */
517 val = readl(&hw->port_status[0]);
518 val |= PORT_POWER;
519 writel(val, &hw->port_status[0]);
520 udelay(10);
521
522 /* Check if the phy resume from LP0. When the phy resume from LP0
523 * USB register will be reset. */
524 if (!readl(&hw->async_next)) {
525 /* Program the field PTC based on the saved speed mode */
526 val = readl(&hw->port_status[0]);
527 val &= ~PORT_TEST(~0);
528 if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
529 val |= PORT_TEST_FORCE;
530 else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
531 val |= PORT_TEST(6);
532 else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
533 val |= PORT_TEST(7);
534 writel(val, &hw->port_status[0]);
535 udelay(10);
536
537 /* Disable test mode by setting PTC field to NORMAL_OP */
538 val = readl(&hw->port_status[0]);
539 val &= ~PORT_TEST(~0);
540 writel(val, &hw->port_status[0]);
541 udelay(10);
542 }
543
544 /* Poll until CCS is enabled */
545 if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
546 PORT_CONNECT, 2000)) {
547 pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
548 goto restart;
549 }
550
551 /* Poll until PE is enabled */
552 if (handshake(ehci, &hw->port_status[0], PORT_PE,
553 PORT_PE, 2000)) {
554 pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
555 goto restart;
556 }
557
558 /* Clear the PCI status, to avoid an interrupt taken upon resume */
559 val = readl(&hw->status);
560 val |= STS_PCD;
561 writel(val, &hw->status);
562
563 /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
564 val = readl(&hw->port_status[0]);
565 if ((val & PORT_POWER) && (val & PORT_PE)) {
566 val |= PORT_SUSPEND;
567 writel(val, &hw->port_status[0]);
568
569 /* Wait until port suspend completes */
570 if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
571 PORT_SUSPEND, 1000)) {
572 pr_err("%s: timeout waiting for PORT_SUSPEND\n",
573 __func__);
574 goto restart;
575 }
576 }
577
578 tegra_ehci_phy_restore_end(tegra->phy);
579 goto done;
580
581 restart:
582 if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
583 tegra_ehci_phy_restore_end(tegra->phy);
584
585 tegra_ehci_restart(hcd);
586
587 done:
588 tegra_usb_phy_preresume(tegra->phy);
589 tegra->port_resuming = 1;
590 return 0;
591}
592
593static int tegra_ehci_suspend(struct device *dev)
594{
595 struct tegra_ehci_hcd *tegra =
596 platform_get_drvdata(to_platform_device(dev));
597 struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
598 int rc = 0;
599
600 /*
601 * When system sleep is supported and USB controller wakeup is
602 * implemented: If the controller is runtime-suspended and the
603 * wakeup setting needs to be changed, call pm_runtime_resume().
604 */
605 if (HCD_HW_ACCESSIBLE(hcd))
606 rc = controller_suspend(dev);
607 return rc;
608}
609
610static int tegra_ehci_resume(struct device *dev)
611{
612 int rc;
613
614 rc = controller_resume(dev);
615 if (rc == 0) {
616 pm_runtime_disable(dev);
617 pm_runtime_set_active(dev);
618 pm_runtime_enable(dev);
619 }
620 return rc;
621}
622
623static int tegra_ehci_runtime_suspend(struct device *dev)
624{
625 return controller_suspend(dev);
626}
627
628static int tegra_ehci_runtime_resume(struct device *dev)
629{
630 return controller_resume(dev);
631}
632
633static const struct dev_pm_ops tegra_ehci_pm_ops = {
634 .suspend = tegra_ehci_suspend,
635 .resume = tegra_ehci_resume,
636 .runtime_suspend = tegra_ehci_runtime_suspend,
637 .runtime_resume = tegra_ehci_runtime_resume,
638};
639
640#endif
641
611static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32); 642static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
612 643
613static int tegra_ehci_probe(struct platform_device *pdev) 644static int tegra_ehci_probe(struct platform_device *pdev)
@@ -722,7 +753,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
722 } 753 }
723 754
724 tegra->host_resumed = 1; 755 tegra->host_resumed = 1;
725 tegra->power_down_on_bus_suspend = pdata->power_down_on_bus_suspend;
726 tegra->ehci = hcd_to_ehci(hcd); 756 tegra->ehci = hcd_to_ehci(hcd);
727 757
728 irq = platform_get_irq(pdev, 0); 758 irq = platform_get_irq(pdev, 0);
@@ -746,6 +776,14 @@ static int tegra_ehci_probe(struct platform_device *pdev)
746 goto fail; 776 goto fail;
747 } 777 }
748 778
779 pm_runtime_set_active(&pdev->dev);
780 pm_runtime_get_noresume(&pdev->dev);
781
782 /* Don't skip the pm_runtime_forbid call if wakeup isn't working */
783 /* if (!pdata->power_down_on_bus_suspend) */
784 pm_runtime_forbid(&pdev->dev);
785 pm_runtime_enable(&pdev->dev);
786 pm_runtime_put_sync(&pdev->dev);
749 return err; 787 return err;
750 788
751fail: 789fail:
@@ -772,33 +810,6 @@ fail_hcd:
772 return err; 810 return err;
773} 811}
774 812
775#ifdef CONFIG_PM
776static int tegra_ehci_resume(struct platform_device *pdev)
777{
778 struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
779 struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
780
781 if (tegra->bus_suspended)
782 return 0;
783
784 return tegra_usb_resume(hcd);
785}
786
787static int tegra_ehci_suspend(struct platform_device *pdev, pm_message_t state)
788{
789 struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
790 struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
791
792 if (tegra->bus_suspended)
793 return 0;
794
795 if (time_before(jiffies, tegra->ehci->next_statechange))
796 msleep(10);
797
798 return tegra_usb_suspend(hcd);
799}
800#endif
801
802static int tegra_ehci_remove(struct platform_device *pdev) 813static int tegra_ehci_remove(struct platform_device *pdev)
803{ 814{
804 struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev); 815 struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
@@ -807,6 +818,10 @@ static int tegra_ehci_remove(struct platform_device *pdev)
807 if (tegra == NULL || hcd == NULL) 818 if (tegra == NULL || hcd == NULL)
808 return -EINVAL; 819 return -EINVAL;
809 820
821 pm_runtime_get_sync(&pdev->dev);
822 pm_runtime_disable(&pdev->dev);
823 pm_runtime_put_noidle(&pdev->dev);
824
810#ifdef CONFIG_USB_OTG_UTILS 825#ifdef CONFIG_USB_OTG_UTILS
811 if (tegra->transceiver) { 826 if (tegra->transceiver) {
812 otg_set_host(tegra->transceiver->otg, NULL); 827 otg_set_host(tegra->transceiver->otg, NULL);
@@ -847,13 +862,12 @@ static struct of_device_id tegra_ehci_of_match[] __devinitdata = {
847static struct platform_driver tegra_ehci_driver = { 862static struct platform_driver tegra_ehci_driver = {
848 .probe = tegra_ehci_probe, 863 .probe = tegra_ehci_probe,
849 .remove = tegra_ehci_remove, 864 .remove = tegra_ehci_remove,
850#ifdef CONFIG_PM
851 .suspend = tegra_ehci_suspend,
852 .resume = tegra_ehci_resume,
853#endif
854 .shutdown = tegra_ehci_hcd_shutdown, 865 .shutdown = tegra_ehci_hcd_shutdown,
855 .driver = { 866 .driver = {
856 .name = "tegra-ehci", 867 .name = "tegra-ehci",
857 .of_match_table = tegra_ehci_of_match, 868 .of_match_table = tegra_ehci_of_match,
869#ifdef CONFIG_PM
870 .pm = &tegra_ehci_pm_ops,
871#endif
858 } 872 }
859}; 873};
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 97ab975fa442..768b4b55c816 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -386,7 +386,7 @@ static int davinci_musb_init(struct musb *musb)
386 usb_nop_xceiv_register(); 386 usb_nop_xceiv_register();
387 musb->xceiv = usb_get_transceiver(); 387 musb->xceiv = usb_get_transceiver();
388 if (!musb->xceiv) 388 if (!musb->xceiv)
389 return -ENODEV; 389 goto unregister;
390 390
391 musb->mregs += DAVINCI_BASE_OFFSET; 391 musb->mregs += DAVINCI_BASE_OFFSET;
392 392
@@ -444,6 +444,7 @@ static int davinci_musb_init(struct musb *musb)
444 444
445fail: 445fail:
446 usb_put_transceiver(musb->xceiv); 446 usb_put_transceiver(musb->xceiv);
447unregister:
447 usb_nop_xceiv_unregister(); 448 usb_nop_xceiv_unregister();
448 return -ENODEV; 449 return -ENODEV;
449} 450}
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 93de517a32a0..f4a40f001c88 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -449,7 +449,7 @@ struct musb {
449 * We added this flag to forcefully disable double 449 * We added this flag to forcefully disable double
450 * buffering until we get it working. 450 * buffering until we get it working.
451 */ 451 */
452 unsigned double_buffer_not_ok:1 __deprecated; 452 unsigned double_buffer_not_ok:1;
453 453
454 struct musb_hdrc_config *config; 454 struct musb_hdrc_config *config;
455 455
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 1d5eda26fbd1..f7c1c8e2dc3f 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -40,7 +40,7 @@
40#if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \ 40#if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
41 && !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \ 41 && !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
42 && !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN) \ 42 && !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN) \
43 && !defined(CONFIG_MIPS) 43 && !defined(CONFIG_MIPS) && !defined(CONFIG_M68K)
44static inline void readsl(const void __iomem *addr, void *buf, int len) 44static inline void readsl(const void __iomem *addr, void *buf, int len)
45 { insl((unsigned long)addr, buf, len); } 45 { insl((unsigned long)addr, buf, len); }
46static inline void readsw(const void __iomem *addr, void *buf, int len) 46static inline void readsw(const void __iomem *addr, void *buf, int len)
diff --git a/drivers/usb/otg/gpio_vbus.c b/drivers/usb/otg/gpio_vbus.c
index 3ece43a2e4c1..a0a2178974fe 100644
--- a/drivers/usb/otg/gpio_vbus.c
+++ b/drivers/usb/otg/gpio_vbus.c
@@ -96,7 +96,7 @@ static void gpio_vbus_work(struct work_struct *work)
96 struct gpio_vbus_data *gpio_vbus = 96 struct gpio_vbus_data *gpio_vbus =
97 container_of(work, struct gpio_vbus_data, work); 97 container_of(work, struct gpio_vbus_data, work);
98 struct gpio_vbus_mach_info *pdata = gpio_vbus->dev->platform_data; 98 struct gpio_vbus_mach_info *pdata = gpio_vbus->dev->platform_data;
99 int gpio; 99 int gpio, status;
100 100
101 if (!gpio_vbus->phy.otg->gadget) 101 if (!gpio_vbus->phy.otg->gadget)
102 return; 102 return;
@@ -108,7 +108,9 @@ static void gpio_vbus_work(struct work_struct *work)
108 */ 108 */
109 gpio = pdata->gpio_pullup; 109 gpio = pdata->gpio_pullup;
110 if (is_vbus_powered(pdata)) { 110 if (is_vbus_powered(pdata)) {
111 status = USB_EVENT_VBUS;
111 gpio_vbus->phy.state = OTG_STATE_B_PERIPHERAL; 112 gpio_vbus->phy.state = OTG_STATE_B_PERIPHERAL;
113 gpio_vbus->phy.last_event = status;
112 usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget); 114 usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget);
113 115
114 /* drawing a "unit load" is *always* OK, except for OTG */ 116 /* drawing a "unit load" is *always* OK, except for OTG */
@@ -117,6 +119,9 @@ static void gpio_vbus_work(struct work_struct *work)
117 /* optionally enable D+ pullup */ 119 /* optionally enable D+ pullup */
118 if (gpio_is_valid(gpio)) 120 if (gpio_is_valid(gpio))
119 gpio_set_value(gpio, !pdata->gpio_pullup_inverted); 121 gpio_set_value(gpio, !pdata->gpio_pullup_inverted);
122
123 atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
124 status, gpio_vbus->phy.otg->gadget);
120 } else { 125 } else {
121 /* optionally disable D+ pullup */ 126 /* optionally disable D+ pullup */
122 if (gpio_is_valid(gpio)) 127 if (gpio_is_valid(gpio))
@@ -125,7 +130,12 @@ static void gpio_vbus_work(struct work_struct *work)
125 set_vbus_draw(gpio_vbus, 0); 130 set_vbus_draw(gpio_vbus, 0);
126 131
127 usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget); 132 usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget);
133 status = USB_EVENT_NONE;
128 gpio_vbus->phy.state = OTG_STATE_B_IDLE; 134 gpio_vbus->phy.state = OTG_STATE_B_IDLE;
135 gpio_vbus->phy.last_event = status;
136
137 atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
138 status, gpio_vbus->phy.otg->gadget);
129 } 139 }
130} 140}
131 141
@@ -287,6 +297,9 @@ static int __init gpio_vbus_probe(struct platform_device *pdev)
287 irq, err); 297 irq, err);
288 goto err_irq; 298 goto err_irq;
289 } 299 }
300
301 ATOMIC_INIT_NOTIFIER_HEAD(&gpio_vbus->phy.notifier);
302
290 INIT_WORK(&gpio_vbus->work, gpio_vbus_work); 303 INIT_WORK(&gpio_vbus->work, gpio_vbus_work);
291 304
292 gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw"); 305 gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw");
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f0da2c32fbde..f82a7394756e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -24,6 +24,7 @@
24#include <linux/if_arp.h> 24#include <linux/if_arp.h>
25#include <linux/if_tun.h> 25#include <linux/if_tun.h>
26#include <linux/if_macvlan.h> 26#include <linux/if_macvlan.h>
27#include <linux/if_vlan.h>
27 28
28#include <net/sock.h> 29#include <net/sock.h>
29 30
@@ -166,7 +167,7 @@ static void handle_tx(struct vhost_net *net)
166 if (wmem < sock->sk->sk_sndbuf / 2) 167 if (wmem < sock->sk->sk_sndbuf / 2)
167 tx_poll_stop(net); 168 tx_poll_stop(net);
168 hdr_size = vq->vhost_hlen; 169 hdr_size = vq->vhost_hlen;
169 zcopy = vhost_sock_zcopy(sock); 170 zcopy = vq->ubufs;
170 171
171 for (;;) { 172 for (;;) {
172 /* Release DMAs done buffers first */ 173 /* Release DMAs done buffers first */
@@ -238,7 +239,7 @@ static void handle_tx(struct vhost_net *net)
238 239
239 vq->heads[vq->upend_idx].len = len; 240 vq->heads[vq->upend_idx].len = len;
240 ubuf->callback = vhost_zerocopy_callback; 241 ubuf->callback = vhost_zerocopy_callback;
241 ubuf->arg = vq->ubufs; 242 ubuf->ctx = vq->ubufs;
242 ubuf->desc = vq->upend_idx; 243 ubuf->desc = vq->upend_idx;
243 msg.msg_control = ubuf; 244 msg.msg_control = ubuf;
244 msg.msg_controllen = sizeof(ubuf); 245 msg.msg_controllen = sizeof(ubuf);
@@ -257,7 +258,8 @@ static void handle_tx(struct vhost_net *net)
257 UIO_MAXIOV; 258 UIO_MAXIOV;
258 } 259 }
259 vhost_discard_vq_desc(vq, 1); 260 vhost_discard_vq_desc(vq, 1);
260 tx_poll_start(net, sock); 261 if (err == -EAGAIN || err == -ENOBUFS)
262 tx_poll_start(net, sock);
261 break; 263 break;
262 } 264 }
263 if (err != len) 265 if (err != len)
@@ -265,6 +267,8 @@ static void handle_tx(struct vhost_net *net)
265 " len %d != %zd\n", err, len); 267 " len %d != %zd\n", err, len);
266 if (!zcopy) 268 if (!zcopy)
267 vhost_add_used_and_signal(&net->dev, vq, head, 0); 269 vhost_add_used_and_signal(&net->dev, vq, head, 0);
270 else
271 vhost_zerocopy_signal_used(vq);
268 total_len += len; 272 total_len += len;
269 if (unlikely(total_len >= VHOST_NET_WEIGHT)) { 273 if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
270 vhost_poll_queue(&vq->poll); 274 vhost_poll_queue(&vq->poll);
@@ -283,8 +287,12 @@ static int peek_head_len(struct sock *sk)
283 287
284 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); 288 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
285 head = skb_peek(&sk->sk_receive_queue); 289 head = skb_peek(&sk->sk_receive_queue);
286 if (likely(head)) 290 if (likely(head)) {
287 len = head->len; 291 len = head->len;
292 if (vlan_tx_tag_present(head))
293 len += VLAN_HLEN;
294 }
295
288 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags); 296 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
289 return len; 297 return len;
290} 298}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 947f00d8e091..94dbd25caa30 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1598,12 +1598,12 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
1598 kfree(ubufs); 1598 kfree(ubufs);
1599} 1599}
1600 1600
1601void vhost_zerocopy_callback(void *arg) 1601void vhost_zerocopy_callback(struct ubuf_info *ubuf)
1602{ 1602{
1603 struct ubuf_info *ubuf = arg; 1603 struct vhost_ubuf_ref *ubufs = ubuf->ctx;
1604 struct vhost_ubuf_ref *ubufs = ubuf->arg;
1605 struct vhost_virtqueue *vq = ubufs->vq; 1604 struct vhost_virtqueue *vq = ubufs->vq;
1606 1605
1606 vhost_poll_queue(&vq->poll);
1607 /* set len = 1 to mark this desc buffers done DMA */ 1607 /* set len = 1 to mark this desc buffers done DMA */
1608 vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN; 1608 vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
1609 kref_put(&ubufs->kref, vhost_zerocopy_done_signal); 1609 kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 8dcf4cca6bf2..8de1fd5b8efb 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -188,7 +188,7 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
188 188
189int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 189int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
190 unsigned int log_num, u64 len); 190 unsigned int log_num, u64 len);
191void vhost_zerocopy_callback(void *arg); 191void vhost_zerocopy_callback(struct ubuf_info *);
192int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq); 192int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
193 193
194#define vq_err(vq, fmt, ...) do { \ 194#define vq_err(vq, fmt, ...) do { \
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
index 86922ac84412..353c02fe8a95 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -13,6 +13,7 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/fb.h> 15#include <linux/fb.h>
16#include <linux/gpio.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/types.h> 19#include <linux/types.h>
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 6468a297e341..39571f9e0162 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -22,7 +22,9 @@
22#include <linux/font.h> 22#include <linux/font.h>
23 23
24#include <asm/hardware.h> 24#include <asm/hardware.h>
25#include <asm/page.h>
25#include <asm/parisc-device.h> 26#include <asm/parisc-device.h>
27#include <asm/pdc.h>
26#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
27#include <asm/grfioctl.h> 29#include <asm/grfioctl.h>
28 30
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 26e83d7fdd6f..b0e2a4261afe 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -73,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
73 struct uvesafb_task *utask; 73 struct uvesafb_task *utask;
74 struct uvesafb_ktask *task; 74 struct uvesafb_ktask *task;
75 75
76 if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) 76 if (!capable(CAP_SYS_ADMIN))
77 return; 77 return;
78 78
79 if (msg->seq >= UVESAFB_TASKS_MAX) 79 if (msg->seq >= UVESAFB_TASKS_MAX)
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index cb4529c40d74..b7f5173ff9e9 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -365,7 +365,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
365 struct fb_info *fb_info; 365 struct fb_info *fb_info;
366 int fb_size; 366 int fb_size;
367 int val; 367 int val;
368 int ret; 368 int ret = 0;
369 369
370 info = kzalloc(sizeof(*info), GFP_KERNEL); 370 info = kzalloc(sizeof(*info), GFP_KERNEL);
371 if (info == NULL) { 371 if (info == NULL) {
@@ -458,26 +458,31 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
458 xenfb_init_shared_page(info, fb_info); 458 xenfb_init_shared_page(info, fb_info);
459 459
460 ret = xenfb_connect_backend(dev, info); 460 ret = xenfb_connect_backend(dev, info);
461 if (ret < 0) 461 if (ret < 0) {
462 goto error; 462 xenbus_dev_fatal(dev, ret, "xenfb_connect_backend");
463 goto error_fb;
464 }
463 465
464 ret = register_framebuffer(fb_info); 466 ret = register_framebuffer(fb_info);
465 if (ret) { 467 if (ret) {
466 fb_deferred_io_cleanup(fb_info);
467 fb_dealloc_cmap(&fb_info->cmap);
468 framebuffer_release(fb_info);
469 xenbus_dev_fatal(dev, ret, "register_framebuffer"); 468 xenbus_dev_fatal(dev, ret, "register_framebuffer");
470 goto error; 469 goto error_fb;
471 } 470 }
472 info->fb_info = fb_info; 471 info->fb_info = fb_info;
473 472
474 xenfb_make_preferred_console(); 473 xenfb_make_preferred_console();
475 return 0; 474 return 0;
476 475
477 error_nomem: 476error_fb:
478 ret = -ENOMEM; 477 fb_deferred_io_cleanup(fb_info);
479 xenbus_dev_fatal(dev, ret, "allocating device memory"); 478 fb_dealloc_cmap(&fb_info->cmap);
480 error: 479 framebuffer_release(fb_info);
480error_nomem:
481 if (!ret) {
482 ret = -ENOMEM;
483 xenbus_dev_fatal(dev, ret, "allocating device memory");
484 }
485error:
481 xenfb_remove(dev); 486 xenfb_remove(dev);
482 return ret; 487 return ret;
483} 488}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c2d05a8279fd..8807fe501d20 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -390,6 +390,7 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
390 /* There might be pages left in the balloon: free them. */ 390 /* There might be pages left in the balloon: free them. */
391 while (vb->num_pages) 391 while (vb->num_pages)
392 leak_balloon(vb, vb->num_pages); 392 leak_balloon(vb, vb->num_pages);
393 update_balloon_size(vb);
393 394
394 /* Now we reset the device so we can clean up the queues. */ 395 /* Now we reset the device so we can clean up the queues. */
395 vdev->config->reset(vdev); 396 vdev->config->reset(vdev);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index cbc7ceef2786..9f13b897fd64 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -435,16 +435,16 @@ static void hpwdt_start(void)
435{ 435{
436 reload = SECS_TO_TICKS(soft_margin); 436 reload = SECS_TO_TICKS(soft_margin);
437 iowrite16(reload, hpwdt_timer_reg); 437 iowrite16(reload, hpwdt_timer_reg);
438 iowrite16(0x85, hpwdt_timer_con); 438 iowrite8(0x85, hpwdt_timer_con);
439} 439}
440 440
441static void hpwdt_stop(void) 441static void hpwdt_stop(void)
442{ 442{
443 unsigned long data; 443 unsigned long data;
444 444
445 data = ioread16(hpwdt_timer_con); 445 data = ioread8(hpwdt_timer_con);
446 data &= 0xFE; 446 data &= 0xFE;
447 iowrite16(data, hpwdt_timer_con); 447 iowrite8(data, hpwdt_timer_con);
448} 448}
449 449
450static void hpwdt_ping(void) 450static void hpwdt_ping(void)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 94243136f6bf..ea20c51d24c7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -183,15 +183,17 @@ config XEN_ACPI_PROCESSOR
183 depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ 183 depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
184 default m 184 default m
185 help 185 help
186 This ACPI processor uploads Power Management information to the Xen hypervisor. 186 This ACPI processor uploads Power Management information to the Xen
187 187 hypervisor.
188 To do that the driver parses the Power Management data and uploads said 188
189 information to the Xen hypervisor. Then the Xen hypervisor can select the 189 To do that the driver parses the Power Management data and uploads
190 proper Cx and Pxx states. It also registers itslef as the SMM so that 190 said information to the Xen hypervisor. Then the Xen hypervisor can
191 other drivers (such as ACPI cpufreq scaling driver) will not load. 191 select the proper Cx and Pxx states. It also registers itslef as the
192 192 SMM so that other drivers (such as ACPI cpufreq scaling driver) will
193 To compile this driver as a module, choose M here: the 193 not load.
194 module will be called xen_acpi_processor If you do not know what to choose, 194
195 select M here. If the CPUFREQ drivers are built in, select Y here. 195 To compile this driver as a module, choose M here: the module will be
196 called xen_acpi_processor If you do not know what to choose, select
197 M here. If the CPUFREQ drivers are built in, select Y here.
196 198
197endmenu 199endmenu
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 4b33acd8ed4e..0a8a17cd80be 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -274,7 +274,7 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
274 274
275static bool pirq_check_eoi_map(unsigned irq) 275static bool pirq_check_eoi_map(unsigned irq)
276{ 276{
277 return test_bit(irq, pirq_eoi_map); 277 return test_bit(pirq_from_irq(irq), pirq_eoi_map);
278} 278}
279 279
280static bool pirq_needs_eoi_flag(unsigned irq) 280static bool pirq_needs_eoi_flag(unsigned irq)
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 174b5653cd8a..0b48579a9cd6 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -128,7 +128,10 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
128 pr_debug(" C%d: %s %d uS\n", 128 pr_debug(" C%d: %s %d uS\n",
129 cx->type, cx->desc, (u32)cx->latency); 129 cx->type, cx->desc, (u32)cx->latency);
130 } 130 }
131 } else 131 } else if (ret != -EINVAL)
132 /* EINVAL means the ACPI ID is incorrect - meaning the ACPI
133 * table is referencing a non-existing CPU - which can happen
134 * with broken ACPI tables. */
132 pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n", 135 pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n",
133 ret, _pr->acpi_id); 136 ret, _pr->acpi_id);
134 137